The following are code examples showing how to use numpy.fromstring. They are extracted from open source Python projects.
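All of the snippets below center on numpy.fromstring, which can either reinterpret a raw byte string as a typed array or parse a delimited text string of numbers. A minimal sketch of both modes (note that for raw binary input, NumPy has deprecated fromstring in favor of the equivalent np.frombuffer since version 1.14):

import numpy as np

# Binary mode: reinterpret raw bytes as typed values (little-endian machine assumed).
a = np.fromstring(b"\x01\x00\x02\x00", dtype=np.uint16)   # array([1, 2], dtype=uint16)
# np.frombuffer(b"\x01\x00\x02\x00", dtype=np.uint16) is the non-deprecated equivalent.

# Text mode: parse a delimited string of numbers.
b = np.fromstring("1.0 2.0 3.0", dtype=float, sep=" ")    # array([1., 2., 3.])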
Example 1
def get_example(self, i):
    id = self.all_keys[i]
    img = None
    val = self.db.get(id.encode())
    img = cv2.imdecode(np.fromstring(val, dtype=np.uint8), 1)
    img = self.do_augmentation(img)

    img_color = img
    img_color = self.preprocess_image(img_color)

    img_line = XDoG(img)
    img_line = cv2.cvtColor(img_line, cv2.COLOR_GRAY2RGB)
    #if img_line.ndim == 2:
    #    img_line = img_line[:, :, np.newaxis]
    img_line = self.preprocess_image(img_line)

    return img_line, img_color
Example 2
def __init__(self, image, samplefac=10, colors=256):
    # Check Numpy
    if np is None:
        raise RuntimeError("Need Numpy for the NeuQuant algorithm.")

    # Check image
    if image.size[0] * image.size[1] < NeuQuant.MAXPRIME:
        raise IOError("Image is too small")
    if image.mode != "RGBA":
        raise IOError("Image mode should be RGBA.")

    # Initialize
    self.setconstants(samplefac, colors)
    self.pixels = np.fromstring(image.tostring(), np.uint32)
    self.setUpArrays()

    self.learn()
    self.fix()
    self.inxbuild()
Example 3
def _readData1(self, fd, meta, mmap=False, **kwds):
    ## Read array data from the file descriptor for MetaArray v1 files
    ## read in axis values for any axis that specifies a length
    frameSize = 1
    for ax in meta['info']:
        if 'values_len' in ax:
            ax['values'] = np.fromstring(fd.read(ax['values_len']), dtype=ax['values_type'])
            frameSize *= ax['values_len']
            del ax['values_len']
            del ax['values_type']
    self._info = meta['info']
    if not kwds.get("readAllData", True):
        return
    ## the remaining data is the actual array
    if mmap:
        subarr = np.memmap(fd, dtype=meta['type'], mode='r', shape=meta['shape'])
    else:
        subarr = np.fromstring(fd.read(), dtype=meta['type'])
        subarr.shape = meta['shape']
    self._data = subarr
Example 4
def decode_data(obj):
    """Decode a serialised data object.

    Parameters
    ----------
    obj : Python dictionary
        A dictionary describing a serialised data object.
    """
    try:
        if TYPES['str'] == obj[b'type']:
            return decode_str(obj[b'data'])
        elif TYPES['ndarray'] == obj[b'type']:
            return np.fromstring(obj[b'data'], dtype=np.dtype(
                obj[b'dtype'])).reshape(obj[b'shape'])
        else:
            # Assume the user knows what they are doing
            return obj
    except KeyError:
        # Assume the user knows what they are doing
        return obj
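The ndarray branch expects the payload dict to carry the raw bytes along with dtype and shape metadata. A round-trip sketch of that convention (the value stored under b'type' and the TYPES mapping come from the surrounding project, so the tag below is a placeholder):

import numpy as np

arr = np.arange(6, dtype=np.float64).reshape(2, 3)
obj = {b'type': 'ndarray',        # placeholder for TYPES['ndarray']
       b'data': arr.tostring(),   # raw bytes of the array
       b'dtype': arr.dtype.str,   # e.g. '<f8'
       b'shape': arr.shape}
restored = np.fromstring(obj[b'data'], dtype=np.dtype(obj[b'dtype'])).reshape(obj[b'shape'])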
Example 5
def __init__(self, feat_stride, scales, ratios, is_train=False, output_score=False):
    super(ProposalOperator, self).__init__()
    self._feat_stride = float(feat_stride)
    self._scales = np.fromstring(scales[1:-1], dtype=float, sep=',')
    self._ratios = np.fromstring(ratios[1:-1], dtype=float, sep=',').tolist()
    self._anchors = generate_anchors(base_size=self._feat_stride, scales=self._scales, ratios=self._ratios)
    self._num_anchors = self._anchors.shape[0]
    self._output_score = output_score

    if DEBUG:
        print 'feat_stride: {}'.format(self._feat_stride)
        print 'anchors:'
        print self._anchors

    if is_train:
        self.cfg_key = 'TRAIN'
    else:
        self.cfg_key = 'TEST'
Example 6
def read_uncompressed_patch(pcpatch_wkb, schema):
    '''
    Patch binary structure uncompressed:
    byte:         endianness (1 = NDR, 0 = XDR)
    uint32:       pcid (key to POINTCLOUD_SCHEMAS)
    uint32:       0 = no compression
    uint32:       npoints
    pointdata[]:  interpret relative to pcid
    '''
    patchbin = unhexlify(pcpatch_wkb)
    npoints = unpack("I", patchbin[9:13])[0]
    dt = schema_dtype(schema)
    patch = np.fromstring(patchbin[13:], dtype=dt)
    # debug
    # print(patch[:10])
    return patch, npoints
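The docstring spells out the 13-byte header that precedes the point data; it can be unpacked field by field. A minimal sketch, assuming NDR (little-endian) byte order and Python 3 bytes indexing:

from struct import unpack

def parse_patch_header(patchbin):
    endianness = patchbin[0]                              # 1 = NDR, 0 = XDR
    pcid, compression, npoints = unpack("<III", patchbin[1:13])
    return endianness, pcid, compression, npoints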
Example 7
def decompress(points, schema):
    """
    Decode patch encoded with lazperf.
    'points' is a pcpatch in wkb
    """
    # retrieve number of points in wkb pgpointcloud patch
    npoints = patch_numpoints(points)
    hexbuffer = unhexlify(points[34:])
    hexbuffer += hexa_signed_int32(npoints)

    # uncompress
    s = json.dumps(schema).replace("\\", "")
    dtype = buildNumpyDescription(json.loads(s))
    lazdata = bytes(hexbuffer)

    arr = np.fromstring(lazdata, dtype=np.uint8)
    d = Decompressor(arr, s)
    output = np.zeros(npoints * dtype.itemsize, dtype=np.uint8)
    decompressed = d.decompress(output)

    return decompressed
Example 8
def get_original_image(tfrecords_dir, is_training_data=False):
    record = tf.python_io.tf_record_iterator(tfrecords_dir).next()
    example = tf.train.Example()
    example.ParseFromString(record)

    shape = np.fromstring(example.features.feature['shape'].bytes_list.value[0], dtype=np.int32)
    image = np.fromstring(example.features.feature['img_raw'].bytes_list.value[0], dtype=np.float32)
    image = image.reshape(shape)

    if is_training_data:
        ground_truth = np.fromstring(example.features.feature['gt_raw'].bytes_list.value[0], dtype=np.uint8)
        ground_truth = ground_truth.reshape(shape[:-1])
    else:
        ground_truth = None

    return image, ground_truth
Example 9
def load_bin_vec(self, fname, vocab):
    """
    Loads 300x1 word vecs from Google (Mikolov) word2vec
    """
    word_vecs = {}
    with open(fname, "rb") as f:
        header = f.readline()
        vocab_size, layer1_size = map(int, header.split())
        binary_len = np.dtype('float32').itemsize * layer1_size
        for line in xrange(vocab_size):
            word = []
            while True:
                ch = f.read(1)
                if ch == ' ':
                    word = ''.join(word)
                    break
                if ch != '\n':
                    word.append(ch)
            if word in vocab:
                word_vecs[word] = np.fromstring(f.read(binary_len), dtype='float32')
            else:
                f.read(binary_len)
    logger.info("num words already in word2vec: " + str(len(word_vecs)))
    return word_vecs
Example 10
def vec2bin(input_path, output_path):
    input_fd = open(input_path, "rb")
    output_fd = open(output_path, "wb")
    header = input_fd.readline()
    output_fd.write(header)
    vocab_size, vector_size = map(int, header.split())
    for line in tqdm(range(vocab_size)):
        word = []
        while True:
            ch = input_fd.read(1)
            output_fd.write(ch)
            if ch == b' ':
                word = b''.join(word).decode('utf-8')
                break
            if ch != b'\n':
                word.append(ch)
        vector = np.fromstring(input_fd.readline(), sep=' ', dtype='float32')
        output_fd.write(vector.tostring())
    input_fd.close()
    output_fd.close()
Example 11
def get_glove_k(self, K):
    assert hasattr(self, 'glove_path'), 'warning: you need to set_glove_path(glove_path)'
    # create word_vec with the first K glove vectors
    k = 0
    word_vec = {}
    with io.open(self.glove_path) as f:
        for line in f:
            word, vec = line.split(' ', 1)
            if k <= K:
                word_vec[word] = np.fromstring(vec, sep=' ')
                k += 1
            if k > K:
                if word in ['<s>', '</s>']:
                    word_vec[word] = np.fromstring(vec, sep=' ')
            if k > K and all([w in word_vec for w in ['<s>', '</s>']]):
                break
    return word_vec
Example 12
def fig2array(fig):
    """Convert a Matplotlib figure to a 3D numpy array of RGB values

    Params
    ------
    fig: A matplotlib figure

    Return
    ------
    A numpy 3D array of RGB values

    Modified version of: http://www.icare.univ-lille1.fr/node/1141
    """
    # draw the renderer
    fig.canvas.draw()

    # Get the RGB buffer from the figure
    w, h = fig.canvas.get_width_height()
    buf = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8)
    buf.shape = (h, w, 3)
    return buf
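A usage sketch of fig2array on an off-screen Agg canvas (note that fig.canvas.tostring_rgb has been deprecated in recent Matplotlib releases, so this pattern is tied to older versions):

import matplotlib
matplotlib.use("Agg")    # render off-screen
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
arr = fig2array(fig)
print(arr.shape)         # (height, width, 3)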
Example 13
def _wav2array(nchannels, sampwidth, data):
    """data must be the string containing the bytes from the wav file."""
    num_samples, remainder = divmod(len(data), sampwidth * nchannels)
    if remainder > 0:
        raise ValueError('The length of data is not a multiple of '
                         'sampwidth * num_channels.')
    if sampwidth > 4:
        raise ValueError("sampwidth must not be greater than 4.")

    if sampwidth == 3:
        a = np.empty((num_samples, nchannels, 4), dtype=np.uint8)
        raw_bytes = np.fromstring(data, dtype=np.uint8)
        a[:, :, :sampwidth] = raw_bytes.reshape(-1, nchannels, sampwidth)
        a[:, :, sampwidth:] = (a[:, :, sampwidth - 1:sampwidth] >> 7) * 255
        result = a.view('<i4').reshape(a.shape[:-1])
    else:
        # 8 bit samples are stored as unsigned ints; others as signed ints.
        dt_char = 'u' if sampwidth == 1 else 'i'
        a = np.fromstring(data, dtype='<%s%d' % (dt_char, sampwidth))
        result = a.reshape(-1, nchannels)
    return result
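The sampwidth == 3 branch widens 24-bit samples to 32 bits: the three raw bytes are copied into the low positions and the fourth byte is filled with 0x00 or 0xFF depending on the sign bit, i.e. plain sign extension. A worked check on a single sample, assuming a little-endian layout:

import numpy as np

raw = b"\xff\xff\xff"                       # one 24-bit sample with value -1
a = np.zeros((1, 1, 4), dtype=np.uint8)
a[0, 0, :3] = np.fromstring(raw, dtype=np.uint8)
a[0, 0, 3:] = (a[0, 0, 2:3] >> 7) * 255     # sign-extend from the top byte
print(a.view("<i4"))                        # [[[-1]]]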
Example 14
def read_array(self, dtype, count=-1, sep=""):
    """Return numpy array from file.

    Work around numpy issue #2230, "numpy.fromfile does not accept
    StringIO object" https://github.com/numpy/numpy/issues/2230.
    """
    try:
        return numpy.fromfile(self._fh, dtype, count, sep)
    except IOError:
        if count < 0:
            size = self._size
        else:
            size = count * numpy.dtype(dtype).itemsize
        data = self._fh.read(size)
        return numpy.fromstring(data, dtype, count, sep)
Example 15
def load_bin_vec(fname, vocab):
    """
    Loads 300x1 word vecs from Google (Mikolov) word2vec
    """
    word_vecs = {}
    with open(fname, "rb") as f:
        header = f.readline()
        vocab_size, layer1_size = map(int, header.split())
        binary_len = np.dtype('float32').itemsize * layer1_size
        for line in xrange(vocab_size):
            word = []
            while True:
                ch = f.read(1)
                if ch == ' ':
                    word = ''.join(word)
                    break
                if ch != '\n':
                    word.append(ch)
            if word in vocab:
                word_vecs[word] = np.fromstring(f.read(binary_len), dtype='float32')
            else:
                f.read(binary_len)
    return word_vecs
Example 16
def load_wav_file(name):
    f = wave.open(name, "rb")
    # print("loading %s" % name)
    chunk = []
    data0 = f.readframes(CHUNK)
    while data0:  # f.getnframes()
        # data = numpy.fromstring(data0, dtype='float32')
        # data = numpy.fromstring(data0, dtype='uint16')
        data = numpy.fromstring(data0, dtype='uint8')
        data = (data + 128) / 255.  # 0-1 for better convergence
        # chunks.append(data)
        chunk.extend(data)
        data0 = f.readframes(CHUNK)
    # finally trim:
    chunk = chunk[0:CHUNK * 2]  # should be enough for now -> cut
    chunk.extend(numpy.zeros(CHUNK * 2 - len(chunk)))  # fill with padding 0's
    # print("%s loaded" % name)
    return chunk
Example 17
def pfmFromBuffer(buffer, reverse=1):
    sStream = cStringIO.StringIO(buffer)

    color = None
    width = None
    height = None
    scale = None
    endian = None

    header = sStream.readline().rstrip()
    color = (header == 'PF')

    width, height = map(int, sStream.readline().strip().split(' '))
    scale = float(sStream.readline().rstrip())
    endian = '<' if scale < 0 else '>'
    scale = abs(scale)

    rawdata = np.fromstring(sStream.read(), endian + 'f')
    shape = (height, width, 3) if color else (height, width)
    sStream.close()
    if len(shape) == 3:
        return rawdata.reshape(shape).astype(np.float32)[:, :, ::-1]
    else:
        return rawdata.reshape(shape).astype(np.float32)
Example 18
def sample(self, filename, save_samples):
    gan = self.gan
    generator = gan.generator.sample
    sess = gan.session
    config = gan.config
    x_v, z_v = sess.run([gan.inputs.x, gan.encoder.z])
    sample = sess.run(generator, {gan.inputs.x: x_v, gan.encoder.z: z_v})
    plt.clf()
    fig = plt.figure(figsize=(3, 3))
    plt.scatter(*zip(*x_v), c='b')
    plt.scatter(*zip(*sample), c='r')
    plt.xlim([-2, 2])
    plt.ylim([-2, 2])
    plt.ylabel("z")
    fig.canvas.draw()
    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    # plt.savefig(filename)
    self.plot(data, filename, save_samples)
    return [{'image': filename, 'label': '2d'}]
Example 19
def load_poses(self):
    """Load ground truth poses from file."""
    print('Loading poses for sequence ' + self.sequence + '...')

    pose_file = os.path.join(self.pose_path, self.sequence + '.txt')

    # Read and parse the poses
    try:
        self.T_w_cam0 = []
        with open(pose_file, 'r') as f:
            for line in f.readlines():
                T = np.fromstring(line, dtype=float, sep=' ')
                T = T.reshape(3, 4)
                T = np.vstack((T, [0, 0, 0, 1]))
                self.T_w_cam0.append(T)
        print('done.')
    except FileNotFoundError:
        print('Ground truth poses are not available for sequence ' +
              self.sequence + '.')
Example 20
def loadData(src, cimg):
    gzfname, h = urlretrieve(src, './delete.me')
    try:
        with gzip.open(gzfname) as gz:
            n = struct.unpack('I', gz.read(4))
            if n[0] != 0x3080000:
                raise Exception('Invalid file: unexpected magic number.')
            n = struct.unpack('>I', gz.read(4))[0]
            if n != cimg:
                raise Exception('Invalid file: expected {0} entries.'.format(cimg))
            crow = struct.unpack('>I', gz.read(4))[0]
            ccol = struct.unpack('>I', gz.read(4))[0]
            if crow != 28 or ccol != 28:
                raise Exception('Invalid file: expected 28 rows/cols per image.')
            res = np.fromstring(gz.read(cimg * crow * ccol), dtype=np.uint8)
    finally:
        os.remove(gzfname)
    return res.reshape((cimg, crow * ccol))
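The magic-number test looks odd at first glance: the IDX image format stores 0x00000803 big-endian, but the code unpacks it with the native (usually little-endian) 'I' format, so the expected constant shows up byte-swapped as 0x3080000. A sketch of the more conventional big-endian read (the filename is hypothetical):

import struct

with open("train-images-idx3-ubyte", "rb") as f:
    magic = struct.unpack(">I", f.read(4))[0]   # big-endian read
    assert magic == 0x803, "not an IDX3 image file"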
Example 21
def get_mnist_data(filename, num_samples, local_data_dir):
    gzfname = load_or_download_mnist_files(filename, num_samples, local_data_dir)
    with gzip.open(gzfname) as gz:
        # Read magic number.
        n = struct.unpack('I', gz.read(4))
        if n[0] != 0x3080000:
            raise Exception('Invalid file: unexpected magic number.')
        # Read number of entries.
        n = struct.unpack('>I', gz.read(4))[0]
        if n != num_samples:
            raise Exception('Invalid file: expected {0} entries.'.format(num_samples))
        crow = struct.unpack('>I', gz.read(4))[0]
        ccol = struct.unpack('>I', gz.read(4))[0]
        if crow != 28 or ccol != 28:
            raise Exception('Invalid file: expected 28 rows/cols per image.')
        # Read data.
        res = np.fromstring(gz.read(num_samples * crow * ccol), dtype=np.uint8)
    return res.reshape((num_samples, crow * ccol))
Example 22
def get_mnist_labels(filename, num_samples, local_data_dir):
    gzfname = load_or_download_mnist_files(filename, num_samples, local_data_dir)
    with gzip.open(gzfname) as gz:
        # Read magic number.
        n = struct.unpack('I', gz.read(4))
        if n[0] != 0x1080000:
            raise Exception('Invalid file: unexpected magic number.')
        # Read number of entries.
        n = struct.unpack('>I', gz.read(4))
        if n[0] != num_samples:
            raise Exception('Invalid file: expected {0} rows.'.format(num_samples))
        # Read labels.
        res = np.fromstring(gz.read(num_samples), dtype=np.uint8)
    return res.reshape((num_samples, 1))
Example 23
def shape(self, as_list=True):
    """
    Returns the size of the self tensor as a FloatTensor (or as List).

    Note:
        The returned value currently is a FloatTensor because it leverages
        the messaging mechanism with Unity.

    Parameters
    ----------
    as_list : bool
        Value returned as list if true; else as tensor

    Returns
    -------
    FloatTensor
        Output tensor
    (or)
    Iterable
        Output list
    """
    if as_list:
        return list(np.fromstring(self.get("shape")[:-1], sep=",").astype('int'))
    else:
        shape_tensor = self.no_params_func("shape", return_response=True)
        return shape_tensor
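The shape string arrives from Unity as comma-separated text with a trailing delimiter, which the [:-1] slice removes before parsing; the parsing step itself is just fromstring in text mode:

import numpy as np

print(list(np.fromstring("2,3", sep=",").astype('int')))   # [2, 3]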
Example 24
def stride(self, dim=-1):
    """
    Returns the stride of tensor.

    Parameters
    ----------
    dim : int
        dimension of expected return

    Returns
    -------
    FloatTensor
        Output tensor.
    (or)
    numpy.ndarray
        NumPy Array as Long
    """
    if dim == -1:
        return self.no_params_func("stride", return_response=True, return_type=None)
    else:
        strides = self.params_func("stride", [dim], return_response=True, return_type=None)
        return np.fromstring(strides, sep=' ').astype('long')
Example 25
def __init__(self, feat_stride, scales, ratios, output_score,
             rpn_pre_nms_top_n, rpn_post_nms_top_n, threshold, rpn_min_size):
    super(ProposalOperator, self).__init__()
    self._feat_stride = feat_stride
    self._scales = np.fromstring(scales[1:-1], dtype=float, sep=',')
    self._ratios = np.fromstring(ratios[1:-1], dtype=float, sep=',')
    self._anchors = generate_anchors(base_size=self._feat_stride, scales=self._scales, ratios=self._ratios)
    self._num_anchors = self._anchors.shape[0]
    self._output_score = output_score
    self._rpn_pre_nms_top_n = rpn_pre_nms_top_n
    self._rpn_post_nms_top_n = rpn_post_nms_top_n
    self._threshold = threshold
    self._rpn_min_size = rpn_min_size

    if DEBUG:
        print 'feat_stride: {}'.format(self._feat_stride)
        print 'anchors:'
        print self._anchors
Example 26
def __init__(self, feat_stride, scales, ratios, output_score,
             rpn_pre_nms_top_n, rpn_post_nms_top_n, threshold, rpn_min_size):
    super(ProposalOperator, self).__init__()
    self._feat_stride = feat_stride
    self._scales = np.fromstring(scales[1:-1], dtype=float, sep=',')
    self._ratios = np.fromstring(ratios[1:-1], dtype=float, sep=',')
    self._anchors = generate_anchors(base_size=self._feat_stride, scales=self._scales, ratios=self._ratios)
    self._num_anchors = self._anchors.shape[0]
    self._output_score = output_score
    self._rpn_pre_nms_top_n = rpn_pre_nms_top_n
    self._rpn_post_nms_top_n = rpn_post_nms_top_n
    self._threshold = threshold
    self._rpn_min_size = rpn_min_size

    if DEBUG:
        print('feat_stride: {}'.format(self._feat_stride))
        print('anchors:')
        print(self._anchors)
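Both variants parse bracket-wrapped config strings such as '(8,16,32)': the [1:-1] slice strips the parentheses and fromstring splits the rest on commas. A minimal check:

import numpy as np

scales = '(8,16,32)'
print(np.fromstring(scales[1:-1], dtype=float, sep=','))   # [ 8. 16. 32.]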
Example 27
def enwik8_raw_data(data_path=None, num_test_symbols=5000000):
    """Load raw data from data directory "data_path".

    The raw Hutter prize data is at:
    http://mattmahoney.net/dc/enwik8.zip

    Args:
        data_path: string path to the directory where simple-examples.tgz has
            been extracted.
        num_test_symbols: number of symbols at the end that make up the test set

    Returns:
        tuple (train_data, valid_data, test_data, unique)
        where each of the data objects can be passed to hutter_iterator.
    """
    data_path = os.path.join(data_path, "enwik8")

    raw_data = _read_symbols(data_path)
    raw_data = np.fromstring(raw_data, dtype=np.uint8)
    unique, data = np.unique(raw_data, return_inverse=True)
    train_data = data[: -2 * num_test_symbols]
    valid_data = data[-2 * num_test_symbols: -num_test_symbols]
    test_data = data[-num_test_symbols:]
    return train_data, valid_data, test_data, unique
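np.unique with return_inverse=True both collects the distinct byte values and remaps the whole corpus onto compact integer ids, so the train/valid/test slices hold small vocabulary indices rather than raw bytes. A tiny illustration:

import numpy as np

raw = np.fromstring(b"abca", dtype=np.uint8)    # [97 98 99 97]
unique, data = np.unique(raw, return_inverse=True)
print(unique)                                   # [97 98 99]
print(data)                                     # [0 1 2 0]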
Example 28
def text8_raw_data(data_path=None, num_test_symbols=5000000):
    """Load raw data from data directory "data_path".

    The raw text8 data is at:
    http://mattmahoney.net/dc/text8.zip

    Args:
        data_path: string path to the directory where simple-examples.tgz has
            been extracted.
        num_test_symbols: number of symbols at the end that make up the test set

    Returns:
        tuple (train_data, valid_data, test_data, unique)
        where each of the data objects can be passed to text8_iterator.
    """
    data_path = os.path.join(data_path, "text8")

    raw_data = _read_symbols(data_path)
    raw_data = np.fromstring(raw_data, dtype=np.uint8)
    unique, data = np.unique(raw_data, return_inverse=True)
    train_data = data[: -2 * num_test_symbols]
    valid_data = data[-2 * num_test_symbols: -num_test_symbols]
    test_data = data[-num_test_symbols:]
    return train_data, valid_data, test_data, unique
Example 29
def load_bin_vec(fname, vocab):
    """
    Loads word vecs from word2vec bin file
    """
    word_vecs = OrderedDict()
    with open(fname, "rb") as f:
        header = f.readline()
        vocab_size, layer1_size = map(int, header.split())
        binary_len = np.dtype('float32').itemsize * layer1_size
        for line in xrange(vocab_size):
            word = []
            while True:
                ch = f.read(1)
                if ch == ' ':
                    word = ''.join(word)
                    break
                if ch != '\n':
                    word.append(ch)
            if word in vocab:
                idx = vocab[word]
                word_vecs[idx] = np.fromstring(f.read(binary_len), dtype='float32')
            else:
                f.read(binary_len)
    return word_vecs
Example 30
async def test_if_items_patch_updates_stock_filter(self, init_db, headers, redis, session, client, api):
    body = [{
        'name': 'test',
        'stores': [{'id': 1}],
        'schema': {'properties': {'id': {'type': 'string'}}, 'type': 'object', 'id_names': ['id']}
    }]
    client = await client
    await client.post('/item_types/', headers=headers, data=ujson.dumps(body))

    body = [{'id': 'test'}]
    resp = await client.post('/item_types/1/items?store_id=1', headers=headers, data=ujson.dumps(body))
    assert resp.status == 201

    test_model = _all_models['store_items_test_1']
    await ItemsIndicesMap(test_model).update(session)

    body = [{'id': 'test', '_operation': 'delete'}]
    resp = await client.patch('/item_types/1/items?store_id=1', headers=headers, data=ujson.dumps(body))

    stock_filter = np.fromstring(await redis.get('store_items_test_1_stock_filter'), dtype=np.bool).tolist()
    assert stock_filter == [False]
Example 31
def predict(self, input_file):
    # img = base64.b64decode(input_base64)
    # img_array = np.fromstring(img, np.uint8)
    # input_file = cv2.imdecode(img_array, 1)
    # ip_converted = preprocessing.resizing(input_base64)
    segmented_image = preprocessing.image_segmentation(
        preprocessing.resizing(input_file))
    # processed_image = preprocessing.removebg(segmented_image)
    detect = pycolor.detect_color(
        segmented_image,
        self._mapping_file)
    return detect
Example 32
def load_word_vectors(file_destination):
    """
    This method loads the word vectors from the supplied file destination.
    It loads the dictionary of word vectors and prints its size and the
    vector dimensionality.
    """
    print "Loading pretrained word vectors from", file_destination
    word_dictionary = {}

    try:
        f = codecs.open(file_destination, 'r', 'utf-8')
        for line in f:
            line = line.split(" ", 1)
            key = unicode(line[0].lower())
            word_dictionary[key] = numpy.fromstring(line[1], dtype="float32", sep=" ")
    except:
        print "Word vectors could not be loaded from:", file_destination
        return {}

    print len(word_dictionary), "vectors loaded from", file_destination
    return word_dictionary
Example 33
def checkImageIsValid(imageBin):
    if imageBin is None:
        return False
    try:
        imageBuf = np.fromstring(imageBin, dtype=np.uint8)
        img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)
        imgH, imgW = img.shape[0], img.shape[1]
    except:
        return False
    else:
        if imgH * imgW == 0:
            return False
    return True
Example 34
def get_frame_input_feature(input_file):
    features = []
    record_iterator = tf.python_io.tf_record_iterator(path=input_file)
    for i, string_record in enumerate(record_iterator):
        example = tf.train.SequenceExample()
        example.ParseFromString(string_record)

        # traverse the Example format to get data
        video_id = example.context.feature['video_id'].bytes_list.value[0]
        label = example.context.feature['labels'].int64_list.value[:]
        rgbs = []
        audios = []
        rgb_feature = example.feature_lists.feature_list['rgb'].feature
        for i in range(len(rgb_feature)):
            rgb = np.fromstring(rgb_feature[i].bytes_list.value[0], dtype=np.uint8).astype(np.float32)
            rgb = utils.Dequantize(rgb, 2, -2)
            rgbs.append(rgb)
        audio_feature = example.feature_lists.feature_list['audio'].feature
        for i in range(len(audio_feature)):
            audio = np.fromstring(audio_feature[i].bytes_list.value[0], dtype=np.uint8).astype(np.float32)
            audio = utils.Dequantize(audio, 2, -2)
            audios.append(audio)
        rgbs = np.array(rgbs)
        audios = np.array(audios)
        features.append((video_id, label, rgbs, audios))
    return features
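utils.Dequantize maps the uint8-quantized YouTube-8M features back into a float range (here [-2, 2]). A sketch of the usual dequantization step, assuming that standard definition:

import numpy as np

def dequantize(feat, max_q=2, min_q=-2):
    # Invert uint8 quantization: scale [0, 255] back to [min_q, max_q].
    return feat.astype(np.float32) * (max_q - min_q) / 255.0 + min_q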
Example 35
def _unpack_data_block(f, blocksize, packing):
    """
    Private method to read a block from a file into a NumPy array.
    """
    return numpy.fromstring(f.read(blocksize), packing)
Example 36
def get_full_alignment_base_quality_scores(read):
    """
    Returns base quality scores for the full read alignment, inserting zeroes
    for deletions and removing inserted and soft-clipped bases. Therefore,
    only returns quality for truly aligned sequenced bases.

    Args:
        read (pysam.AlignedSegment): read to get quality scores for

    Returns:
        np.array: numpy array of quality scores
    """
    quality_scores = np.fromstring(read.qual, dtype=np.byte) - tk_constants.ILLUMINA_QUAL_OFFSET

    start_pos = 0

    for operation, length in read.cigar:
        operation = cr_constants.cigar_numeric_to_category_map[operation]

        if operation == 'D':
            quality_scores = np.insert(quality_scores, start_pos, [0] * length)
        elif operation == 'I' or operation == 'S':
            quality_scores = np.delete(quality_scores, np.s_[start_pos:start_pos + length])

        if not operation == 'I' and not operation == 'S':
            start_pos += length

    return start_pos, quality_scores
Example 37
def get_qvs(qual):
    if qual is None:
        return None
    return numpy.fromstring(qual, dtype=numpy.byte) - ILLUMINA_QUAL_OFFSET
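These helpers convert ASCII-encoded quality strings to numeric Phred scores; ILLUMINA_QUAL_OFFSET is the usual Phred+33 offset. A quick check under that assumption:

import numpy

ILLUMINA_QUAL_OFFSET = 33   # assumed Phred+33 encoding
print(numpy.fromstring(b"IIII", dtype=numpy.byte) - ILLUMINA_QUAL_OFFSET)   # [40 40 40 40]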
Example 38
def get_bases_qual(qual, cutoff):
    if qual is None:
        return None
    qvs = numpy.fromstring(qual, dtype=numpy.byte) - ILLUMINA_QUAL_OFFSET
    return numpy.count_nonzero(qvs[qvs > cutoff])
Example 39
def get_min_qual(qual):
    if qual is None or len(qual) == 0:
        return None
    return (numpy.fromstring(qual, dtype=numpy.byte) - ILLUMINA_QUAL_OFFSET).min()