Example 1
def extract_images(filename):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' %
                (magic, filename))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
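Examples 1, 4, and 50 call a _read32 helper that is not included above. A minimal sketch of it, assuming the standard MNIST convention of big-endian unsigned 32-bit header fields (as in the TensorFlow tutorial code these readers follow):

def _read32(bytestream):
    # MNIST headers store counts as big-endian uint32 values.
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]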
Example 2
def encode_jpeg(arr):
    assert arr.dtype == np.uint8

    # simulate a multi-channel array for single-channel arrays
    if len(arr.shape) == 3:
        arr = np.expand_dims(arr, 3)  # add channels to end of x,y,z

    arr = arr.transpose((3, 2, 1, 0))  # channels, z, y, x
    reshaped = arr.reshape(arr.shape[3] * arr.shape[2], arr.shape[1] * arr.shape[0])
    if arr.shape[0] == 1:
        img = Image.fromarray(reshaped, mode='L')
    elif arr.shape[0] == 3:
        img = Image.fromarray(reshaped, mode='RGB')
    else:
        # after the transpose, the channel count lives in arr.shape[0]
        raise ValueError("Number of image channels should be 1 or 3. Got: {}".format(arr.shape[0]))

    f = io.BytesIO()
    img.save(f, "JPEG")
    return f.getvalue()
Example 3
def array2PIL(arr, size):
    mode = 'RGBA'
    arr = arr.reshape(arr.shape[0] * arr.shape[1], arr.shape[2])
    if len(arr[0]) == 3:
        # add a fully opaque alpha channel for RGB input
        arr = numpy.c_[arr, 255 * numpy.ones((len(arr), 1), numpy.uint8)]
    return Image.frombuffer(mode, size, arr.tobytes(), 'raw', mode, 0, 1)
Example 4
def extract_images(filename):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' %
                (magic, filename))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
Example 5
def _write_digital_u_8(
        task_handle, write_array, num_samps_per_chan, auto_start, timeout,
        data_layout=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_written = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxWriteDigitalU8
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, c_bool32,
                    ctypes.c_double, ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.uint8, flags=('C', 'W')),
                    ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, auto_start, timeout,
        data_layout.value, write_array,
        ctypes.byref(samps_per_chan_written), None)
    check_for_error(error_code)

    return samps_per_chan_written.value
Example 6
def _read_digital_u_8(
        task_handle, read_array, num_samps_per_chan, timeout,
        fill_mode=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadDigitalU8
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.uint8, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, fill_mode.value,
        read_array, numpy.prod(read_array.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value
Example 7
def __getitem__(self, index):
    """__getitem__

    :param index:
    """
    img_path = self.files[self.split][index].rstrip()
    lbl_path = os.path.join(self.annotations_base,
                            os.path.basename(img_path)[:-4] + '.png')

    img = m.imread(img_path)
    img = np.array(img, dtype=np.uint8)

    lbl = m.imread(lbl_path)
    lbl = np.array(lbl, dtype=np.uint8)

    if self.is_transform:
        img, lbl = self.transform(img, lbl)

    return img, lbl
Example 8
def __getitem__(self, index):
    img_name = self.files[self.split][index]
    img_path = self.root + '/' + self.split + '/' + img_name
    lbl_path = self.root + '/' + self.split + 'annot/' + img_name

    img = m.imread(img_path)
    img = np.array(img, dtype=np.uint8)

    lbl = m.imread(lbl_path)
    lbl = np.array(lbl, dtype=np.int8)

    if self.augmentations is not None:
        img, lbl = self.augmentations(img, lbl)

    if self.is_transform:
        img, lbl = self.transform(img, lbl)

    return img, lbl
Example 9
def __getitem__(self, index):
    """__getitem__

    :param index:
    """
    img_path = self.files[self.split][index].rstrip()
    lbl_path = os.path.join(self.annotations_base,
                            img_path.split(os.sep)[-2],
                            os.path.basename(img_path)[:-15] + 'gtFine_labelIds.png')

    img = m.imread(img_path)
    img = np.array(img, dtype=np.uint8)

    lbl = m.imread(lbl_path)
    lbl = self.encode_segmap(np.array(lbl, dtype=np.uint8))

    if self.augmentations is not None:
        img, lbl = self.augmentations(img, lbl)

    if self.is_transform:
        img, lbl = self.transform(img, lbl)

    return img, lbl
Example 10
def _get_dtype_maps():
    """ Get dictionaries to map numpy data types to ITK types and the
    other way around.
    """
    # Define pairs
    tmp = [(np.float32, 'MET_FLOAT'),  (np.float64, 'MET_DOUBLE'),
           (np.uint8, 'MET_UCHAR'),    (np.int8, 'MET_CHAR'),
           (np.uint16, 'MET_USHORT'),  (np.int16, 'MET_SHORT'),
           (np.uint32, 'MET_UINT'),    (np.int32, 'MET_INT'),
           (np.uint64, 'MET_ULONG'),   (np.int64, 'MET_LONG')]

    # Create dictionaries
    map1, map2 = {}, {}
    for np_type, itk_type in tmp:
        map1[np_type.__name__] = itk_type
        map2[itk_type] = np_type.__name__

    # Done
    return map1, map2
Example 11
def loadLogoSet(path, rows, cols, test_data_rate=0.15):
    random.seed(612)
    _, imgID = readItems('data.txt')
    y, _ = modelDict(path)
    nPics = len(y)
    faceassset = np.zeros((nPics, rows, cols), dtype=np.uint8)  # gray images
    noImg = []
    for i in range(nPics):
        temp = cv2.imread(path + 'logo/' + imgID[i] + '.jpg', 0)
        if temp is None:  # cv2.imread returns None when the file cannot be read
            noImg.append(i)
        elif temp.size < 1000:
            noImg.append(i)
        else:
            temp = cv2.resize(temp, (cols, rows), interpolation=cv2.INTER_CUBIC)
            faceassset[i, :, :] = temp
    y = np.delete(y, noImg, 0)
    faceassset = np.delete(faceassset, noImg, 0)
    nPics = len(y)
    index = random.sample(range(nPics), int(nPics * test_data_rate))
    x_test = faceassset[index, :, :]
    x_train = np.delete(faceassset, index, 0)
    y_test = y[index]
    y_train = np.delete(y, index, 0)
    return (x_train, y_train), (x_test, y_test)
Example 12
def writeBinaray(outputFile, imagePath, label):
    img = Image.open(imagePath)
    img = img.resize((imageSize, imageSize), PIL.Image.ANTIALIAS)
    img = np.array(img)

    r = img[:, :, 0].flatten()
    g = img[:, :, 1].flatten()
    b = img[:, :, 2].flatten()
    label = [label]

    out = np.array(list(label) + list(r) + list(g) + list(b), np.uint8)
    outputFile.write(out.tobytes())

    # if you want to show the encoded image, set the 'debugEncodedImage' flag
    if debugEncodedImage:
        showImage(r, g, b)
Example 13
def test_gray2rgb():
    x = np.array([0, 0.5, 1])
    assert_raises(ValueError, gray2rgb, x)

    x = x.reshape((3, 1))
    y = gray2rgb(x)

    assert_equal(y.shape, (3, 1, 3))
    assert_equal(y.dtype, x.dtype)
    assert_equal(y[..., 0], x)
    assert_equal(y[0, 0, :], [0, 0, 0])

    x = np.array([[0, 128, 255]], dtype=np.uint8)
    z = gray2rgb(x)

    assert_equal(z.shape, (1, 3, 3))
    assert_equal(z[..., 0], x)
    assert_equal(z[0, 1, :], [128, 128, 128])
Example 14
def draw_sequences_test(step, action, qval, draw, region_image, background,
                        path_testing_folder, region_mask, image_name, save_boolean):
    aux = np.asarray(region_image, np.uint8)
    img_offset = (1000 * step, 70)
    footnote_offset = (1000 * step, 550)
    q_predictions_offset = (1000 * step, 500)
    mask_img_offset = (1000 * step, 700)

    img_for_paste = Image.fromarray(aux)
    background.paste(img_for_paste, img_offset)

    mask_img = Image.fromarray(255 * region_mask)
    background.paste(mask_img, mask_img_offset)

    footnote = 'action: ' + str(action)
    q_val_predictions_text = str(qval)

    draw.text(footnote_offset, footnote, (0, 0, 0), font=font)
    draw.text(q_predictions_offset, q_val_predictions_text, (0, 0, 0), font=font)

    file_name = path_testing_folder + image_name + '.png'
    if save_boolean == 1:
        background.save(file_name)

    return background
Example 15
def room2blocks_plus_normalized(data_label, num_point, block_size, stride,
                                random_sample, sample_num, sample_aug):
    """ room2block, with input filename and RGB preprocessing.
        For each block, centralize XYZ and add normalized XYZ as channels 6-8.
    """
    data = data_label[:, 0:6]
    data[:, 3:6] /= 255.0
    label = data_label[:, -1].astype(np.uint8)
    max_room_x = max(data[:, 0])
    max_room_y = max(data[:, 1])
    max_room_z = max(data[:, 2])

    data_batch, label_batch = room2blocks(data, label, num_point, block_size, stride,
                                          random_sample, sample_num, sample_aug)
    new_data_batch = np.zeros((data_batch.shape[0], num_point, 9))
    for b in range(data_batch.shape[0]):
        new_data_batch[b, :, 6] = data_batch[b, :, 0] / max_room_x
        new_data_batch[b, :, 7] = data_batch[b, :, 1] / max_room_y
        new_data_batch[b, :, 8] = data_batch[b, :, 2] / max_room_z
        minx = min(data_batch[b, :, 0])
        miny = min(data_batch[b, :, 1])
        data_batch[b, :, 0] -= (minx + block_size / 2)
        data_batch[b, :, 1] -= (miny + block_size / 2)
    new_data_batch[:, :, 0:6] = data_batch
    return new_data_batch, label_batch
Example 16
def room2samples_plus_normalized(data_label, num_point):
    """ room2sample, with input filename and RGB preprocessing.
        For each block, centralize XYZ and add normalized XYZ as channels 6-8.
    """
    data = data_label[:, 0:6]
    data[:, 3:6] /= 255.0
    label = data_label[:, -1].astype(np.uint8)
    max_room_x = max(data[:, 0])
    max_room_y = max(data[:, 1])
    max_room_z = max(data[:, 2])
    #print(max_room_x, max_room_y, max_room_z)

    data_batch, label_batch = room2samples(data, label, num_point)
    new_data_batch = np.zeros((data_batch.shape[0], num_point, 9))
    for b in range(data_batch.shape[0]):
        new_data_batch[b, :, 6] = data_batch[b, :, 0] / max_room_x
        new_data_batch[b, :, 7] = data_batch[b, :, 1] / max_room_y
        new_data_batch[b, :, 8] = data_batch[b, :, 2] / max_room_z
        #minx = min(data_batch[b, :, 0])
        #miny = min(data_batch[b, :, 1])
        #data_batch[b, :, 0] -= (minx + block_size / 2)
        #data_batch[b, :, 1] -= (miny + block_size / 2)
    new_data_batch[:, :, 0:6] = data_batch
    return new_data_batch, label_batch
Example 17
def draw_attention(img, *masks):
    cmap = plt.get_cmap('jet')
    imgs = []
    for mask in masks:
        # convert to heat map
        rgba_img = cmap(mask)
        rgb_img = np.delete(rgba_img, 3, 2)
        rgb_img = rgb_img * 255
        # mean
        mean_img = ((rgb_img + img) / 2).astype(np.uint8)
        # convert to PIL.Image
        mean_img = Image.fromarray(mean_img, "RGB")
        imgs.append(mean_img)
    return imgs
Example 18
def test_fill_missing():
    info = CloudVolume.create_new_info(
        num_channels=1,  # Increase this number when we add more tests for RGB
        layer_type='image',
        data_type='uint8',
        encoding='raw',
        resolution=[1, 1, 1],
        voxel_offset=[0, 0, 0],
        volume_size=[128, 128, 64],
        mesh='mesh',
        chunk_size=[64, 64, 64],
    )

    vol = CloudVolume('file:///tmp/cloudvolume/empty_volume', mip=0, info=info)
    vol.commit_info()

    vol = CloudVolume('file:///tmp/cloudvolume/empty_volume', mip=0, fill_missing=True)
    assert np.count_nonzero(vol[:]) == 0

    vol = CloudVolume('file:///tmp/cloudvolume/empty_volume', mip=0, fill_missing=True, cache=True)
    assert np.count_nonzero(vol[:]) == 0
    assert np.count_nonzero(vol[:]) == 0

    vol.flush_cache()
    delete_layer('/tmp/cloudvolume/empty_volume')
Example 19
def test_write():
    delete_layer()
    cv, data = create_layer(size=(50, 50, 50, 1), offset=(0, 0, 0))

    replacement_data = np.zeros(shape=(50, 50, 50, 1), dtype=np.uint8)
    cv[0:50, 0:50, 0:50] = replacement_data
    assert np.all(cv[0:50, 0:50, 0:50] == replacement_data)

    replacement_data = np.random.randint(255, size=(50, 50, 50, 1), dtype=np.uint8)
    cv[0:50, 0:50, 0:50] = replacement_data
    assert np.all(cv[0:50, 0:50, 0:50] == replacement_data)

    # out of bounds
    delete_layer()
    cv, data = create_layer(size=(128, 64, 64, 1), offset=(10, 20, 0))
    with pytest.raises(ValueError):
        cv[74:150, 20:84, 0:64] = np.ones(shape=(64, 64, 64, 1), dtype=np.uint8)

    # non-aligned writes
    delete_layer()
    cv, data = create_layer(size=(128, 64, 64, 1), offset=(10, 20, 0))
    with pytest.raises(ValueError):
        cv[21:85, 0:64, 0:64] = np.ones(shape=(64, 64, 64, 1), dtype=np.uint8)
Example 20
def draw_bounding_boxes(image, gt_boxes, im_info):
    num_boxes = gt_boxes.shape[0]
    gt_boxes_new = gt_boxes.copy()
    gt_boxes_new[:, :4] = np.round(gt_boxes_new[:, :4].copy() / im_info[2])
    disp_image = Image.fromarray(np.uint8(image[0]))

    for i in range(num_boxes):
        this_class = int(gt_boxes_new[i, 4])
        disp_image = _draw_single_box(disp_image,
                                      gt_boxes_new[i, 0],
                                      gt_boxes_new[i, 1],
                                      gt_boxes_new[i, 2],
                                      gt_boxes_new[i, 3],
                                      'N%02d-C%02d' % (i, this_class),
                                      FONT,
                                      color=STANDARD_COLORS[this_class % NUM_COLORS])

    image[0, :] = np.array(disp_image)
    return image
Example 21
def get_color_arr(c, n, flip_rb=False):
    """ Convert string c to carr array (N x 3) format """
    carr = None

    if isinstance(c, str):
        # single color
        carr = np.tile(np.array(colorConverter.to_rgb(c)), [n, 1])
    elif isinstance(c, float):
        carr = np.tile(np.array(color_func(c)), [n, 1])
    else:
        carr = reshape_arr(c)

    if flip_rb:
        b, r = carr[:, 0], carr[:, 2]
        carr[:, 0], carr[:, 2] = r.copy(), b.copy()

    # return floating point with values in [0, 1]
    return carr.astype(np.float32) / 255.0 if carr.dtype == np.uint8 else carr.astype(np.float32)
Example 22
def test_uw_rgbd_scene(version='v1'):
    from pybot.vision.image_utils import to_color
    from pybot.vision.imshow_utils import imshow_cv

    v1_directory = '/media/spillai/MRG-HD1/data/rgbd-scenes-v1/'
    v2_directory = '/media/spillai/MRG-HD1/data/rgbd-scenes-v2/rgbd-scenes-v2/'

    if version == 'v1':
        rgbd_data_uw = UWRGBDSceneDataset(version='v1',
                                          directory=os.path.join(v1_directory, 'rgbd-scenes'),
                                          aligned_directory=os.path.join(v1_directory, 'rgbd-scenes-aligned'))
    elif version == 'v2':
        rgbd_data_uw = UWRGBDSceneDataset(version='v2', directory=v2_directory)
    else:
        raise RuntimeError('''Version %s not supported. '''
                           '''Check dataset and choose v1/v2 scene dataset''' % version)

    for f in rgbd_data_uw.iteritems(every_k_frames=5, with_ground_truth=True):
        vis = rgbd_data_uw.annotate(f)
        imshow_cv('frame', np.hstack([f.img, vis]), text='Image')
        imshow_cv('depth', (f.depth / 16).astype(np.uint8), text='Depth')
        cv2.waitKey(100)

    return rgbd_data_uw
Example 23
def _process_label(self, fn):
    """
    TODO: Fix one-indexing to zero-index;
    retained one-index due to uint8 constraint
    """
    mat = loadmat(fn, squeeze_me=True)
    _labels = mat['seglabel'].astype(np.uint8)
    # _labels -= 1  # (move to zero-index)

    labels = np.zeros_like(_labels)
    for (idx, name) in enumerate(mat['names']):
        try:
            value = SUNRGBDDataset.target_hash[name]
        except KeyError:
            value = 0
        mask = _labels == idx + 1
        labels[mask] = value
    return self._pad_image(labels)
Example 24
def colormap(im, min_threshold=0.01):
    mask = im < min_threshold
    if im.ndim == 1:
        print(im)
        hsv = np.zeros((len(im), 3), dtype=np.uint8)
        hsv[:, 0] = (im * 180).astype(np.uint8)
        hsv[:, 1] = 255
        hsv[:, 2] = 255
        bgr = cv2.cvtColor(hsv.reshape(-1, 1, 3), cv2.COLOR_HSV2BGR).reshape(-1, 3)
        bgr[mask] = 0
    else:
        hsv = np.zeros((im.shape[0], im.shape[1], 3), np.uint8)
        hsv[..., 0] = (im * 180).astype(np.uint8)
        hsv[..., 1] = 255
        hsv[..., 2] = 255
        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        bgr[mask] = 0
    return bgr
Example 25
def set_value(self, value: int) -> None:
    value = self.bounds(value)
    # np.binary_repr automatically performs two's complement if needed
    binary = np.binary_repr(value, width=8)
    self.values = np.array(list(binary), dtype=np.uint8)
Example 26
def input_dataset():
    clock_key = SimlabAccessor._clock_key
    mclock_key = SimlabAccessor._master_clock_key
    svars_key = SimlabAccessor._snapshot_vars_key

    ds = xr.Dataset()

    ds['clock'] = ('clock', [0, 2, 4, 6, 8],
                   {clock_key: np.uint8(True), mclock_key: np.uint8(True)})
    ds['out'] = ('out', [0, 4, 8], {clock_key: np.uint8(True)})

    ds['grid__x_size'] = ((), 10, {'description': 'grid size'})
    ds['quantity__quantity'] = ('x', np.zeros(10), {'description': 'a quantity'})
    ds['some_process__some_param'] = ((), 1, {'description': 'some parameter'})
    ds['other_process__other_param'] = ('clock', [1, 2, 3, 4, 5],
                                        {'description': 'other parameter'})

    ds['clock'].attrs[svars_key] = 'quantity__quantity'
    ds['out'].attrs[svars_key] = ('other_process__other_effect,'
                                  'some_process__some_effect')
    ds.attrs[svars_key] = 'grid__x'

    return ds
Example 27
def findCorners(contour):
    """blank_image = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
    cv2.drawContours(blank_image, contour, -1, (255, 255, 255))
    rows, cols = img.shape[0], img.shape[1]
    M = cv2.getRotationMatrix2D((cols/2, rows/2), -45, 0.5)
    dst = cv2.warpAffine(blank_image, M, (cols, rows))
    cv2.imshow("rotatio", dst)
    cv2.waitKey()"""

    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    height_px_1 = box[0][1] - box[3][1]
    height_px_2 = box[1][1] - box[2][1]
    print(height_px_1, height_px_2)
    if height_px_1 < height_px_2:
        close_height_px = height_px_2
        far_height_px = height_px_1
    else:
        close_height_px = height_px_1
        far_height_px = height_px_2
    return close_height_px, far_height_px
Example 28
def make_grid(tensor, nrow=8, padding=2, normalize=False, scale_each=False):
    """Code based on https://github.com/pytorch/vision/blob/master/torchvision/utils.py"""
    nmaps = tensor.shape[0]
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    height, width = int(tensor.shape[1] + padding), int(tensor.shape[2] + padding)
    grid = np.zeros([height * ymaps + 1 + padding // 2, width * xmaps + 1 + padding // 2, 3],
                    dtype=np.uint8)
    k = 0
    for y in range(ymaps):
        for x in range(xmaps):
            if k >= nmaps:
                break
            h, h_width = y * height + 1 + padding // 2, height - padding
            w, w_width = x * width + 1 + padding // 2, width - padding
            grid[h:h + h_width, w:w + w_width] = tensor[k]
            k = k + 1
    return grid
Example 29
def make_gif(images, fname, duration=2, true_image=False):
    import moviepy.editor as mpy

    def make_frame(t):
        try:
            x = images[int(len(images) / duration * t)]
        except IndexError:
            x = images[-1]

        if true_image:
            return x.astype(np.uint8)
        else:
            return ((x + 1) / 2 * 255).astype(np.uint8)

    clip = mpy.VideoClip(make_frame, duration=duration)
    clip.write_gif(fname, fps=len(images) / duration)
Example 30
def make_grid(tensor, nrow=8, padding=2, normalize=False, scale_each=False):
    """Code based on https://github.com/pytorch/vision/blob/master/torchvision/utils.py
    minor improvement, row/col was reversed"""
    nmaps = tensor.shape[0]
    ymaps = min(nrow, nmaps)
    xmaps = int(math.ceil(float(nmaps) / ymaps))
    height, width = int(tensor.shape[1] + padding), int(tensor.shape[2] + padding)
    grid = np.zeros([height * ymaps + 1 + padding // 2, width * xmaps + 1 + padding // 2, 3],
                    dtype=np.uint8)
    k = 0
    for y in range(ymaps):
        for x in range(xmaps):
            if k >= nmaps:
                break
            h, h_width = y * height + 1 + padding // 2, height - padding
            w, w_width = x * width + 1 + padding // 2, width - padding
            grid[h:h + h_width, w:w + w_width] = tensor[k]
            k = k + 1
    return grid
Example 31
def deprocess(img4d):
    img = img4d.copy()
    if K.image_dim_ordering() == "th":
        # (B, C, H, W)
        img = img.reshape((img4d.shape[1], img4d.shape[2], img4d.shape[3]))
        # (C, H, W) -> (H, W, C)
        img = img.transpose((1, 2, 0))
    else:
        # (B, H, W, C)
        img = img.reshape((img4d.shape[1], img4d.shape[2], img4d.shape[3]))
    img[:, :, 0] += 103.939
    img[:, :, 1] += 116.779
    img[:, :, 2] += 123.68
    # BGR -> RGB
    img = img[:, :, ::-1]
    img = np.clip(img, 0, 255).astype("uint8")
    return img

########################### main ###########################
Example 32
def get_data():
    corpus_path = os.path.join(os.path.dirname(__file__), 'data/lm/tiny_shakespeare.txt')
    raw_text = open(corpus_path, 'r').read()
    chars = list(set(raw_text))
    data_size, vocab_size = len(raw_text), len(chars)
    print("data has %d characters, %d unique." % (data_size, vocab_size))
    char_to_index = {ch: i for i, ch in enumerate(chars)}
    index_to_char = {i: ch for i, ch in enumerate(chars)}

    time_steps, batch_size = 30, 40
    length = batch_size * 20
    text_pointers = np.random.randint(data_size - time_steps - 1, size=length)
    batch_in = np.zeros([length, time_steps, vocab_size])
    batch_out = np.zeros([length, vocab_size], dtype=np.uint8)

    for i in range(length):
        b_ = [char_to_index[c] for c in raw_text[text_pointers[i]:text_pointers[i] + time_steps + 1]]
        batch_in[i, range(time_steps), b_[:-1]] = 1
        batch_out[i, b_[-1]] = 1

    return batch_size, vocab_size, time_steps, batch_in, batch_out
Example 33
def get_data():
    corpus_path = os.path.join(os.path.dirname(__file__), 'data/lm/tiny_shakespeare.txt')
    raw_text = open(corpus_path, 'r').read()
    chars = list(set(raw_text))
    data_size, vocab_size = len(raw_text), len(chars)
    print("data has %d characters, %d unique." % (data_size, vocab_size))
    char_to_index = {ch: i for i, ch in enumerate(chars)}
    index_to_char = {i: ch for i, ch in enumerate(chars)}

    time_steps, batch_size = 30, 40
    length = batch_size * 20
    text_pointers = np.random.randint(data_size - time_steps - 1, size=length)
    batch_in = np.zeros([length, time_steps, vocab_size])
    batch_out = np.zeros([length, vocab_size], dtype=np.uint8)

    for i in range(length):
        b_ = [char_to_index[c] for c in raw_text[text_pointers[i]:text_pointers[i] + time_steps + 1]]
        batch_in[i, range(time_steps), b_[:-1]] = 1
        batch_out[i, b_[-1]] = 1

    return batch_size, vocab_size, time_steps, batch_in, batch_out
Example 34
def __read_spike_fixed(self, numpts=40):
    """
    Read a spike with a fixed waveform length (40 time bins)

    -------------------------------------------
    Returns the time, waveform and trig2 value.

    The returned objects must be converted to a SpikeTrain then
    added to the Block.

    ID: 29079
    """
    # float32 -- spike time stamp in ms since start of SpikeTrain
    time = np.fromfile(self._fsrc, dtype=np.float32, count=1)

    # int8 * 40 -- spike shape -- use numpts for spike_var
    waveform = np.fromfile(self._fsrc, dtype=np.int8,
                           count=numpts).reshape(1, 1, numpts)

    # uint8 -- point of return to noise
    trig2 = np.fromfile(self._fsrc, dtype=np.uint8, count=1)

    return time, waveform, trig2
Example 35
def __read_spike_var(self):
    """
    Read a spike with a variable waveform length

    -------------------------------------------
    Returns the time, waveform and trig2 value.

    The returned objects must be converted to a SpikeTrain then
    added to the Block.

    ID: 29115
    """
    # uint8 -- number of points in spike shape
    numpts = np.fromfile(self._fsrc, dtype=np.uint8, count=1)[0]

    # spike_fixed is the same as spike_var if you don't read the numpts
    # byte and set numpts = 40
    return self.__read_spike_fixed(numpts)
Example 36
def __read_spike_fixed(self, numpts=40):
    """
    Read a spike with a fixed waveform length (40 time bins)

    -------------------------------------------
    Returns the time, waveform and trig2 value.

    The returned objects must be converted to a SpikeTrain then
    added to the Block.

    ID: 29079
    """
    # float32 -- spike time stamp in ms since start of SpikeTrain
    time = np.fromfile(self._fsrc, dtype=np.float32, count=1)

    # int8 * 40 -- spike shape -- use numpts for spike_var
    waveform = np.fromfile(self._fsrc, dtype=np.int8,
                           count=numpts).reshape(1, 1, numpts)

    # uint8 -- point of return to noise
    trig2 = np.fromfile(self._fsrc, dtype=np.uint8, count=1)

    return time, waveform, trig2
Example 37
def __read_spike_var(self):
    """
    Read a spike with a variable waveform length

    -------------------------------------------
    Returns the time, waveform and trig2 value.

    The returned objects must be converted to a SpikeTrain then
    added to the Block.

    ID: 29115
    """
    # uint8 -- number of points in spike shape
    numpts = np.fromfile(self._fsrc, dtype=np.uint8, count=1)[0]

    # spike_fixed is the same as spike_var if you don't read the numpts
    # byte and set numpts = 40
    return self.__read_spike_fixed(numpts)
Example 38
def __init__(self, *args, **kwds):
    import numpy
    self.dst_types = [numpy.uint8, numpy.uint16, numpy.uint32]
    try:
        self.dst_types.append(numpy.uint64)
    except AttributeError:
        pass
    pygame.display.init()
    try:
        unittest.TestCase.__init__(self, *args, **kwds)
        self.sources = [self._make_src_surface(8),
                        self._make_src_surface(16),
                        self._make_src_surface(16, srcalpha=True),
                        self._make_src_surface(24),
                        self._make_src_surface(32),
                        self._make_src_surface(32, srcalpha=True)]
    finally:
        pygame.display.quit()
Example 39
def array2d(surface):
    """pygame.numpyarray.array2d(Surface): return array

    copy pixels into a 2d array

    Copy the pixels from a Surface into a 2D array. The bit depth of the
    surface will control the size of the integer values, and will work
    for any type of pixel format.

    This function will temporarily lock the Surface as pixels are copied
    (see the Surface.lock - lock the Surface memory for pixel access
    method).
    """
    bpp = surface.get_bytesize()
    try:
        dtype = (numpy.uint8, numpy.uint16, numpy.int32, numpy.int32)[bpp - 1]
    except IndexError:
        raise ValueError("unsupported bit depth %i for 2D array" % (bpp * 8,))
    size = surface.get_size()
    array = numpy.empty(size, dtype)
    surface_to_array(array, surface)
    return array
Example 40
def array3d(surface):
    """pygame.numpyarray.array3d(Surface): return array

    copy pixels into a 3d array

    Copy the pixels from a Surface into a 3D array. The bit depth of the
    surface will control the size of the integer values, and will work
    for any type of pixel format.

    This function will temporarily lock the Surface as pixels are copied
    (see the Surface.lock - lock the Surface memory for pixel access
    method).
    """
    w, h = surface.get_size()
    array = numpy.empty((w, h, 3), numpy.uint8)
    surface_to_array(array, surface)
    return array
Example 41
def array_red(surface):
    """pygame.numpyarray.array_red(Surface): return array

    copy pixel red into a 2d array

    Copy the pixel red values from a Surface into a 2D array. This will work
    for any type of Surface format.

    This function will temporarily lock the Surface as pixels are copied
    (see the Surface.lock - lock the Surface memory for pixel access
    method).
    """
    size = surface.get_size()
    array = numpy.empty(size, numpy.uint8)
    surface_to_array(array, surface, 'R')
    return array
Example 42
def array_green(surface):
    """pygame.numpyarray.array_green(Surface): return array

    copy pixel green into a 2d array

    Copy the pixel green values from a Surface into a 2D array. This will work
    for any type of Surface format.

    This function will temporarily lock the Surface as pixels are copied
    (see the Surface.lock - lock the Surface memory for pixel access
    method).
    """
    size = surface.get_size()
    array = numpy.empty(size, numpy.uint8)
    surface_to_array(array, surface, 'G')
    return array
Example 43
def array_blue(surface):
    """pygame.numpyarray.array_blue(Surface): return array

    copy pixel blue into a 2d array

    Copy the pixel blue values from a Surface into a 2D array. This will work
    for any type of Surface format.

    This function will temporarily lock the Surface as pixels are copied
    (see the Surface.lock - lock the Surface memory for pixel access
    method).
    """
    size = surface.get_size()
    array = numpy.empty(size, numpy.uint8)
    surface_to_array(array, surface, 'B')
    return array
Example 44
def array_colorkey(surface):
    """pygame.numpyarray.array_colorkey(Surface): return array

    copy the colorkey values into a 2d array

    Create a new array with the colorkey transparency value from each pixel.
    If the pixel matches the colorkey it will be fully transparent; otherwise
    it will be fully opaque.

    This will work on any type of Surface format. If the image has no
    colorkey a solid opaque array will be returned.

    This function will temporarily lock the Surface as pixels are copied.
    """
    size = surface.get_size()
    array = numpy.empty(size, numpy.uint8)
    surface_to_array(array, surface, 'C')
    return array
Example 45
def write_tfrecord(self, img_list, label_list, record_path):
    # write a single tfrecord
    if os.path.exists(record_path):
        print("%s exists!" % record_path)
        return

    self._check_list()
    print("write %s" % record_path)
    self._write_info()

    writer = tf.python_io.TFRecordWriter(record_path)
    c = 0
    for imgname, label in zip(img_list, label_list):
        img = Image.open(imgname).resize((self.flags.width, self.flags.height))
        data = np.array(img).astype(np.uint8)
        img, data = self._check_color(img, data)
        example = self._get_example(data, label)
        writer.write(example.SerializeToString())
        c += 1
        if c % LOG_EVERY == 0:
            print("%d images written to tfrecord" % c)
    writer.close()
    print("writing %s done" % record_path)
Example 46
def do_random_brightness(self, img):
    if np.random.rand() > 0.7:
        return img
    # widen the dtype so the brightness offset below can go negative
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int32)
    hsv[:, :, 2] += np.random.randint(-40, 70)
    hsv = np.clip(hsv, 0, 255).astype(np.uint8)
    img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return img
Example 47
def ONES(n):
    return np.ones((n, n), np.uint8)
Example 48
def conv2d(x, W, strides=[1, 1, 1, 1], name=None):
    # return an op that convolves x with W
    strides = np.array(strides)
    if strides.size == 1:
        strides = np.array([1, strides, strides, 1])
    elif strides.size == 2:
        strides = np.array([1, strides[0], strides[1], 1])
    if np.any(strides < 1):
        strides = np.around(1. / strides).astype(np.uint8)
        return tf.nn.conv2d_transpose(x, W, strides=strides.tolist(), padding='SAME', name=name)
    else:
        return tf.nn.conv2d(x, W, strides=strides.tolist(), padding='SAME', name=name)
Example 49
def conv3d(x, W, strides=1, name=None):
    # return an op that convolves x with W
    strides = np.array(strides)
    if strides.size == 1:
        # a scalar stride applies to all three spatial dimensions
        strides = np.array([1, strides, strides, strides, 1])
    elif strides.size == 3:
        strides = np.array([1, strides[0], strides[1], strides[2], 1])
    if np.any(strides < 1):
        strides = np.around(1. / strides).astype(np.uint8)
        return tf.nn.conv3d_transpose(x, W, strides=strides.tolist(), padding='SAME', name=name)
    else:
        return tf.nn.conv3d(x, W, strides=strides.tolist(), padding='SAME', name=name)
Example 50
def extract_labels(filename, one_hot=False):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' %
                (magic, filename))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return dense_to_one_hot(labels)
        return labels
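Example 50 also relies on a dense_to_one_hot helper that is not shown above. A minimal sketch, under the usual assumption of 10 MNIST classes (mirroring the TensorFlow tutorial helper this reader pairs with; the original may differ):

def dense_to_one_hot(labels_dense, num_classes=10):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot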