The following are code examples showing how to use numpy.stack(). They are extracted from open source Python projects. You can vote up the examples you like or vote down the examples you don't like. You can also save this page to your account.
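Before the examples, a minimal sketch of the behavior they all rely on: numpy.stack joins a sequence of same-shaped arrays along a new axis (unlike numpy.concatenate, which joins along an existing one).

import numpy as np

a = np.arange(6).reshape(2, 3)
b = np.arange(6, 12).reshape(2, 3)

# stack() inserts a brand-new axis at the requested position.
print(np.stack([a, b]).shape)           # (2, 2, 3) -- new leading axis
print(np.stack([a, b], axis=-1).shape)  # (2, 3, 2) -- new trailing axis
print(np.concatenate([a, b]).shape)     # (4, 3)    -- no new axis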
Example 1
def get_comma_separated_data(raw):
    # Convert to long string
    header, data = "".join(raw).strip().split(" = ")

    # Remove trailing comma
    assert data[-1] == ';'
    data = data[:-1]

    # Remove newline characters and convert to list
    data = eval(data.replace("\n", ''))

    shape = tuple(eval(header[header.index("["):header.index("]") + 1]))
    step_size = functools.reduce(operator.mul, shape) + 1

    years = np.array(data[::step_size], dtype=int)
    data = np.stack([
        np.array(data[1 + index * step_size:(index + 1) * step_size]).reshape(shape)
        for index in range(len(years))
    ], axis=-1)

    return header, years, data
Example 2
def get_3d_data_slices(slices):  # get data in Hounsfield Units
    slices.sort(key=lambda x: float(x.ImagePositionPatient[2]))  # from v 9
    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)  # ensure int16 (it may be uint16 for some images)
    image[image == -2000] = 0       # correct cylindrical bound entries to 0

    # Convert to Hounsfield units (HU)
    # The intercept is usually -1024
    for slice_number in range(len(slices)):  # from v 8
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:  # added 16 Jan 2016, evening
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)

    return np.array(image, dtype=np.int16)
Example 3
def get_pixels_hu(slices):
    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)

    # Set outside-of-scan pixels to 0
    # The intercept is usually -1024, so air is approximately 0
    image[image == -2000] = 0

    # Convert to Hounsfield units (HU)
    ### slope can differ per slice -- so do it individually (case in point black_tset, slices 95 vs 96)
    ### Changes/correction - 31.01.2017
    for slice_number in range(len(slices)):
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)

    return np.array(image, dtype=np.int16)
Example 4
def get_3d_data_slices(slices):  # get data in Hounsfield Units
    #slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    #slices.sort(key=lambda x: int(x.InstanceNumber))  # was x.InstanceNumber
    slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))  # from v 8
    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)  # ensure int16 (it may be uint16 for some images)
    image[image == -2000] = 0       # correct cylindrical bound entries to 0

    # Convert to Hounsfield units (HU)
    # The intercept is usually -1024
    for slice_number in range(len(slices)):  # from v 8
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:  # added 16 Jan 2016, evening
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)

    return np.array(image, dtype=np.int16)
Example 5
def get_3d_data_hu(path):  # get data in Hounsfield Units
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    #slices.sort(key=lambda x: int(x.InstanceNumber))             # was x.InstanceNumber
    #slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))    # from v8 - BUGGY
    slices.sort(key=lambda x: float(x.ImagePositionPatient[2]))   # from 22.02
    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)  # ensure int16 (it may be uint16 for some images)
    image[image == -2000] = 0       # correct cylindrical bound entries to 0

    # Convert to Hounsfield units (HU)
    # The intercept is usually -1024
    for slice_number in range(len(slices)):  # from v 8
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:  # added 16 Jan 2016, evening
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)

    return np.array(image, dtype=np.int16)
Example 6
def test_against_numpy_nanstd(self):
    source = [np.random.random((16, 12, 5)) for _ in range(10)]
    for arr in source:
        arr[randint(0, 15), randint(0, 11), randint(0, 4)] = np.nan
    stack = np.stack(source, axis=-1)

    for axis in (0, 1, 2, None):
        for ddof in range(4):
            with self.subTest('axis = {}, ddof = {}'.format(axis, ddof)):
                from_numpy = np.nanstd(stack, axis=axis, ddof=ddof)
                from_ivar = last(istd(source, axis=axis, ddof=ddof, ignore_nan=True))
                self.assertSequenceEqual(from_numpy.shape, from_ivar.shape)
                self.assertTrue(np.allclose(from_ivar, from_numpy))
Example 7
def test_against_numpy(self):
    """ Test iall against numpy.all """
    stream = [np.zeros((8, 16, 2)) for _ in range(11)]
    stream[3][3, 0, 1] = 1  # so that np.all(axis = None) evaluates to False
    stack = np.stack(stream, axis=-1)

    with self.subTest('axis = None'):
        from_numpy = np.all(stack, axis=None)
        from_stream = last(iall(stream, axis=None))
        self.assertEqual(from_numpy, from_stream)

    for axis in range(stack.ndim):
        with self.subTest('axis = {}'.format(axis)):
            from_numpy = np.all(stack, axis=axis)
            from_stream = last(iall(stream, axis=axis))
            self.assertTrue(np.allclose(from_numpy, from_stream))
Example 8
def snapshot_to_xarray_variable(self, key, clock=None):
    """Convert snapshots taken for a specific model variable to an
    xarray.Variable object.
    """
    proc_name, var_name = key
    variable = self.model._processes[proc_name]._variables[var_name]

    array_list = self.snapshot_values[key]
    first_array = array_list[0]

    if len(array_list) == 1:
        data = first_array
    else:
        data = np.stack(array_list)

    dims = _get_dims_from_variable(first_array, variable)
    if clock is not None and len(array_list) > 1:
        dims = (clock,) + dims

    attrs = variable.attrs.copy()
    attrs['description'] = variable.description

    return xr.Variable(dims, data, attrs=attrs)
Example 9
def get_interv_table(model, intrv=True):
    n_batches = 25
    table_outputs = []
    d_vals = np.linspace(TINY, 0.6, n_batches)
    for name in model.cc.node_names:
        outputs = []
        for d_val in d_vals:
            do_dict = {model.cc.node_dict[name].label_logit:
                       d_val * np.ones((model.batch_size, 1))}
            outputs.append(model.sess.run(model.fake_labels, do_dict))
        out = np.vstack(outputs)
        table_outputs.append(out)

    table = np.stack(table_outputs, axis=2)
    np.mean(np.round(table), axis=0)
    return table

#dT=pd.DataFrame(index=p_names, data=T, columns=do_names)
#T=np.mean(np.round(table),axis=0)
#table=get_interv_table(model)
Example 10
def preprocess_images(images):
    if images.shape[0] < 4:
        # single image
        x_t = images[0]
        x_t = imresize(x_t, (80, 80))
        x_t = x_t.astype("float")
        x_t /= 255.0
        s_t = np.stack((x_t, x_t, x_t, x_t), axis=2)
    else:
        # 4 images
        xt_list = []
        for i in range(images.shape[0]):
            x_t = imresize(images[i], (80, 80))
            x_t = x_t.astype("float")
            x_t /= 255.0
            xt_list.append(x_t)
        s_t = np.stack((xt_list[0], xt_list[1], xt_list[2], xt_list[3]), axis=2)
    s_t = np.expand_dims(s_t, axis=0)
    return s_t

############################# main ###############################
Example 11
def preprocess_images(images):
    if images.shape[0] < 4:
        # single image
        x_t = images[0]
        x_t = imresize(x_t, (80, 80))
        x_t = x_t.astype("float")
        x_t /= 255.0
        s_t = np.stack((x_t, x_t, x_t, x_t), axis=2)
    else:
        # 4 images
        xt_list = []
        for i in range(images.shape[0]):
            x_t = imresize(images[i], (80, 80))
            x_t = x_t.astype("float")
            x_t /= 255.0
            xt_list.append(x_t)
        s_t = np.stack((xt_list[0], xt_list[1], xt_list[2], xt_list[3]), axis=2)
    s_t = np.expand_dims(s_t, axis=0)
    return s_t
Example 12
def calc_metrics(self, data_gen, history, dataset, logs):
    y_true = []
    predictions = []
    for i in range(data_gen.steps):
        if self.verbose == 1:
            print "\r\tdone {}/{}".format(i, data_gen.steps),
        (x, y) = next(data_gen)
        pred = self.model.predict(x, batch_size=self.batch_size)
        if isinstance(x, list) and len(x) == 2:  # deep supervision
            for m, t, p in zip(x[1].flatten(), y.flatten(), pred.flatten()):
                if np.equal(m, 1):
                    y_true.append(t)
                    predictions.append(p)
        else:
            y_true += list(y.flatten())
            predictions += list(pred.flatten())
    print "\n"
    predictions = np.array(predictions)
    predictions = np.stack([1 - predictions, predictions], axis=1)
    ret = metrics.print_metrics_binary(y_true, predictions)
    for k, v in ret.iteritems():
        logs[dataset + '_' + k] = v
    history.append(ret)
Example 13
def calc_metrics(self, data, history, dataset, logs):
    y_true = []
    predictions = []
    B = self.batch_size
    for i in range(0, len(data[0]), B):
        if self.verbose == 1:
            print "\r\tdone {}/{}".format(i, len(data[0])),
        (x, y) = (data[0][i:i+B], data[1][i:i+B])
        outputs = self.model.predict(x, batch_size=B)
        if isinstance(y[0], list):  # target replication
            y_true += list(y[0].flatten())
            predictions += list(outputs[0].flatten())
        else:
            y_true += list(np.array(y).flatten())
            predictions += list(outputs.flatten())
    print "\n"
    predictions = np.array(predictions)
    predictions = np.stack([1 - predictions, predictions], axis=1)
    ret = metrics.print_metrics_binary(y_true, predictions)
    for k, v in ret.iteritems():
        logs[dataset + '_' + k] = v
    history.append(ret)
Example 14
def get_document_batch(self, doc_id):
    """builds batch of all mention pairs in one document

    Args:
        doc_id: id of document

    Returns:
        feature representation of mentions and labels
    """
    mentions = self.dl.get_all_mentions_from_doc(doc_id)
    if len(mentions) == 0:
        return None, None

    A, B = [], []
    for a in mentions:
        for b in mentions:
            A.append(a)
            B.append(b)
    A_f = [self._mention_to_features(m) for m in A]
    B_f = [self._mention_to_features(m) for m in B]
    AB_f = self._pair_features(A, B)
    A = [self.dl.mention_features[m] for m in A]
    B = [self.dl.mention_features[m] for m in B]
    return np.vstack(A), np.stack(A_f), np.vstack(B), np.stack(B_f), np.stack(AB_f)
Example 15
def get_space_separated_data(raw):
    assert raw[0].strip().endswith("= [")
    assert raw[-1].strip().endswith("];")

    header = raw[0].replace("= [", "").strip()
    shape = tuple(eval(header[header.index("["):header.index("]") + 1]))
    data = [eval(line.strip().replace(" ", ",")) for line in raw[1:-1]]

    if len(shape) == 1:
        step_size = 1
    else:
        step_size = functools.reduce(operator.mul, shape[:-1])

    years = np.array(data[::step_size + 1], dtype=int)
    subarrays = [
        np.array(data[index * (step_size + 1) + 1:(index + 1) * (step_size + 1)]).reshape(shape)
        for index in range(len(years))
    ]
    return header, years, np.stack(subarrays, axis=-1)
Example 16
def generate(self):
    for pid in self.id2candidates_path.iterkeys():
        patient_path = self.id2patient_path[pid]
        print pid, patient_path
        img, pixel_spacing = utils_lung.read_dicom_scan(patient_path)
        print self.id2candidates_path[pid]
        candidates = utils.load_pkl(self.id2candidates_path[pid])
        print candidates.shape
        for candidate in candidates:
            y_batch = np.array(candidate, dtype='float32')
            patch_center = candidate[:3]
            batch = []
            for i in range(self.tta):
                batch.append(np.float32(self.data_prep_fun(data=img,
                                                           patch_center=patch_center,
                                                           pixel_spacing=pixel_spacing)))
            x_batch = np.stack(batch)
            print x_batch.shape
            yield x_batch, y_batch, [pid]
Example 17
def adjust_prediction(self, probability, image):
    crf = dcrf.DenseCRF(np.prod(probability.shape), 2)
    # crf = dcrf.DenseCRF(np.prod(probability.shape), 1)
    binary_prob = np.stack((1 - probability, probability), axis=0)
    unary = unary_from_softmax(binary_prob)
    # unary = unary_from_softmax(np.expand_dims(probability, axis=0))
    crf.setUnaryEnergy(unary)

    # per dimension scale factors
    sdims = [self.sdims] * 3
    smooth = create_pairwise_gaussian(sdims=sdims, shape=probability.shape)
    crf.addPairwiseEnergy(smooth, compat=2)

    if self.schan:
        # per channel scale factors
        schan = [self.schan] * 6
        appearance = create_pairwise_bilateral(sdims=sdims, schan=schan,
                                               img=image, chdim=3)
        crf.addPairwiseEnergy(appearance, compat=2)

    result = crf.inference(self.iter)
    crf_prediction = np.argmax(result, axis=0).reshape(probability.shape).astype(np.float32)

    return crf_prediction
Example 18
def read_images(filenames, domain=None, image_size=64):
    images = []
    for fn in filenames:
        image = cv2.imread(fn)
        if image is None:
            continue

        if domain == 'A':
            kernel = np.ones((3, 3), np.uint8)
            image = image[:, :256, :]
            image = 255. - image
            image = cv2.dilate(image, kernel, iterations=1)
            image = 255. - image
        elif domain == 'B':
            image = image[:, 256:, :]

        image = cv2.resize(image, (image_size, image_size))
        image = image.astype(np.float32) / 255.
        image = image.transpose(2, 0, 1)
        images.append(image)

    images = np.stack(images)
    return images
Example 19
def plot_current_errors(self, epoch, counter_ratio, opt, errors):
    if not hasattr(self, 'plot_data'):
        self.plot_data = {'X': [], 'Y': [], 'legend': list(errors.keys())}
    self.plot_data['X'].append(epoch + counter_ratio)
    self.plot_data['Y'].append([errors[k] for k in self.plot_data['legend']])
    self.vis.line(
        X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
        Y=np.array(self.plot_data['Y']),
        opts={
            'title': self.name + ' loss over time',
            'legend': self.plot_data['legend'],
            'xlabel': 'epoch',
            'ylabel': 'loss'},
        win=self.display_id)

# errors: same format as |errors| of plotCurrentErrors
Example 20
def mapFunction(x, y, func, ax=None, arrayInput=False, n=10, title=None, **kwargs):
    """ Plot function on a regular grid

    x : 1d array
    y : 1d array
    func : function to map
    arrayInput : False if func(x,y), True if func( [x,y] )
    """
    if ax is None:
        fig, ax = plt.subplots()

    X, Y = np.meshgrid(x, y)

    if not arrayInput:
        Z = func(X.flatten(), Y.flatten()).reshape(X.shape)
    else:
        Z = func(np.stack([X.flatten(), Y.flatten()]))

    ax.contourf(X, Y, Z, n, **kwargs)

    if title is not None:
        ax.set_title(title)

    return ax
Example 21
def process_observation(self, obs):
    """
    Take in the current observation, do any necessary processing and return
    the processed observation.

    A return value of None indicates that there is no observation yet. A
    random action will be taken.
    """
    self.current_sequence.append(obs)

    if len(self.current_sequence) < self.observations:
        return None

    if len(self.current_sequence) > self.observations:
        self.current_sequence.pop(0)

    # convert current sequence to input
    # stacking essentially adds a single axis, want it to be after
    obs_seq = np.stack(self.current_sequence, axis=len(obs.shape))
    return obs_seq
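A standalone illustration of the stacking convention used above (the frame shape is hypothetical, not taken from the project): passing axis=len(obs.shape) appends the new sequence axis after all of the observation's own axes.

import numpy as np

frames = [np.zeros((84, 84)) for _ in range(4)]  # hypothetical grayscale frames
obs_seq = np.stack(frames, axis=len(frames[0].shape))
print(obs_seq.shape)  # (84, 84, 4) -- the sequence axis comes last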
Example 22
def wavelength_to_XYZ(wavelength, observer='1931_2deg'):
    ''' Uses tristimulus color matching functions to map a wavelength to
    XYZ coordinates.

    Args:
        wavelength (`float`): wavelength in nm.
        observer (`str`): CIE observer name, must be 1931_2deg.

    Returns:
        `numpy.ndarray`: array with last dimension corresponding to X, Y, Z.
    '''
    wavelength = np.asarray(wavelength, dtype=config.precision)

    cmf = get_cmf(observer)
    wvl, X, Y, Z = cmf['wvl'], cmf['X'], cmf['Y'], cmf['Z']

    ia = {'bounds_error': False, 'fill_value': 0, 'assume_sorted': True}
    f_X, f_Y, f_Z = interp1d(wvl, X, **ia), interp1d(wvl, Y, **ia), interp1d(wvl, Z, **ia)
    x, y, z = f_X(wavelength), f_Y(wavelength), f_Z(wavelength)

    shape = wavelength.shape
    return np.stack((x, y, z), axis=len(shape))
Example 23
def XYZ_to_uvprime(XYZ):
    ''' Converts XYZ points to u'v' points.

    Args:
        XYZ (`numpy.ndarray`): ndarray with last dimension corresponding to X, Y, Z.

    Returns:
        `tuple` containing:

            `numpy.ndarray`: u' coordinates.

            `numpy.ndarray`: v' coordinates.
    '''
    XYZ = np.asarray(XYZ)
    X, Y, Z = XYZ[..., 0], XYZ[..., 1], XYZ[..., 2]
    u = (4 * X) / (X + 15 * Y + 3 * Z)
    v = (9 * Y) / (X + 15 * Y + 3 * Z)

    shape = u.shape
    return np.stack((u, v), axis=len(shape))
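As a sanity check of the conversion above: feeding in the commonly tabulated D65 white point should give u'v' coordinates near (0.1978, 0.4683).

import numpy as np

d65 = np.array([0.95047, 1.00000, 1.08883])  # X, Y, Z
u, v = XYZ_to_uvprime(d65)
print(round(float(u), 4), round(float(v), 4))  # approximately 0.1978 0.4683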
Example 24
def Luv_to_chroma_hue(luv):
    ''' Converts L*u*v* coordinates to a chroma and hue.

    Args:
        luv (`numpy.ndarray`): array with last dimension L*, u*, v*.

    Returns:
        `numpy.ndarray` with last dimension corresponding to C* and h.
    '''
    luv = np.asarray(luv)
    u, v = luv[..., 1], luv[..., 2]
    C = sqrt(u**2 + v**2)
    h = atan2(v, u)

    shape = luv.shape
    return np.stack((C, h), axis=len(shape))
Example 25
def uvprime_to_xy(uv):
    ''' Converts u' v' points to xyY x,y points.

    Args:
        uv (`numpy.ndarray`): ndarray with last dimension corresponding to u', v'.

    Returns:
        `tuple` containing:

            `numpy.ndarray`: x coordinates.

            `numpy.ndarray`: y coordinates.
    '''
    uv = np.asarray(uv)
    u, v = uv[..., 0], uv[..., 1]
    x = (9 * u) / (6 * u - 16 * v + 12)
    y = (4 * v) / (6 * u - 16 * v + 12)

    shape = x.shape
    return np.stack((x, y), axis=len(shape))
Example 26
def __call__(self, process_func):
    def wrapper(*args):
        data_obj = args[1]
        if (len(data_obj.shape) <= self.input_dim or
                data_obj.shape[-1] == 1):
            return process_func(*args)
        else:
            pool = mp.Pool(mp.cpu_count())  # TODO: make configurable
            arglist = [
                (args[0],) + (data_obj[..., i],) + args[2:]
                for i in range(data_obj.shape[-1])
            ]
            result = pool.map(self.worker, arglist)
            if self.output_dim > self.input_dim:
                # expanding
                return np.stack(result, -1)
            else:
                # contracting
                return np.concatenate(result, -1)
    return wrapper
Example 27
def rvs(self, size=1):
    ''' Generates random variates from the copula.

    Parameters
    ----------
    size : integer, optional
        The number of samples to generate.  (Default: 1)

    Returns
    -------
    samples : array_like
        n-by-2 matrix of samples where n is the number of samples.
    '''
    samples = np.stack((uniform.rvs(size=size), uniform.rvs(size=size)), axis=1)
    samples[:, 0] = self.ppcf(samples)
    return samples
Example 28
def plot_current_errors(self, epoch, counter_ratio, opt, errors):
    if not hasattr(self, 'plot_data'):
        self.plot_data = {'X': [], 'Y': [], 'legend': list(errors.keys())}
    self.plot_data['X'].append(epoch + counter_ratio)
    self.plot_data['Y'].append([errors[k] for k in self.plot_data['legend']])
    self.vis.line(
        X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
        Y=np.array(self.plot_data['Y']),
        opts={
            'title': self.name + ' loss over time',
            'legend': self.plot_data['legend'],
            'xlabel': 'epoch',
            'ylabel': 'loss'},
        win=self.display_id)

# errors: same format as |errors| of plotCurrentErrors
Example 29
def colorize_raster(masks):
    ''' (H, W, 10) -> (H, W, 3) '''
    assert masks.shape[2] == 10
    palette = np.array([(180, 180, 180), (100, 100, 100),  # Buildings, Misc.
                        (6, 88, 179), (125, 194, 223),     # Road, Track
                        (55, 120, 27), (160, 219, 166),    # Trees, Crops
                        (209, 173, 116), (180, 117, 69),   # Waterway, Standing
                        (67, 109, 244), (39, 48, 215)],    # Car
                       dtype=np.uint8)
    r = []
    for obj_type in range(10):
        c = palette[obj_type]
        result = np.stack([masks[:, :, obj_type]] * 3, axis=2)
        r.append(result * c)
    r = np.stack(r)
    r = np.max(r, axis=0)
    return r
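A minimal usage sketch of colorize_raster with a tiny hypothetical raster: every pixel is flagged as class 2 ("Road"), so the output is a solid (6, 88, 179) image.

import numpy as np

masks = np.zeros((4, 4, 10), dtype=np.uint8)
masks[..., 2] = 1  # mark every pixel as "Road"
rgb = colorize_raster(masks)
print(rgb.shape)  # (4, 4, 3)
print(rgb[0, 0])  # [  6  88 179]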
Example 30
def load_embeddings(filename):
    """Loads embeddings, returns weight matrix and dict from words to indices."""
    weight_vectors = []
    word_idx = {}
    with codecs.open(filename, encoding='utf-8') as f:
        for line in f:
            word, vec = line.split(u' ', 1)
            word_idx[word] = len(weight_vectors)
            weight_vectors.append(np.array(vec.split(), dtype=np.float32))
    # Annoying implementation detail; '(' and ')' are replaced by '-LRB-' and
    # '-RRB-' respectively in the parse-trees.
    word_idx[u'-LRB-'] = word_idx.pop(u'(')
    word_idx[u'-RRB-'] = word_idx.pop(u')')
    # Random embedding vector for unknown words.
    weight_vectors.append(np.random.uniform(
        -0.05, 0.05, weight_vectors[0].shape).astype(np.float32))
    return np.stack(weight_vectors), word_idx
Example 31
def make_batch(batch_size):
    batch_idx = np.random.choice(len(data), batch_size)
    batch_sequences = [data[idx] for idx in batch_idx]
    strokes = []
    lengths = []
    indice = 0
    for seq in batch_sequences:
        len_seq = len(seq[:, 0])
        new_seq = np.zeros((Nmax, 5))
        new_seq[:len_seq, :2] = seq[:, :2]
        new_seq[:len_seq-1, 2] = 1 - seq[:-1, 2]
        new_seq[:len_seq, 3] = seq[:, 2]
        new_seq[(len_seq-1):, 4] = 1
        new_seq[len_seq-1, 2:4] = 0
        lengths.append(len(seq[:, 0]))
        strokes.append(new_seq)
        indice += 1

    if use_cuda:
        batch = Variable(torch.from_numpy(np.stack(strokes, 1)).cuda().float())
    else:
        batch = Variable(torch.from_numpy(np.stack(strokes, 1)).float())
    return batch, lengths

################################ adaptive lr
Example 32
def make_target(self, batch, lengths):
    if use_cuda:
        eos = Variable(torch.stack([torch.Tensor([0, 0, 0, 0, 1])]
                                   * batch.size()[1]).cuda()).unsqueeze(0)
    else:
        eos = Variable(torch.stack([torch.Tensor([0, 0, 0, 0, 1])]
                                   * batch.size()[1])).unsqueeze(0)
    batch = torch.cat([batch, eos], 0)
    mask = torch.zeros(Nmax + 1, batch.size()[1])
    for indice, length in enumerate(lengths):
        mask[:length, indice] = 1
    if use_cuda:
        mask = Variable(mask.cuda()).detach()
    else:
        mask = Variable(mask).detach()
    dx = torch.stack([Variable(batch.data[:, :, 0])] * hp.M, 2).detach()
    dy = torch.stack([Variable(batch.data[:, :, 1])] * hp.M, 2).detach()
    p1 = Variable(batch.data[:, :, 2]).detach()
    p2 = Variable(batch.data[:, :, 3]).detach()
    p3 = Variable(batch.data[:, :, 4]).detach()
    p = torch.stack([p1, p2, p3], 2)
    return mask, dx, dy, p
Example 33
def __call__(self, tensor):
    """
    Args:
        tensor (Tensor): Tensor of audio of size (samples x channels)

    Returns:
        tensor (Tensor): n_mels x hops x channels (BxLxC), where n_mels is
        the number of mel bins, hops is the number of hops, and channels
        is unchanged.
    """
    if librosa is None:
        print("librosa not installed, cannot create spectrograms")
        return tensor
    L = []
    for i in range(tensor.size(1)):
        nparr = tensor[:, i].numpy()  # (samples, )
        sgram = librosa.feature.melspectrogram(nparr, **self.kwargs)  # (n_mels, hops)
        L.append(sgram)
    L = np.stack(L, 2)  # (n_mels, hops, channels)
    tensor = torch.from_numpy(L).type_as(tensor)

    return tensor
Example 34
def test_shapes(self):
    dims = [
        ((1, 1), (2, 1, 1)),     # broadcast first argument
        ((2, 1, 1), (1, 1)),     # broadcast second argument
        ((2, 1, 1), (2, 1, 1)),  # matrix stack sizes match
    ]

    for dt, (dm1, dm2) in itertools.product(self.types, dims):
        a = np.ones(dm1, dtype=dt)
        b = np.ones(dm2, dtype=dt)
        res = self.matmul(a, b)
        assert_(res.shape == (2, 1, 1))

    # vector vector returns scalars.
    for dt in self.types:
        a = np.ones((2,), dtype=dt)
        b = np.ones((2,), dtype=dt)
        c = self.matmul(a, b)
        assert_(np.array(c).shape == ())
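The broadcasting rules the test above exercises can be reproduced directly with plain numpy.matmul; a minimal sketch:

import numpy as np

print(np.matmul(np.ones((1, 1)), np.ones((2, 1, 1))).shape)  # (2, 1, 1) -- first argument broadcast
print(np.matmul(np.ones((2, 1, 1)), np.ones((1, 1))).shape)  # (2, 1, 1) -- second argument broadcast
print(np.matmul(np.ones(2), np.ones(2)))  # 2.0 -- vector-vector returns a scalar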
Example 35
def train_priority(self, state, reward, action, state_next, done, batch_ISweight):
    q, q_target = self.sess.run([self.q_value, self.q_target],
                                feed_dict={self.inputs_q: state,
                                           self.inputs_target: state_next})
    # DoubleDQN
    if self.double:
        q_next = self.sess.run(self.q_value, feed_dict={self.inputs_q: state_next})
        action_best = np.argmax(q_next, axis=1)
        q_target_best = self.sess.run(self.q_target_action,
                                      feed_dict={self.action_best: action_best,
                                                 self.q_target: q_target})
    else:
        q_target_best = np.max(q_target, axis=1)  # dqn

    q_target_best_mask = (1.0 - done) * q_target_best
    target = reward + self.gamma * q_target_best_mask

    batch_ISweight = np.stack([batch_ISweight, batch_ISweight], axis=-1)
    loss, td_error, _ = self.sess.run([self.loss, self.td_error, self.train_op],
                                      feed_dict={self.inputs_q: state,
                                                 self.target: target,
                                                 self.action: action,
                                                 self.ISweight: batch_ISweight})
    return td_error
    # self.loss_his.append(loss)

# ===============================================================
# A3C Agent
# ===============================================================
Example 36
def stack_and_pad(values: List[Union[np.ndarray, int, float]], pad=0) -> np.ndarray:
    """Pads a list of numpy arrays so that they have equal dimensions, then stacks them."""
    if isinstance(values[0], int) or isinstance(values[0], float):
        return np.array(values)

    dims = len(values[0].shape)
    max_shape = [max(sizes) for sizes in zip(*[v.shape for v in values])]

    padded_values = []

    for value in values:
        pad_width = [(0, max_shape[i] - value.shape[i]) for i in range(dims)]
        padded_value = np.lib.pad(value, pad_width, mode='constant',
                                  constant_values=pad)
        padded_values.append(padded_value)

    return np.stack(padded_values)
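A quick usage sketch of the helper above (shapes made up for illustration): each array is padded at the end of every dimension, up to the elementwise maximum shape, before stacking.

import numpy as np

a = np.ones((2, 3))
b = np.ones((1, 4))
out = stack_and_pad([a, b])  # pads both to (2, 4), with zeros by default
print(out.shape)  # (2, 2, 4)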
Example 37
def join_embeddings(src_we, target_we):
    """joins and filters words not in common and produces two tensors"""
    src_w = set(src_we.keys())
    target_w = set(target_we.keys())
    common_w = src_w & target_w
    src_tensor = []
    target_tensor = []
    for w in common_w:
        src_tensor.append(src_we[w])
        target_tensor.append(target_we[w])
    src_tensor = torch.Tensor(np.stack(src_tensor))
    target_tensor = torch.Tensor(np.stack(target_tensor))
    return src_tensor, target_tensor
Example 38
def decode_bboxes(tcoords, anchors):
    var_x, var_y, var_w, var_h = config['prior_variance']
    t_x = tcoords[:, 0] * var_x
    t_y = tcoords[:, 1] * var_y
    t_w = tcoords[:, 2] * var_w
    t_h = tcoords[:, 3] * var_h
    a_w = anchors[:, 2]
    a_h = anchors[:, 3]
    a_x = anchors[:, 0] + a_w / 2
    a_y = anchors[:, 1] + a_h / 2
    x = t_x * a_w + a_x
    y = t_y * a_h + a_y
    w = tf.exp(t_w) * a_w
    h = tf.exp(t_h) * a_h

    x1 = tf.maximum(0., x - w / 2)
    y1 = tf.maximum(0., y - h / 2)
    x2 = tf.minimum(1., w + x1)
    y2 = tf.minimum(1., h + y1)
    return tf.stack([y1, x1, y2, x2], axis=1)
Example 39
def encode_bboxes(proposals, gt):
    prop_x = proposals[:, 0]
    prop_y = proposals[:, 1]
    prop_w = proposals[:, 2]
    prop_h = proposals[:, 3]
    gt_x = gt[:, 0]
    gt_y = gt[:, 1]
    gt_w = gt[:, 2]
    gt_h = gt[:, 3]
    diff_x = (gt_x + 0.5 * gt_w - prop_x - 0.5 * prop_w) / prop_w
    diff_y = (gt_y + 0.5 * gt_h - prop_y - 0.5 * prop_h) / prop_h
    if len(gt) > 0 and (np.min(gt_w / prop_w) < 1e-6 or np.min(gt_h / prop_h) < 1e-6):
        print(np.min(gt_w), np.min(gt_h), np.min(gt_w / prop_w), np.max(gt_h / prop_h))
    diff_w = np.log(gt_w / prop_w)
    diff_h = np.log(gt_h / prop_h)
    var_x, var_y, var_w, var_h = config['prior_variance']
    x = np.stack([diff_x / var_x, diff_y / var_y,
                  diff_w / var_w, diff_h / var_h], axis=1)
    return x
Example 40
def batch(states, batch_size=None):
    """Combines a collection of state structures into a batch, padding if needed.

    Args:
        states: A collection of individual nested state structures.
        batch_size: The desired final batch size. If the nested state structure
            that results from combining the states is smaller than this, it will
            be padded with zeros.

    Returns:
        A single state structure that results from stacking the structures in
        `states`, with padding if needed.

    Raises:
        ValueError: If the number of input states is larger than `batch_size`.
    """
    if batch_size and len(states) > batch_size:
        raise ValueError('Combined state is larger than the requested batch size')

    def stack_and_pad(*states):
        stacked = np.stack(states)
        if batch_size:
            stacked.resize([batch_size] + list(stacked.shape)[1:])
        return stacked

    return tf_nest.map_structure(stack_and_pad, *states)
Example 41
def second_order_nic(x):
    """
    transform
    x1 x2 ---> 1 x1 x2 x1x2 x1**2 x2**2
    nic : no initial constant
    """
    ones = np.ones(len(x))
    x1 = x[:, 0]
    x2 = x[:, 1]
    x1_sqr = x1**2
    x2_sqr = x2**2
    x1x2 = x1 * x2
    return np.stack([ones, x1, x2, x1x2, x1_sqr, x2_sqr], axis=1)

# STOCHASTIC GRADIENT DESCENT
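A quick check of second_order_nic above, with hypothetical data: three (x1, x2) samples expand to six columns [1, x1, x2, x1*x2, x1**2, x2**2].

import numpy as np

x = np.array([[1.0, 2.0],
              [3.0, 4.0],
              [5.0, 6.0]])
print(second_order_nic(x).shape)  # (3, 6)
print(second_order_nic(x)[0])     # [1. 1. 2. 2. 1. 4.]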
Example 42
def generate_quadratic_parameters(trials):
    """ax^2 + b"""
    def transform(x):
        """
        transform
        x1 ---> 1 x1**2
        """
        ones = np.ones(len(x))
        x1 = x[:, 0]
        x1_sqr = x1 ** 2
        return np.stack([ones, x1_sqr], axis=1)

    new_trials = [
        DataML((training_set.z, training_set.y), transform)
        for training_set in trials
    ]
    weights = [
        linear_percepton(training_set.z, training_set.y)
        for training_set in new_trials
    ]
    return np.array(weights)
Example 43
def recalculate_objects(pred_dict, image):
    proposals = pred_dict['rpn_prediction']['proposals']
    proposals_prob = pred_dict['classification_prediction']['rcnn']['cls_prob']
    proposals_target = proposals_prob.argmax(axis=1) - 1
    bbox_offsets = pred_dict[
        'classification_prediction']['rcnn']['bbox_offsets']

    bbox_offsets = bbox_offsets[proposals_target >= 0]
    proposals = proposals[proposals_target >= 0]
    proposals_target = proposals_target[proposals_target >= 0]

    bbox_offsets_idx_pairs = np.stack(
        np.array([
            proposals_target * 4, proposals_target * 4 + 1,
            proposals_target * 4 + 2, proposals_target * 4 + 3]),
        axis=1)
    bbox_offsets = np.take(bbox_offsets, bbox_offsets_idx_pairs.astype(np.int))

    bboxes = decode(proposals, bbox_offsets)

    return bboxes, proposals_target
Example 44
def next_batch(self, batch_size):
    batches_ids = set()
    while len(batches_ids) < batch_size:
        h = random.randint(0, self.t_h - self.h)
        w = random.randint(0, self.t_w - self.w)
        d = random.randint(0, self.t_d - self.d)
        batches_ids.add((h, w, d))
    image_batches = []
    label_batches = []
    for h, w, d in batches_ids:
        image_batches.append(
            self.images[h:h+self.h, w:w+self.w, d:d+self.d])
        label_batches.append(
            self.labels[h:h+self.h, w:w+self.w, d:d+self.d])
    images = np.expand_dims(np.stack(image_batches, axis=0), axis=-1)
    images = np.transpose(images, (0, 3, 1, 2, 4))
    labels = np.stack(label_batches, axis=0)
    labels = np.transpose(labels, (0, 3, 1, 2))
    return images, labels
Example 45
def process(input_dir, output_dir, model_dir, resizing_size, gpu):
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3,
                                visible_device_list=gpu)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)).as_default():
        m = loader.LoadedModel(model_dir)

        os.makedirs(output_dir, exist_ok=True)

        input_filenames = glob(os.path.join(input_dir, '*.jpg')) + \
                          glob(os.path.join(input_dir, '*.png')) + \
                          glob(os.path.join(input_dir, '*.tif')) + \
                          glob(os.path.join(input_dir, '*.jp2'))

        for path in tqdm(input_filenames):
            img = Image.open(path).resize(resizing_size)
            mat = np.asarray(img)
            if len(mat.shape) == 2:
                mat = np.stack([mat, mat, mat], axis=2)
            predictions = m.predict(mat[None], prediction_key='labels')[0]
            plt.imsave(os.path.join(output_dir, os.path.relpath(path, input_dir)),
                       predictions)
Example 46
def test_hexahedron():
    val = quadpy.hexahedron.integrate(
        lambda x: numpy.exp(x[0]),
        quadpy.hexahedron.cube_points([0.0, 1.0], [0.0, 1.0], [0.0, 1.0]),
        quadpy.hexahedron.Product(quadpy.line_segment.NewtonCotesClosed(3))
        )

    val = quadpy.hexahedron.integrate(
        lambda x: [numpy.exp(x[0]), numpy.exp(x[1])],
        numpy.stack([
            quadpy.hexahedron.cube_points([0, 1], [0, 1], [0, 1]),
            quadpy.hexahedron.cube_points([0, 1], [0, 1], [0, 1]),
            quadpy.hexahedron.cube_points([0, 1], [0, 1], [0, 1]),
            quadpy.hexahedron.cube_points([0, 1], [0, 1], [0, 1]),
            quadpy.hexahedron.cube_points([0, 1], [0, 1], [0, 1]),
            ], axis=-2),
        quadpy.hexahedron.Product(quadpy.line_segment.NewtonCotesClosed(3))
        )
    assert val.shape == (2, 5)
    return
Example 47
def test_quadrilateral():
    quadpy.quadrilateral.integrate(
        lambda x: numpy.exp(x[0]),
        quadpy.quadrilateral.rectangle_points([0.0, 1.0], [0.0, 1.0]),
        quadpy.quadrilateral.Stroud('C2 5-4')
        )

    val = quadpy.quadrilateral.integrate(
        lambda x: [numpy.exp(x[0]), numpy.exp(x[1])],
        numpy.stack([
            quadpy.quadrilateral.rectangle_points([0.0, 1.0], [0.0, 1.0]),
            quadpy.quadrilateral.rectangle_points([0.0, 1.0], [0.0, 1.0]),
            quadpy.quadrilateral.rectangle_points([0.0, 1.0], [0.0, 1.0]),
            quadpy.quadrilateral.rectangle_points([0.0, 1.0], [0.0, 1.0]),
            quadpy.quadrilateral.rectangle_points([0.0, 1.0], [0.0, 1.0]),
            ], axis=-2),
        quadpy.quadrilateral.Stroud('C2 3-1')
        )
    assert val.shape == (2, 5)
    return
Example 48
def test_tetrahedron():
    quadpy.tetrahedron.integrate(
        lambda x: numpy.exp(x[0]),
        numpy.array([
            [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]
            ], dtype=float),
        quadpy.tetrahedron.ShunnHam(3)
        )

    quadpy.tetrahedron.integrate(
        lambda x: [numpy.exp(x[0]), numpy.exp(x[1])],
        numpy.stack([
            [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0], [0.0, 0, 1]],
            [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0], [0.0, 0, 1]],
            [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0], [0.0, 0, 1]],
            [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0], [0.0, 0, 1]],
            ], axis=-2),
        quadpy.tetrahedron.ShunnHam(3)
        )
    return
Example 49
def test_triangle():
    quadpy.triangle.integrate(
        lambda x: numpy.exp(x[0]),
        numpy.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]),
        quadpy.triangle.Cubtri()
        )

    val = quadpy.triangle.integrate(
        lambda x: [numpy.exp(x[0]), numpy.exp(x[1])],
        numpy.stack([
            [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]],
            [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]],
            [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]],
            [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]],
            [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]],
            ], axis=-2),
        quadpy.triangle.Cubtri()
        )
    assert val.shape == (2, 5)
    return
Example 50
def transform(xi, cube):
    '''Transform the points `xi` from the reference cube to `cube`.
    '''
    # For d==2, the result used to be computed with
    #
    # out = (
    #     + outer(0.25*(1.0-xi[0])*(1.0-xi[1]), cube[0, 0])
    #     + outer(0.25*(1.0+xi[0])*(1.0-xi[1]), cube[1, 0])
    #     + outer(0.25*(1.0-xi[0])*(1.0+xi[1]), cube[0, 1])
    #     + outer(0.25*(1.0+xi[0])*(1.0+xi[1]), cube[1, 1])
    #     )
    #
    # This array of multiplications and additions is reminiscent of dot(), and
    # indeed tensordot() can handle the situation. We just need to compute the
    # `1+-xi` products and align them with `cube`.
    one_mp_xi = numpy.stack([
        0.5 * (1.0 - xi),
        0.5 * (1.0 + xi),
        ], axis=1)
    a = helpers.n_outer(one_mp_xi)

    # TODO kahan tensordot
    # <https://stackoverflow.com/q/45372098/353337>
    d = xi.shape[0]
    return numpy.tensordot(a, cube, axes=(range(d), range(d)))