The following are code examples showing how to use numpy.min. They are extracted from open source Python projects.
Example 1
def _zero_one_normalize(predictions, epsilon=1e-7):
    """Normalize the predictions to the range between 0.0 and 1.0.

    For some predictions like SVM predictions, we need to normalize them before
    calculating the interpolated average precision. The normalization will not
    change the rank in the original list and thus won't change the average
    precision.

    Args:
        predictions: a numpy 1-D array storing the sparse prediction scores.
        epsilon: a small constant to avoid the denominator being zero.

    Returns:
        The normalized prediction.
    """
    denominator = numpy.max(predictions) - numpy.min(predictions)
    # numpy.maximum (elementwise) -- numpy.max(denominator, epsilon) would
    # treat epsilon as an axis argument and fail on a scalar
    ret = (predictions - numpy.min(predictions)) / numpy.maximum(denominator, epsilon)
    return ret
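A minimal usage sketch (the scores below are made up; the function only needs a 1-D numpy array):

import numpy

scores = numpy.array([-2.5, 0.0, 1.5, 4.0])  # hypothetical SVM margins
print(_zero_one_normalize(scores))
# [0.         0.38461538 0.61538462 1.        ]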
Example 2
def predicted_vs_actual_y_xgb(self, xgb, best_nrounds, xgb_params, x_train_split, x_test_split,
                              y_train_split, y_test_split, title_name):
    # Split the training data into an extra set of test
    # x_train_split, x_test_split, y_train_split, y_test_split = train_test_split(x_train, y_train)
    dtrain_split = xgb.DMatrix(x_train_split, label=y_train_split)
    dtest_split = xgb.DMatrix(x_test_split)
    print(np.shape(x_train_split), np.shape(x_test_split), np.shape(y_train_split), np.shape(y_test_split))
    gbdt = xgb.train(xgb_params, dtrain_split, best_nrounds)
    y_predicted = gbdt.predict(dtest_split)
    plt.figure(figsize=(10, 5))
    plt.scatter(y_test_split, y_predicted, s=20)
    rmse_pred_vs_actual = self.rmse(y_predicted, y_test_split)
    plt.title(''.join([title_name, ', Predicted vs. Actual.', ' rmse = ', str(rmse_pred_vs_actual)]))
    plt.xlabel('Actual y')
    plt.ylabel('Predicted y')
    plt.plot([min(y_test_split), max(y_test_split)],
             [min(y_test_split), max(y_test_split)])
    plt.tight_layout()
Example 3
def update_data_sort_order(self, new_sort_order=None):
    if new_sort_order is not None:
        self.current_order = new_sort_order
    self.update_sort_idcs()
    self.data_image.set_extent((self.raw_lags[0], self.raw_lags[-1],
                                0, len(self.sort_idcs)))
    self.data_ax.set_ylim(0, len(self.sort_idcs))
    all_raw_data = self.raw_data
    all_raw_data /= (1 + self.raw_data.mean(1)[:, np.newaxis])
    if len(all_raw_data) > 0:
        cmax = 0.5 * all_raw_data.max()
        cmin = 0.5 * all_raw_data.min()
        all_raw_data = all_raw_data[self.sort_idcs, :]
    else:
        cmin = 0
        cmax = 1
    self.data_image.set_data(all_raw_data)
    self.data_image.set_clim(cmin, cmax)
    self.data_selection.set_y(len(self.sort_idcs) - len(self.selected_points))
    self.data_selection.set_height(len(self.selected_points))
    self.update_data_plot()
Example 4
def plot_electrodes(self):
    if not getattr(self, 'collections', None):
        # It is important to set one facecolor per point so that we can
        # change it later
        self.electrode_collection = self.electrode_ax.scatter(
            self.x_position, self.y_position,
            facecolor=['black' for _ in self.x_position], s=30)
        self.electrode_ax.set_xlabel('Space [um]')
        self.electrode_ax.set_xticklabels([])
        self.electrode_ax.set_ylabel('Space [um]')
        self.electrode_ax.set_yticklabels([])
    else:
        self.electrode_collection.set_offsets(
            np.hstack([self.x_position[np.newaxis, :].T,
                       self.y_position[np.newaxis, :].T]))
    ax, x, y = self.electrode_ax, self.y_position, self.x_position
    ymin, ymax = min(x), max(x)
    yrange = (ymax - ymin) * 0.5 * 1.05  # stretch everything a bit
    ax.set_ylim((ymax + ymin) * 0.5 - yrange, (ymax + ymin) * 0.5 + yrange)
    xmin, xmax = min(y), max(y)
    xrange = (xmax - xmin) * 0.5 * 1.05  # stretch everything a bit
    ax.set_xlim((xmax + xmin) * 0.5 - xrange, (xmax + xmin) * 0.5 + xrange)
    self.ui.raw_data.draw_idle()
Example 5
def removeTopPCs(X, numRemovePCs):
    t0 = time.time()
    X_mean = X.mean(axis=0)
    X -= X_mean
    XXT = symmetrize(blas.dsyrk(1.0, X, lower=0))
    s, U = la.eigh(XXT)
    if (np.min(s) < -1e-4):
        raise Exception('Negative eigenvalues found')
    s[s < 0] = 0
    ind = np.argsort(s)[::-1]
    U = U[:, ind]
    s = s[ind]
    s = np.sqrt(s)

    # remove null PCs
    ind = (s > 1e-6)
    U = U[:, ind]
    s = s[ind]

    V = X.T.dot(U / s)
    # print('max diff:', np.max(((U*s).dot(V.T) - X)**2))
    X = (U[:, numRemovePCs:] * s[numRemovePCs:]).dot((V.T)[numRemovePCs:, :])
    X += X_mean
    return X
Example 6
def resample(image, scan, new_spacing=[1, 1, 1]):
    # Determine current pixel spacing
    spacing = map(float, ([scan[0].SliceThickness] + scan[0].PixelSpacing))
    spacing = np.array(list(spacing))

    resize_factor = spacing / new_spacing
    new_real_shape = image.shape * resize_factor
    new_shape = np.round(new_real_shape)
    real_resize_factor = new_shape / image.shape
    new_spacing = spacing / real_resize_factor

    # Neither mode='wrap' nor cval=-1024 guarantees that the min and max
    # values of the image stay unchanged, hence mode='nearest'.
    image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest')
    # Alternatives (order=1 is bilinear; it preserves the min and max of the
    # image and is faster than the default spline interpolation):
    # image = scipy.ndimage.zoom(image, real_resize_factor, order=1)
    # image = scipy.ndimage.zoom(image, real_resize_factor, mode='nearest', order=1)

    return image, new_spacing
Example 7
def fit(self, X_train, y_train, X_valid, y_valid, X_test, y_test, steps=400):
    tf.global_variables_initializer().run()
    redirect = FDRedirector(STDERR)
    for i in range(steps):
        redirect.start()
        feed_dict = {self.labels: y_train}
        for key, tensor in self.features.items():
            feed_dict[tensor] = X_train[key]
        predictions, loss = sess.run([self.prediction, self.train_op],
                                     feed_dict=feed_dict)
        if i % 10 == 0:
            print("step:{} loss:{:.3g} np.std(predictions):{:.3g}".format(
                i, loss, np.std(predictions)))
            self.threshold = float(min(self.threshold_from_data(X_valid, y_valid),
                                       self.threshold_from_data(X_train, y_train)))
            tf.get_collection_ref("threshold")[0] = self.threshold
            self.print_metrics(X_train, y_train, "Training")
            self.print_metrics(X_valid, y_valid, "Validation")
        errors = redirect.stop()
        if errors:
            print(errors)
    self.print_metrics(X_test, y_test, "Test")
Example 8
def getLatLonRange(pbo_info, station_list):
    '''
    Retrieve the range of latitude and longitude occupied by a set of stations

    @param pbo_info: PBO Metadata
    @param station_list: List of stations

    @return list containing two tuples, lat_range and lon_range
    '''
    coord_list = getStationCoords(pbo_info, station_list)

    lat_list = []
    lon_list = []
    for coord in coord_list:
        lat_list.append(coord[0])
        lon_list.append(coord[1])

    lat_range = (np.min(lat_list), np.max(lat_list))
    lon_range = (np.min(lon_list), np.max(lon_list))

    return [lat_range, lon_range]
Example 9
def conv1(model):
    n1, n2, x, y, z = model.conv1.W.shape
    fig = plt.figure()
    for nn in range(0, n1):
        ax = fig.add_subplot(4, 5, nn + 1, projection='3d')
        ax.set_xlim(0.0, x)
        ax.set_ylim(0.0, y)
        ax.set_zlim(0.0, z)
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_zticklabels([])
        for xx in range(0, x):
            for yy in range(0, y):
                for zz in range(0, z):
                    # renamed from max/min to avoid shadowing the builtins
                    w_max = np.max(model.conv1.W.data[nn, :])
                    w_min = np.min(model.conv1.W.data[nn, :])
                    step = (w_max - w_min) / 1.0
                    C = (model.conv1.W.data[nn, 0, xx, yy, zz] - w_min) / step
                    color = cm.cool(C)
                    C = abs(1.0 - C)
                    ax.plot(np.array([xx]), np.array([yy]), np.array([zz]),
                            "o", color=color, ms=7.0 * C, mew=0.1)
    plt.savefig("result/graph_conv1.png")
Example 10
def reshapeWeights(self, weights, normalize=True, modifier=None):
    # reshape the weights matrix to a grid for visualization
    n_rows = int(np.sqrt(weights.shape[1]))
    n_cols = int(np.sqrt(weights.shape[1]))
    kernel_size = int(np.sqrt(weights.shape[0] / 3))
    weights_grid = np.zeros((int((np.sqrt(weights.shape[0] / 3) + 1) * n_rows),
                             int((np.sqrt(weights.shape[0] / 3) + 1) * n_cols), 3),
                            dtype=np.float32)
    # integer division: range() requires an int in Python 3
    for i in range(weights_grid.shape[0] // (kernel_size + 1)):
        for j in range(weights_grid.shape[1] // (kernel_size + 1)):
            index = i * (weights_grid.shape[0] // (kernel_size + 1)) + j
            if not np.isclose(np.sum(weights[:, index]), 0):
                if normalize:
                    weights_grid[i * (kernel_size + 1):i * (kernel_size + 1) + kernel_size,
                                 j * (kernel_size + 1):j * (kernel_size + 1) + kernel_size] = \
                        (weights[:, index].reshape(kernel_size, kernel_size, 3) -
                         np.min(weights[:, index])) / \
                        ((np.max(weights[:, index]) - np.min(weights[:, index])) + 1.e-6)
                else:
                    weights_grid[i * (kernel_size + 1):i * (kernel_size + 1) + kernel_size,
                                 j * (kernel_size + 1):j * (kernel_size + 1) + kernel_size] = \
                        weights[:, index].reshape(kernel_size, kernel_size, 3)
                if modifier is not None:
                    weights_grid[i * (kernel_size + 1):i * (kernel_size + 1) + kernel_size,
                                 j * (kernel_size + 1):j * (kernel_size + 1) + kernel_size] *= modifier[index]

    return weights_grid
Example 11
def fill_hdf5_with_sparse_by_chunk(mym1, mym2, fname, chunksize):
    start1 = 0
    end1 = 0
    n = mym1.shape[0]
    f = h5py.File(fname, 'w')
    m1hdf5 = f.create_dataset('m1', shape=(n, n), dtype='float')
    m2hdf5 = f.create_dataset('m2', shape=(n, n), dtype='float')
    while end1 < n:
        end1 = np.min([n, (start1 + chunksize)])
        print('start1: ' + str(start1))
        if (end1 - start1) == 1:
            m1hdf5[start1, :] = mym1[start1, :].toarray()
            m2hdf5[start1, :] = mym2[start1, :].toarray()
        else:
            m1hdf5[start1:end1, :] = mym1[start1:end1, :].toarray()
            m2hdf5[start1:end1, :] = mym2[start1:end1, :].toarray()
        start1 = end1
    print('sum of 1')
    print(m1hdf5[:, :].sum())
    print(m2hdf5[:, :].sum())
    f.close()
Example 12
def __init__(self, target, instance, files):
    self.target = target
    self.instance = instance
    mask_files = natural_sort(filter(lambda fn: '_maskcrop.png' in fn, files))
    depth_files = natural_sort(filter(lambda fn: '_depthcrop.png' in fn, files))
    rgb_files = natural_sort(list(set(files) - set(mask_files) - set(depth_files)))
    loc_files = natural_sort(map(lambda fn: fn.replace('_crop.png', '_loc.txt'), rgb_files))

    # Ensure all have equal number of files (Hack! doesn't ensure filename consistency)
    nfiles = np.min([len(loc_files), len(mask_files), len(depth_files), len(rgb_files)])
    mask_files, depth_files, rgb_files, loc_files = mask_files[:nfiles], depth_files[:nfiles], \
        rgb_files[:nfiles], loc_files[:nfiles]

    # print(target, instance, len(loc_files), len(mask_files), len(depth_files), len(rgb_files))
    assert(len(mask_files) == len(depth_files) == len(rgb_files) == len(loc_files))

    # Read images
    self.rgb = ImageDatasetReader.from_filenames(rgb_files)
    self.depth = ImageDatasetReader.from_filenames(depth_files)
    self.mask = ImageDatasetReader.from_filenames(mask_files)

    # Read top-left locations of bounding box
    self.locations = np.vstack([np.loadtxt(loc, delimiter=',', dtype=np.int32)
                                for loc in loc_files])
Example 13
def add(self, desc):
    if self.built_:
        return

    if self.vocab_len_ < self.N_:
        Nd = len(desc)
        st, end = self.vocab_len_, min(self.vocab_len_ + Nd, self.N_)
        self.vocab_data_[st:end] = desc[:end - st]
        self.vocab_len_ += len(desc)
        print('Vocabulary building: {:}/{:}'.format(self.vocab_len_, self.N_))
    else:
        print('Vocabulary built')
        self.built_ = True

    # else:
    #     # Build vocab if not built already
    #     self.voc_.build(self.vocab_data_, self.K_)
    #     self.vocab_ = self.voc_.getCentroids()
    #     sz = self.vocab_.shape[:2]
    #     if sz[0] != self.K_ or sz[1] != self.D_:
    #         raise RuntimeError('Voc error! KxD={:}x{:}, expected'.format(sz[0], sz[1], self.K_, self.D_))
    #     self.save('vocab.yaml.gz')
Example 14
def inc_region(self, dst, y, x, h, w):
    '''Increments dst in the specified region. Runs fastest on np.int8,
    but not much slower on np.int16.'''
    dh, dw = dst.shape
    h2 = h // 2
    w2 = w // 2
    py = y - h2
    px = x - w2
    y_min = max(0, py)
    y_max = min(dh, y + h2)
    x_min = max(0, px)
    x_max = min(dw, x + w2)
    if y_max - y_min <= 0 or x_max - x_min <= 0:
        return
    dst[y_min:y_max, x_min:x_max] += 1
Example 15
def effective_sample_size(x, mu, var, logger):
    """
    Calculate the effective sample size of a sequence generated by MCMC.
    :param x: samples with shape [batch, time, dimension]
    :param mu: mean of the variable
    :param var: variance of the variable
    :param logger: logger
    :return: effective sample size of the sequence

    Make sure that `mu` and `var` are correct!
    """
    # batch size, time, dimension
    b, t, d = x.shape
    ess_ = np.ones([d])
    for s in range(1, t):
        p = auto_correlation_time(x, s, mu, var)
        if np.sum(p > 0.05) == 0:
            break
        else:
            for j in range(0, d):
                if p[j] > 0.05:
                    ess_[j] += 2.0 * p[j] * (1.0 - float(s) / t)
    logger.info('ESS: max [%f] min [%f] / [%d]' % (t / np.min(ess_), t / np.max(ess_), t))
    return t / ess_
Example 16
def alleviate_conditioning_in_coordinates(self, condition=1e8):
    """pass scaling from `C` to `sigma_vec`.

    As a result, `C` is a correlation matrix, i.e., all diagonal
    entries of `C` are `1`.
    """
    if max(self.dC) / min(self.dC) > condition:
        # allows for much larger condition numbers, if axis-parallel
        if hasattr(self, 'sm') and isinstance(self.sm, sampler.GaussFullSampler):
            old_coordinate_condition = max(self.dC) / min(self.dC)
            old_condition = self.sm.condition_number
            factors = self.sm.to_correlation_matrix()
            self.sigma_vec *= factors
            self.pc /= factors
            self._updateBDfromSM(self.sm)
            utils.print_message('\ncondition in coordinate system exceeded'
                                ' %.1e, rescaled to %.1e, '
                                '\ncondition changed from %.1e to %.1e'
                                % (old_coordinate_condition,
                                   max(self.dC) / min(self.dC),
                                   old_condition, self.sm.condition_number),
                                iteration=self.countiter)
Example 17
def plot_axes_scaling(self, iabscissa=1):
    from matplotlib import pyplot
    if not hasattr(self, 'D'):
        self.load()
    dat = self
    if np.max(dat.D[:, 5:]) == np.min(dat.D[:, 5:]):
        pyplot.text(0, dat.D[-1, 5],
                    'all axes scaling values equal to %s' % str(dat.D[-1, 5]),
                    verticalalignment='center')
        return self  # nothing interesting to plot
    self._enter_plotting()
    pyplot.semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b')
    # pyplot.hold(True)
    pyplot.grid(True)
    ax = array(pyplot.axis())
    # ax[1] = max(minxend, ax[1])
    pyplot.axis(ax)
    pyplot.title('Principal Axes Lengths')
    # pyplot.xticks(xticklocs)
    self._xlabel(iabscissa)
    self._finalize_plotting()
    return self
Example 18
def initialize(self, length=None):
    """see ``__init__``"""
    if length is None:
        length = len(self.bounds)
    max_i = min((len(self.bounds) - 1, length - 1))
    self._lb = array([self.bounds[min((i, max_i))][0]
                      if self.bounds[min((i, max_i))][0] is not None
                      else -np.Inf
                      for i in range(length)], copy=False)
    self._ub = array([self.bounds[min((i, max_i))][1]
                      if self.bounds[min((i, max_i))][1] is not None
                      else np.Inf
                      for i in range(length)], copy=False)
    lb = self._lb
    ub = self._ub
    # define added values for lower and upper bound
    self._al = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(lb[i])) / 20])
                      if isfinite(lb[i]) else 1 for i in rglen(lb)], copy=False)
    self._au = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(ub[i])) / 20])
                      if isfinite(ub[i]) else 1 for i in rglen(ub)], copy=False)
Example 19
def _solveRelativeDG(self, points):
    """ Solves the norm-constrained version of the problem.

        min   sum_q z_q
        s.t.  z_q >= c'x_q - 1
              z_q >= 1 - c'x_q
              A'y  =  c
              b'y  =  1
              ||c|| = 1
              y >= 0
    """
    if self.normalize_c == 1:
        error = self._solveRelativeDGNorm1(points)
    elif self.normalize_c == np.inf:
        error = self._solveRelativeDGNormInf(points)
    return error
Example 20
def xmatch_basic(ra1, dec1, ra2, dec2, match_radius=5.0):
    '''
    This is a quick matcher that uses great_circle_dist to find the closest
    object in (ra2, dec2) within match_radius to (ra1, dec1). (ra1, dec1) must
    be a scalar pair, while (ra2, dec2) must be np.arrays of the same lengths.

    PARAMETERS:
    ra1/dec1: coordinates of the target to match
    ra2/dec2: coordinate np.arrays of the list of coordinates to match to

    RETURNS:
    A tuple like the following:

    (True -> matched or False -> no match,
     minimum distance between target and list)
    '''
    min_dist_arcsec = np.min(great_circle_dist(ra1, dec1, ra2, dec2))

    if (min_dist_arcsec < match_radius):
        return (True, min_dist_arcsec)
    else:
        return (False, min_dist_arcsec)
Example 21
def scatter2d(x, y, title='2dscatterplot', xlabel=None, ylabel=None):
    fig = plt.figure()
    plt.scatter(x, y)
    plt.title(title)
    if xlabel:
        plt.xlabel(xlabel)
    if ylabel:
        plt.ylabel(ylabel)
    if not 0 <= np.min(x) <= np.max(x) <= 1:
        raise ValueError('summary_scatter2d title:', title,
                         ' input x exceeded [0,1] range. min:', np.min(x),
                         ' max:', np.max(x))
    if not 0 <= np.min(y) <= np.max(y) <= 1:
        raise ValueError('summary_scatter2d title:', title,
                         ' input y exceeded [0,1] range. min:', np.min(y),
                         ' max:', np.max(y))
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    return fig
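A quick way to exercise this helper, assuming `plt` is matplotlib.pyplot as in the function body; the points are synthetic and already scaled to [0, 1] as the range checks require:

import numpy as np

rng = np.random.default_rng(0)
x, y = rng.random(50), rng.random(50)  # made-up data in [0, 1]
fig = scatter2d(x, y, title='demo', xlabel='x', ylabel='y')
fig.savefig('demo_scatter.png')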
Example 22
def test_t_start_t_stop(self):
    """
    Tests if the t_start and t_stop arguments are correctly processed.
    """
    filename = get_test_file_full_path(
        ioclass=NestIO,
        filename='0gid-1time-1256-0.gdf',
        directory=self.local_test_dir, clean=False)
    r = NestIO(filenames=filename)

    t_stop_targ = 490. * pq.ms
    t_start_targ = 410. * pq.ms

    seg = r.read_segment(gid_list=[], t_start=t_start_targ,
                         t_stop=t_stop_targ, lazy=False,
                         id_column_gdf=0, time_column_gdf=1)
    sts = seg.spiketrains
    self.assertTrue(np.max([np.max(st.magnitude) for st in sts
                            if len(st) > 0])
                    < t_stop_targ.rescale(sts[0].times.units).magnitude)
    self.assertTrue(np.min([np.min(st.magnitude) for st in sts
                            if len(st) > 0])
                    >= t_start_targ.rescale(sts[0].times.units).magnitude)
Example 23
def CLAMP(self, param):
    """
    CLAMP(value, min, max)
    clamp the value into the range [min, max]
    """
    values = param[0]
    min_ = param[1]
    max_ = param[2]

    class Context:
        def __init__(self, min_, max_):
            self.min_ = min_
            self.max_ = max_

        def handleInput(self, value):
            if value < self.min_:
                return self.min_
            elif value > self.max_:
                return self.max_
            return value

    ctx = Context(min_, max_)
    result = values.apply(ctx.handleInput)
    return result
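Since `values` must support `.apply`, it is presumably a pandas Series; in that case the built-in `Series.clip` produces the same result without the helper class. A sketch:

import pandas as pd

values = pd.Series([-5, 0, 3, 12])
print(values.clip(lower=0, upper=10).tolist())  # [0, 0, 3, 10]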
Example 24
def get_min_pos_kinect():
    (depth, _) = get_depth()

    minVal = np.min(depth)  # the minimum value in the depth image
    minPos = np.argmin(depth)  # the raw (flat) index of the minimum value above
    xPos = np.mod(minPos, xSize)  # the x component of the raw index
    yPos = minPos // xSize  # the y component of the raw index

    xList.append(xPos)
    del xList[0]
    xPos = int(np.mean(xList))
    yList.append(yPos)
    del yList[0]
    yPos = int(np.mean(yList))

    return (xSize - xPos - 10, yPos, minVal)
Example 25
def shorten_motifs(contig_motifs, highscore_motifs):
    """
    Keep only the shortest, most concise version of the high scoring
    motifs (reduces redundancy).
    """
    keeper_motifs = set(highscore_motifs.keys())
    if len(highscore_motifs) > 0:
        shortest_contiguous = min([len(m.split("-")[0]) for m in highscore_motifs.keys()])
        # (1) Sort by keys; shortest motif to longest
        motifs_s = sorted(highscore_motifs, key=len)
        # (2) For each motif, check if it's contained in a longer version of other motifs
        for m in motifs_s:
            motif_str = m.split("-")[0]
            motif_idx = int(m.split("-")[1])
            for remaining in list(keeper_motifs):
                remaining_str = remaining.split("-")[0]
                remaining_idx = int(remaining.split("-")[1])
                match = re.search(motif_str, remaining_str)
                if match is not None and (motif_idx + match.start()) == remaining_idx \
                        and len(remaining_str) > len(motif_str):
                    # (3) If contained, remove the longer version
                    keeper_motifs.remove(remaining)
    return keeper_motifs
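Motifs here are encoded as "<sequence>-<modified position>" strings. In this hypothetical input, "GGATCC-4" contains "GATC-3" at the same modified base (the match starts at offset 1, and 3 + 1 == 4), so the longer motif is dropped; `contig_motifs` is unused in the code above, so None is passed:

highscore = {'GATC-3': 1.0, 'GGATCC-4': 1.0}  # made-up scores
print(shorten_motifs(None, highscore))  # {'GATC-3'}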
Example 26
def get_score_bounds_from_range(Z_min, Z_max, rho_lb, rho_ub, L0_max=None):
    "global variables: L0_reg_ind"
    edge_values = np.vstack([Z_min * rho_lb,
                             Z_max * rho_lb,
                             Z_min * rho_ub,
                             Z_max * rho_ub])

    if L0_max is None or L0_max == Z_min.shape[0]:
        s_min = np.sum(np.min(edge_values, axis=0))
        s_max = np.sum(np.max(edge_values, axis=0))
    else:
        min_values = np.min(edge_values, axis=0)
        s_min_reg = np.sum(np.sort(min_values[L0_reg_ind])[0:L0_max])
        s_min_no_reg = np.sum(min_values[~L0_reg_ind])
        s_min = s_min_reg + s_min_no_reg

        max_values = np.max(edge_values, axis=0)
        s_max_reg = np.sum(-np.sort(-max_values[L0_reg_ind])[0:L0_max])
        s_max_no_reg = np.sum(max_values[~L0_reg_ind])
        s_max = s_max_reg + s_max_no_reg

    return s_min, s_max
Example 27
def get_score_bounds(Z_min, Z_max, rho_lb, rho_ub, L0_reg_ind=None, L0_max=None):
    edge_values = np.vstack([Z_min * rho_lb,
                             Z_max * rho_lb,
                             Z_min * rho_ub,
                             Z_max * rho_ub])

    if (L0_max is None) or (L0_reg_ind is None) or (L0_max == Z_min.shape[0]):
        s_min = np.sum(np.min(edge_values, axis=0))
        s_max = np.sum(np.max(edge_values, axis=0))
    else:
        min_values = np.min(edge_values, axis=0)
        s_min_reg = np.sum(np.sort(min_values[L0_reg_ind])[0:L0_max])
        s_min_no_reg = np.sum(min_values[~L0_reg_ind])
        s_min = s_min_reg + s_min_no_reg

        max_values = np.max(edge_values, axis=0)
        s_max_reg = np.sum(-np.sort(-max_values[L0_reg_ind])[0:L0_max])
        s_max_no_reg = np.sum(max_values[~L0_reg_ind])
        s_max = s_max_reg + s_max_no_reg

    return s_min, s_max
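A toy sanity check with made-up bounds; without an L0 constraint the function just sums the columnwise extremes of the four products:

import numpy as np

Z_min = np.array([-1.0, 0.0, -2.0])
Z_max = np.array([1.0, 2.0, 0.5])
rho_lb = np.array([-1.0, -1.0, -1.0])
rho_ub = np.array([1.0, 1.0, 1.0])
print(get_score_bounds(Z_min, Z_max, rho_lb, rho_ub))  # (-5.0, 5.0)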
Example 28
def main(command_line_parameters=None):
    """Preprocesses the given image with the given preprocessor."""
    args = command_line_arguments(command_line_parameters)

    logger.debug("Loading preprocessor")
    preprocessor = bob.bio.base.load_resource(' '.join(args.preprocessor), "preprocessor")

    logger.debug("Loading input data from file '%s'%s", args.input_file,
                 " and '%s'" % args.annotation_file if args.annotation_file is not None else "")
    data = preprocessor.read_original_data(BioFile(1, args.input_file, 2), "", "")
    annotations = bob.db.base.annotations.read_annotation_file(args.annotation_file, 'named') \
        if args.annotation_file is not None else None

    logger.info("Preprocessing data")
    preprocessed = preprocessor(data, annotations)
    preprocessor.write_data(preprocessed, args.output_file)
    logger.info("Wrote preprocessed data to file '%s'", args.output_file)

    if args.convert_as_image is not None:
        converted = bob.core.convert(preprocessed, 'uint8', dest_range=(0, 255),
                                     source_range=(numpy.min(preprocessed), numpy.max(preprocessed)))
        bob.io.base.save(converted, args.convert_as_image)
        logger.info("Wrote preprocessed data to image file '%s'", args.convert_as_image)
Example 29
def selectThreshold(yval, pval):
    '''Select the best threshold (epsilon) for flagging anomalies, by F1 score.'''
    bestEpsilon = 0.
    bestF1 = 0.
    F1 = 0.
    step = (np.max(pval) - np.min(pval)) / 1000
    # scan candidate thresholds between min and max of pval
    for epsilon in np.arange(np.min(pval), np.max(pval), step):
        cvPrecision = pval < epsilon
        tp = np.sum((cvPrecision == 1) & (yval == 1)).astype(float)  # cast to float for the divisions below
        fp = np.sum((cvPrecision == 1) & (yval == 0)).astype(float)
        # false negatives (the original repeated the fp condition here by mistake)
        fn = np.sum((cvPrecision == 0) & (yval == 1)).astype(float)
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        F1 = (2 * precision * recall) / (precision + recall)  # F1 score
        if F1 > bestF1:  # keep the best F1 score seen so far
            bestF1 = F1
            bestEpsilon = epsilon
    return bestEpsilon, bestF1
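A synthetic demo: densities (`pval`) are high for normal points and near zero for the five labeled anomalies, so the selected epsilon should separate them (divide-by-zero warnings at the extreme thresholds are expected and harmless here):

import numpy as np

rng = np.random.default_rng(1)
pval = np.concatenate([rng.uniform(0.5, 1.0, 95), rng.uniform(0.0, 0.05, 5)])
yval = np.concatenate([np.zeros(95), np.ones(5)])
bestEpsilon, bestF1 = selectThreshold(yval, pval)
print(bestEpsilon, bestF1)  # epsilon between ~0.05 and ~0.5, F1 == 1.0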
Example 30
def check_timestamps_left_part(self, df, midway_timestamps, amin, id):
    '''
    Check the left part.
    :param df:
    :param midway_timestamps:
    :return: True if an intermediate sale is in the left part, False otherwise.
    '''
    df = df[df.id == id]
    df_timestamp_interval = df[(df.timestamp >= amin.values[0]) & (df.timestamp <= midway_timestamps)]
    df_timestamp_interval_aggregated = df_timestamp_interval.groupby('id').agg([np.min, np.max, len])
    amin_left = df_timestamp_interval_aggregated[('timestamp', 'amin')]
    amax_left = df_timestamp_interval_aggregated[('timestamp', 'amax')]
    length_left = df_timestamp_interval_aggregated[('timestamp', 'len')]
    is_timestamp_diff_equal_len_left = (amax_left - amin_left).values == (length_left - 1)
    return is_timestamp_diff_equal_len_left, amin_left, amax_left, length_left
Example 31
def check_timestamps_right_part(self, df, midway_timestamps, amax, id):
    '''
    Check the right part.
    :param df:
    :param midway_timestamps:
    :return: True if an intermediate sale is in the right part, False otherwise.
    '''
    df = df[df.id == id]
    df_timestamp_interval = df[(df.timestamp > midway_timestamps) & (df.timestamp <= amax.values[0])]
    df_timestamp_interval_aggregated = df_timestamp_interval.groupby('id').agg([np.min, np.max, len])
    amin_right = df_timestamp_interval_aggregated[('timestamp', 'amin')]
    amax_right = df_timestamp_interval_aggregated[('timestamp', 'amax')]
    length_right = df_timestamp_interval_aggregated[('timestamp', 'len')]
    is_timestamp_diff_equal_len_right = (amax_right - amin_right).values == (length_right - 1)
    return is_timestamp_diff_equal_len_right, amin_right, amax_right, length_right
Example 32
def inspect(self, output=True):
    '''
    Short function that returns the image values (mean, standard deviation,
    max, min) and the size of the image.
    If output is True, it prints the formatted string to the console
    and returns it.
    '''
    m = np.mean(self.data)
    s = np.std(self.data)
    u = np.max(self.data)
    l = np.min(self.data)
    d = self.data.shape

    if output:
        # note: the original reused the name `s` for this string, shadowing
        # the standard deviation computed above
        msg = "Mean: {0:.2f} | Std: {1:.2f} | Max: {2:.2f} | Min: {3:.2f} | " \
              "Dim: {4[0]}x{4[1]}".format(m, s, u, l, d)
        print(msg)
        return msg

    return (m, s, u, l, d)
Example 33
def resize(im, target_size, max_size):
    """
    only resize input image to target size and return scale
    :param im: BGR image input by opencv
    :param target_size: one dimensional size (the short side)
    :param max_size: one dimensional max size (the long side)
    :return:
    """
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # prevent bigger axis from being more than max_size:
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)
    return im, im_scale
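For instance, with a dummy 300x400 BGR image, a target of 600 and a cap of 1000 (values chosen arbitrarily), the short side is scaled to 600:

import numpy as np

im = np.zeros((300, 400, 3), dtype=np.uint8)  # dummy image
resized, scale = resize(im, target_size=600, max_size=1000)
print(resized.shape, scale)  # (600, 800, 3) 2.0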
Example 34
def test2():
    patient_data_paths = utils_lung.get_patient_data_paths(pathfinder.DATA_PATH)
    print(len(patient_data_paths))
    pixel_spacings_xy = []
    n_slices = []
    for k, p in enumerate(patient_data_paths):
        pid = utils_lung.extract_pid_dir(p)
        sid2data, sid2metadata = utils_lung.get_patient_data(p)
        mtd = next(iter(sid2metadata.values()))
        assert mtd['PixelSpacing'][0] == mtd['PixelSpacing'][1]
        pixel_spacings_xy.append(mtd['PixelSpacing'][0])
        n_slices.append(len(sid2metadata))
        print(pid, pixel_spacings_xy[-1], n_slices[-1])

    print('nslices', np.max(n_slices), np.min(n_slices), np.mean(n_slices))

    counts = collections.Counter(pixel_spacings_xy)
    new_list = sorted(pixel_spacings_xy, key=counts.get, reverse=True)
    print('spacing', new_list)
Example 35
def draw2dsurface(X, Y, zf):
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    X, Y = np.meshgrid(X, Y)
    Z = X * 0

    for i in range(len(X)):
        for j in range(len(X[0])):
            Z[i][j] = zf([X[i][j], Y[i][j]])

    surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
                           linewidth=0, antialiased=False)
    ax.set_zlim(np.min(Z.flatten()), np.max(Z.flatten()))
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    fig.colorbar(surf, shrink=0.5, aspect=5)
    # plt.show()
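A possible call, plotting z = x^2 + y^2 over a small grid; this assumes the same plt/cm/LinearLocator/FormatStrFormatter imports the function body uses, and the older matplotlib API where fig.gca(projection='3d') is still accepted:

import numpy as np

grid = np.linspace(-2.0, 2.0, 30)
draw2dsurface(grid, grid, lambda p: p[0]**2 + p[1]**2)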
Example 36
def getCircularBounds(fitCloud=None, width=64, height=64, smoothing=0.01):
    circumference = 2 * (width + height)

    if fitCloud is not None:
        cx = np.mean(fitCloud[:, 0])
        cy = np.mean(fitCloud[:, 1])
        r = 0.5 * max(np.max(fitCloud[:, 0]) - np.min(fitCloud[:, 0]),
                      np.max(fitCloud[:, 1]) - np.min(fitCloud[:, 1]))
    else:
        r = circumference / (2.0 * math.pi)
        cx = cy = r

    perimeterPoints = np.zeros((circumference, 2), dtype=float)
    for i in range(circumference):
        angle = (2.0 * math.pi) * float(i) / circumference - math.pi * 0.5
        perimeterPoints[i][0] = cx + r * math.cos(angle)
        perimeterPoints[i][1] = cy + r * math.sin(angle)

    bounds = {'top': perimeterPoints[0:width],
              'right': perimeterPoints[width - 1:width + height - 1],
              'bottom': perimeterPoints[width + height - 2:2 * width + height - 2],
              'left': perimeterPoints[2 * width + height - 3:]}

    bounds['s_top'], u = interpolate.splprep([bounds['top'][:, 0], bounds['top'][:, 1]], s=smoothing)
    bounds['s_right'], u = interpolate.splprep([bounds['right'][:, 0], bounds['right'][:, 1]], s=smoothing)
    bounds['s_bottom'], u = interpolate.splprep([bounds['bottom'][:, 0], bounds['bottom'][:, 1]], s=smoothing)
    bounds['s_left'], u = interpolate.splprep([bounds['left'][:, 0], bounds['left'][:, 1]], s=smoothing)

    return bounds
Example 37
def swapBlock(self, cells, d, tlx1, tly1, tlx2, tly2, cols, rows, width, height):
    if max(tlx1, tlx2) + cols < width and max(tly1, tly2) + rows < height \
            and (max(tlx1, tlx2) - min(tlx1, tlx2) >= cols
                 or max(tly1, tly2) - min(tly1, tly2) >= rows):
        temp = []
        for row in range(rows):
            for col in range(cols):
                temp.append(d[cells[tlx1 + col][tly1 + row]])
                d[cells[tlx1 + col][tly1 + row]] = d[cells[tlx2 + col][tly2 + row]]
        i = 0
        for row in range(rows):
            for col in range(cols):
                d[cells[tlx2 + col][tly2 + row]] = temp[i]
                i += 1
        return True
    else:
        return False
Example 38
def plot_data(self):
    # Right: raw data
    all_raw_data = self.raw_data
    cmax = 0.5 * all_raw_data.max()
    cmin = 0.5 * all_raw_data.min()
    self.update_sort_idcs()
    all_raw_data = all_raw_data[self.sort_idcs, :]

    self.data_image = self.data_ax.imshow(all_raw_data,
                                          interpolation='nearest', cmap='coolwarm',
                                          extent=(self.raw_lags[0], self.raw_lags[-1],
                                                  0, len(self.sort_idcs)),
                                          origin='lower')
    self.data_ax.set_aspect('auto')
    self.data_ax.spines['right'].set_visible(False)
    self.data_ax.spines['left'].set_visible(False)
    self.data_ax.spines['top'].set_visible(False)
    self.data_image.set_clim(cmin, cmax)
    self.inspect_markers = self.data_ax.scatter([], [], marker='<',
                                                clip_on=False, s=40)
    self.data_selection = mpl.patches.Rectangle((self.raw_lags[0], 0),
                                                width=self.raw_lags[-1] - self.raw_lags[0],
                                                height=0,
                                                color='white', alpha=0.75)
    self.data_ax.add_patch(self.data_selection)
    self.data_ax.set_xlim(self.raw_lags[0], self.raw_lags[-1])
    self.data_ax.set_ylim(0, len(self.sort_idcs) + 1)
    self.data_ax.set_yticks([])
    self.ui.data_overview.draw()
Example 39
def update_time(self):
    if self.show_fit:
        self.t_start = min(self.maxtime, self.get_time.value())
        self.t_stop = self.t_start + 1
        if self.t_stop > self.maxtime:
            self.t_stop = self.maxtime
        self.get_data()
        self.update_data_plot()
Example 40
def on_mouse_press(self, event):
    if event.inaxes == self.electrode_ax:
        if self.ui.btn_lasso.isChecked():
            # Select multiple points
            self.start_lasso_select(event)
        elif self.ui.btn_rectangle.isChecked():
            pass  # handled already by rect selector
        elif self.ui.btn_picker.isChecked():
            # Select a single point for display
            # Transform data coordinates to display coordinates
            x = self.x_position
            y = self.y_position
            # list(...) needed in Python 3, where zip returns an iterator
            data = event.inaxes.transData.transform(list(zip(x, y)))

            # Find the closest point
            distances = ((data[:, 0] - event.x)**2 +
                         (data[:, 1] - event.y)**2)
            min_idx, min_value = np.argmin(distances), np.min(distances)
            if min_value > 50:
                # Don't select anything if the mouse cursor is more than
                # 50 pixels away from a point
                selection = set()  # the original `{}` creates a dict, not an empty set
            else:
                selection = {min_idx}
            add_or_remove = None
            if event.key == 'shift':
                add_or_remove = 'add'
            elif event.key == 'control':
                add_or_remove = 'remove'
            self.update_inspect(selection, add_or_remove)
        else:
            raise AssertionError('No tool active')
    else:
        return
Example 41
def compute_log_sum(val):
    min_val = np.min(val, axis=0, keepdims=True)
    return np.mean(min_val - np.log(np.mean(np.exp(-val + min_val), axis=0)))
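Per column this is a numerically stable evaluation of -log(mean(exp(-val))): subtracting the columnwise minimum keeps every exponent at or below zero, so nothing underflows before the log. A sketch with made-up values:

import numpy as np

val = np.array([[1000.0, 1.0],
                [1001.0, 2.0]])
print(compute_log_sum(val))  # ~500.88; the naive form yields inf for the first column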
Example 42
def eigenDecompose(self, X, K, normalize=True):
    if (X.shape[1] >= X.shape[0]):
        s, U = la.eigh(K)
    else:
        U, s, _ = la.svd(X, check_finite=False, full_matrices=False)
        if (s.shape[0] < U.shape[1]):
            s = np.concatenate((s, np.zeros(U.shape[1] - s.shape[0])))  # note: can use low-rank formulas here
        s = s**2
    if normalize:
        s /= float(X.shape[1])
    if (np.min(s) < -1e-10):
        raise Exception('Negative eigenvalues found')
    s[s < 0] = 0
    ind = np.argsort(s)[::-1]
    U = U[:, ind]
    s = s[ind]
    return s, U
Example 43
def random_channel_shift(x, intensity, channel_axis=0):
    x = np.rollaxis(x, channel_axis, 0)
    min_x, max_x = np.min(x), np.max(x)
    channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity),
                              min_x, max_x)
                      for x_channel in x]
    x = np.stack(channel_images, axis=0)
    x = np.rollaxis(x, 0, channel_axis + 1)
    return x
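A quick check with a random channels-first image (shape and intensity are arbitrary); every channel gets its own uniform offset, and the result is clipped back to the original array's value range:

import numpy as np

x = np.random.rand(3, 4, 4)
shifted = random_channel_shift(x, intensity=0.2, channel_axis=0)
print(shifted.shape,
      float(shifted.min()) >= float(x.min()),
      float(shifted.max()) <= float(x.max()))
# (3, 4, 4) True True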
Example 44
def cut_out_non_lungs_z(images3, pmasks3, images3_seg, uid, dim):
    HU_LUNGS_MIN = -900  # the algo is sensitive to this value -- keep it at -900 unless retested
    HU_LUNGS_MAX = -400
    pix_lungs_min = hu_to_pix(HU_LUNGS_MIN)
    pix_lungs_max = hu_to_pix(HU_LUNGS_MAX)

    mid = dim // 2
    ymin = int(0.4 * images3.shape[3])  # BUG was 4
    ymax = int(0.6 * images3.shape[3])  # it failed for the scan following 4b351d0c19be183cc880f5af3fe5abee (index 240 is out of bounds for axis 3 with size 240)
    zmin_new = images3.shape[0] // 2
    zmax_new = images3.shape[0] // 2
    j = ymin
    for j in range(ymin, ymax + 1):
        img_cut = images3[:, 0, mid, j]
        img_cut_lungs = (img_cut > pix_lungs_min) & (img_cut < pix_lungs_max)
        lungs_across = np.sum(img_cut_lungs, axis=1)
        noise_bottom_some = np.mean(lungs_across[0:10])  # increase by 2
        noise = np.max([3 * np.min(lungs_across), 0.05 * np.max(lungs_across),
                        noise_bottom_some])  # experimental -- could fail if the scan has only the central part of the lungs and no borders at all -- CHECK
        zmin, zmax = find_lungs_range(lungs_across, noise)
        if zmin < zmin_new:
            zmin_new = zmin
        if zmax > zmax_new:
            # print("j, zmax: ", j, zmax)
            zmax_new = zmax

    # do not cut too finely (add a few pixels on each side ...)
    zmin_new = np.max([0, zmin_new - mid])
    zmax_new = np.min([images3.shape[0], zmax_new + mid])
    print("cut_out_non_lungs_z from to:", images3.shape[0], zmin_new, zmax_new, uid)
    if ((zmax_new - zmin_new) / images3.shape[0] < 0.5):
        print("SUSPICIOUS large cut of > 50%, NOT executing ...")
    else:
        images3 = images3[zmin_new:zmax_new]
        pmasks3 = pmasks3[zmin_new:zmax_new]
        images3_seg = images3_seg[zmin_new:zmax_new]

    return images3, pmasks3, images3_seg
Example 45
def threshold_from_data(self, X, y):
    y_bool = y == 1.  # true if x is a catast
    y_pred = self.predict_proba(X)
    if np.count_nonzero(y) == 0:
        return np.max(y_pred)
    return np.min(y_pred[y_bool][:, 1])  # TODO CHANGED FROM WILL CODE