The following code examples show how to use numpy.nanmax(). They are extracted from open source Python projects.
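Before the project examples, a minimal standalone sketch of what numpy.nanmax does (the array values here are invented for illustration):

import numpy as np

a = np.array([[1.0, np.nan, 3.0],
              [np.nan, 5.0, 2.0]])

print(np.nanmax(a))          # 5.0 -- NaNs are ignored
print(np.nanmax(a, axis=0))  # [1. 5. 3.] -- column-wise maxima
print(np.max(a))             # nan -- plain max propagates NaN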
Example 1
def normalize_array(solution, prediction):
    '''Use min and max of solution as scaling factors to normalize prediction,
    then threshold it to [0, 1]. Binarize solution to {0, 1}. This allows
    applying classification scores to all cases. In principle, this should not
    do anything to properly formatted classification inputs and outputs.'''
    # Binarize solution
    sol = np.ravel(solution)  # convert to 1-d array
    # list() so this also works on Python 3, where filter returns an iterator
    maxi = np.nanmax(list(filter(lambda x: x != float('inf'), sol)))   # max except NaN and Inf
    mini = np.nanmin(list(filter(lambda x: x != float('-inf'), sol)))  # min except NaN and Inf
    if maxi == mini:
        print('Warning, cannot normalize')
        return [solution, prediction]
    diff = maxi - mini
    mid = (maxi + mini) / 2.
    new_solution = np.copy(solution)
    new_solution[solution >= mid] = 1
    new_solution[solution < mid] = 0
    # Normalize and threshold predictions (takes effect only if solution not in {0, 1})
    new_prediction = (np.copy(prediction) - float(mini)) / float(diff)
    new_prediction[new_prediction > 1] = 1  # clip predictions that exceed the bounds [0, 1]
    new_prediction[new_prediction < 0] = 0
    # Make probabilities smoother
    # new_prediction = np.power(new_prediction, (1./10))
    return [new_solution, new_prediction]
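A quick, hypothetical call to the function above (array values invented for illustration):

import numpy as np

solution = np.array([0., 1., 1., 0.])
prediction = np.array([0.2, 0.9, 1.3, -0.1])
new_sol, new_pred = normalize_array(solution, prediction)
print(new_sol)   # [0. 1. 1. 0.]
print(new_pred)  # [0.2 0.9 1.0 0.0] -- rescaled by the solution's range and clipped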
Example 2
def logscale_img(img_array, cap=255.0, coeff=1000.0):
    '''
    This scales the image according to the relation:

        logscale_img = np.log(coeff*(img/max(img))+1)/np.log(coeff)

    Taken from the DS9 scaling algorithms page at:

        http://hea-www.harvard.edu/RD/ds9/ref/how.html

    According to that page:

        coeff = 1000.0 works well for optical images
        coeff = 100.0 works well for IR images
    '''
    logscaled_img = np.log(coeff*img_array/np.nanmax(img_array) + 1)/np.log(coeff)
    return cap*logscaled_img
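A minimal sketch of calling logscale_img on synthetic data (shape and values are arbitrary):

import numpy as np

img = np.random.rand(64, 64) * 5000.0  # fake image with a wide dynamic range
scaled = logscale_img(img, cap=255.0, coeff=1000.0)
print(scaled.min(), scaled.max())      # compressed into roughly [0, 255]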
Example 3
def _learn_from_memories(self, replay_memories, q_network, global_step):
    if self._pre_learning_stage(global_step):
        loss = 0.0
        return loss

    sampled_replay_memories = replay_memories.sample(
        sample_size=self.hyperparameters.REPLAY_MEMORIES_TRAIN_SAMPLE_SIZE,
        recent_memories_span=self.hyperparameters.REPLAY_MEMORIES_RECENT_SAMPLE_SPAN)
    consequent_states = [replay_memory.consequent_state
                         for replay_memory in sampled_replay_memories]
    max_q_consequent_states = np.nanmax(
        q_network.forward_pass_batched(consequent_states), axis=1)

    train_bundles = [None] * self.hyperparameters.REPLAY_MEMORIES_TRAIN_SAMPLE_SIZE
    discount_factor = self.hyperparameters.Q_UPDATE_DISCOUNT_FACTOR
    for idx, replay_memory in enumerate(sampled_replay_memories):
        target_action_q_value = float(self._q_target(
            replay_memory=replay_memory,
            max_q_consequent_state=max_q_consequent_states[idx],
            discount_factor=discount_factor))
        train_bundles[idx] = q_network.create_train_bundle(
            state=replay_memory.initial_state,
            action_index=replay_memory.action_index,
            target_action_q_value=target_action_q_value)

    loss = q_network.train(
        train_bundles,
        global_step - self.hyperparameters.REPLAY_MEMORIES_MINIMUM_SIZE_FOR_LEARNING)
    return loss
Example 4
def _get_max_sigma(self, R):
    """Calculate maximum sigma of scanner RAS coordinates

    Parameters
    ----------
    R : 2D array, with shape [n_voxel, n_dim]
        The coordinate matrix of fMRI data from one subject

    Returns
    -------
    max_sigma : float
        The maximum sigma of scanner coordinates.
    """
    max_sigma = 2.0 * math.pow(np.nanmax(np.std(R, axis=0)), 2)
    return max_sigma
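The same computation as a hypothetical standalone snippet, on random coordinates standing in for the fMRI coordinate matrix:

import math
import numpy as np

R = np.random.rand(1000, 3) * 100.0  # fake [n_voxel, n_dim] coordinate matrix
max_sigma = 2.0 * math.pow(np.nanmax(np.std(R, axis=0)), 2)
print(max_sigma)  # twice the square of the largest per-axis standard deviation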
Example 5
def sanitize_array(array):
    """
    Replace NaN and Inf (there should not be any!)
    :param array:
    :return:
    """
    a = np.ravel(array)
    # maxi = np.nanmax((filter(lambda x: x != float('inf'), a)))   # Max except NaN and Inf
    # mini = np.nanmin((filter(lambda x: x != float('-inf'), a)))  # Min except NaN and Inf
    maxi = np.nanmax(a[np.isfinite(a)])
    mini = np.nanmin(a[np.isfinite(a)])
    array[array == float('inf')] = maxi
    array[array == float('-inf')] = mini
    mid = (maxi + mini) / 2
    array[np.isnan(array)] = mid
    return array
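A quick demonstration on a small array containing the problem values (data invented):

import numpy as np

a = np.array([1.0, np.nan, np.inf, -np.inf, 3.0])
print(sanitize_array(a))  # [1. 2. 3. 1. 3.] -- Inf -> max, -Inf -> min, NaN -> midpoint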
Example 6
def check_move_neighborhood(self, mask):
    """Given a mask block, check if any central voxels meet move threshold.

    Checks whether a cube one move in each direction from the mask center
    contains any probabilities greater than the move threshold.

    Parameters
    ----------
    mask : ndarray
        Block of mask probabilities, usually of the shape specified by
        the configured ``output_fov_shape``.

    Returns
    -------
    bool
    """
    ctr = np.asarray(mask.shape) // 2
    neigh_min = ctr - self.MOVE_DELTA
    neigh_max = ctr + self.MOVE_DELTA + 1
    # tuple() so the slice indexing also works on Python 3, where map returns an iterator
    neighborhood = mask[tuple(map(slice, neigh_min, neigh_max))]
    return np.nanmax(neighborhood) >= CONFIG.model.t_move
Example 7
def evaluateToGT(self, Li, idxs):
    """
    Evaluate the current estimate against the ground truth
    :param Li: current estimates
    :param idxs: idxs to evaluate
    :return: mean error, max error and MD score
    """
    if not isinstance(idxs, numpy.ndarray):
        idxs = numpy.asarray(idxs)

    if self.gt3D is not None:
        gt3D_subset = self.gt3D[idxs]
        if Li.shape[0] == len(idxs):
            Li_subset = Li
        else:
            Li_subset = Li[idxs]
        mean_error = numpy.mean(numpy.sqrt(numpy.square((gt3D_subset - Li_subset.reshape(gt3D_subset.shape))*self.Di_scale[idxs, None, None]).sum(axis=2)), axis=1).mean()
        max_error = numpy.max(numpy.sqrt(numpy.square((gt3D_subset - Li_subset.reshape(gt3D_subset.shape))*self.Di_scale[idxs, None, None]).sum(axis=2)))
        vals = [(numpy.nanmax(numpy.sqrt(numpy.square((gt3D_subset - Li_subset.reshape(gt3D_subset.shape))*self.Di_scale[idxs, None, None]).sum(axis=2)), axis=1) <= j).sum() / float(gt3D_subset.shape[0]) for j in range(0, 80)]
        md_score = numpy.asarray(vals).sum() / float(80.)
        return mean_error, max_error, md_score
    else:
        return 0., 0., 0.
Example 8
def min_max(self, mask=None):
    """Get the minimum and maximum value in this data.

    If a mask is provided we get the min and max value within the given mask.

    Infinities and NaN's are ignored by this algorithm.

    Args:
        mask (ndarray): the mask, we only include elements for which the mask > 0

    Returns:
        tuple: (min, max) the minimum and maximum values
    """
    if mask is not None:
        roi = mdt.create_roi(self.data, mask)
        return np.nanmin(roi), np.nanmax(roi)
    return np.nanmin(self.data), np.nanmax(self.data)
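The same masked min/max idea without the mdt dependency, as a standalone sketch with invented values:

import numpy as np

data = np.array([[1.0, np.nan], [4.0, -2.0]])
mask = np.array([[1, 0], [1, 1]])
roi = data[mask > 0]                   # keep only the masked-in elements
print(np.nanmin(roi), np.nanmax(roi))  # -2.0 4.0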
Example 9
def test_extrema():
    for nprocs in [1, 2, 4, 8]:
        ds = fake_random_ds(16, nprocs=nprocs,
                            fields=("density", "velocity_x", "velocity_y", "velocity_z"))
        for sp in [ds.sphere("c", (0.25, 'unitary')), ds.r[0.5, :, :]]:
            mi, ma = sp.quantities["Extrema"]("density")
            assert_equal(mi, np.nanmin(sp["density"]))
            assert_equal(ma, np.nanmax(sp["density"]))
            dd = ds.all_data()
            mi, ma = dd.quantities["Extrema"]("density")
            assert_equal(mi, np.nanmin(dd["density"]))
            assert_equal(ma, np.nanmax(dd["density"]))
            sp = ds.sphere("max", (0.25, 'unitary'))
            assert_equal(np.any(np.isnan(sp["radial_velocity"])), False)
            mi, ma = dd.quantities["Extrema"]("radial_velocity")
            assert_equal(mi, np.nanmin(dd["radial_velocity"]))
            assert_equal(ma, np.nanmax(dd["radial_velocity"]))
Example 10
def mospat_plot_profile(f_Var, f_Height, c_Var, c_Net, c_Stn, t_ObsStationVerticalData):
    f_lat = t_ObsStationVerticalData[c_Net][c_Stn]['f_Lat']
    f_lon = t_ObsStationVerticalData[c_Net][c_Stn]['f_Lon']
    f_Elev = t_ObsStationVerticalData[c_Net][c_Stn]['f_Elevation']
    f_hmax = 5000.
    if np.nanmax(f_Height) >= f_hmax:
        i_hmax = np.where(np.array(f_Height) <= np.array(f_hmax))[0][-1]
        f_Height = f_Height[:i_hmax]
        f_Var = f_Var[:i_hmax]
    c_var_label = ConfP.mospat_config_labels(c_Var)
    c_coord = ' [lat=%0.2f lon=%0.2f elev=%0.2f]' % (f_lat, f_lon, f_Elev)
    fig, ax = plt.subplots(figsize=(ConfP.f_PFSize[0], ConfP.f_PFSize[1]))
    line = ax.plot(f_Var, f_Height / 1000., color=ConfP.c_ObsColor[0],
                   marker=ConfP.c_ObsMarker, linestyle=ConfP.c_ObsLine[0],
                   label=c_Net)
    ax.set_title(c_Stn + c_coord)
    ax.set_ylabel('Height [km]')
    ax.set_xlabel(c_var_label)
    ax.set_ylim([0, np.nanmax(f_Height / 1000.)])
    return fig, ax, line
Example 11
def add_image(self, image):
    """
    This function ...
    :param image:
    :return:
    """
    # Create an animation to show the result of the source extraction step
    if self.max_frame_value is None:
        self.max_frame_value = np.nanmax(image)

    # Make a plot of the image
    buf = io.BytesIO()
    plotting.plot_box(image, path=buf, format="png", vmin=0.0, vmax=0.5*self.max_frame_value)
    buf.seek(0)
    im = imageio.imread(buf)
    buf.close()
    self.add_frame(im)

# -----------------------------------------------------------------
Example 12
def inpaint_biharmonic(frame, mask):
    """
    This function ...
    :param frame:
    :param mask:
    :return:
    """
    maximum = np.nanmax(frame)
    normalized = frame / maximum
    data = inpaint.inpaint_biharmonic(normalized, mask, multichannel=False)
    return data * maximum

# -----------------------------------------------------------------

# TODO: create a better inpainting function. OpenCV has one, but this is a terrible dependency
# because it's hard to install. Options:
# - The below replace_nans function can be replaced by the more up to date version at:
#   https://github.com/OpenPIV/openpiv-python/blob/master/openpiv/src/lib.pyx
#   We may want to keep it in cython so that it runs faster. However, this original does not have
#   the inverse distance weighing as in the code below, but we can maybe add this ourselves in the
#   cython code.
# - Write our own code.
# SOLUTION: SEE FUNCTION ABOVE, GENERALLY, IT IS MUCH BETTER
Example 13
def _calc(arr, out, ksize):
    gx = arr.shape[0]
    gy = arr.shape[1]
    for i in range(gx):
        for j in range(gy):
            xmn = i - ksize
            if xmn < 0:
                xmn = 0
            xmx = i + ksize
            if xmx > gx:
                xmx = gx
            ymn = j - ksize
            if ymn < 0:
                ymn = 0
            ymx = j + ksize
            if ymx > gy:
                ymx = gy
            out[i, j] = np.nanmax(arr[xmn:xmx, ymn:ymx])
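A small driver for the sliding-window maximum above (array contents are arbitrary):

import numpy as np

arr = np.random.rand(8, 8)
arr[2, 3] = np.nan       # NaNs inside a window are skipped by np.nanmax
out = np.empty_like(arr)
_calc(arr, out, ksize=1) # each cell becomes the max of its local neighborhood
print(out.max() >= arr[np.isfinite(arr)].max())  # True: the global max survives

For a non-NaN-aware but vectorized alternative, scipy.ndimage.maximum_filter computes a similar windowed maximum.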
Example 14
def local_entropy(ocl_ctx, img, window_radius, num_bins=8):
    """ compute local entropy using a sliding window """
    mf = cl.mem_flags
    cl_queue = cl.CommandQueue(ocl_ctx)
    img_np = np.array(img).astype(np.float32)
    img_buf = cl.Buffer(ocl_ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=img_np)
    min_val = np.nanmin(img)
    max_val = np.nanmax(img)
    entropy = np.zeros_like(img, dtype=np.float32)
    dest_buf = cl.Buffer(ocl_ctx, mf.WRITE_ONLY, entropy.nbytes)
    cl_dir = os.path.dirname(__file__)
    cl_filename = cl_dir + '/cl/local_entropy.cl'
    with open(cl_filename, 'r') as fd:
        clstr = fd.read()
    prg = cl.Program(ocl_ctx, clstr).build()
    prg.local_entropy(cl_queue, entropy.shape, None, img_buf, dest_buf,
                      np.int32(img.shape[1]), np.int32(img.shape[0]),
                      np.int32(window_radius), np.int32(num_bins),
                      np.float32(min_val), np.float32(max_val))
    cl.enqueue_copy(cl_queue, entropy, dest_buf)
    cl_queue.finish()
    return entropy
Example 15
def minmax(X):
    """
    Returns the MinMax Semivariance of sample X.
    X has to be an even-length array of point pairs like:
    x1, x1+h, x2, x2+h, ..., xn, xn+h.

    :param X:
    :return:
    """
    _X = np.asarray(X)

    if any([isinstance(_, (list, np.ndarray)) for _ in _X]):
        return [minmax(_) for _ in _X]

    # check even
    if len(_X) % 2 > 0:
        raise ValueError('The sample does not have an even length: {}'.format(_X))

    return (np.nanmax(_X) - np.nanmin(_X)) / np.nanmean(_X)
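Calling it on a toy sample (numbers invented):

import numpy as np

X = np.array([1.0, 3.0, 2.0, 6.0])
print(minmax(X))  # (6 - 1) / mean([1, 3, 2, 6]) = 5 / 3.0, i.e. about 1.667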
Example 16
def test_FmtHeatmap__get_min_max_from_selected_cell_values_with_cache():
    df_pn = df - 5.
    cache = {}
    fmt = pbtf.FmtHeatmap(cache=cache)
    res = fmt._get_min_max_from_selected_cell_values(None, None, df_pn)
    assert len(cache) == 1 and (None, None) in cache.keys()
    assert res == (np.nanmin(df_pn), np.nanmax(df_pn))

    min_value, max_value = np.nanmin(df.loc[['a'], ['aa', 'bb']]), np.nanmax(df.loc[['a'], ['aa', 'bb']])
    res = fmt._get_min_max_from_selected_cell_values(['a'], ['aa', 'bb'], df)
    assert len(cache) == 2 and (frozenset(['a']), frozenset(['aa', 'bb'])) in cache.keys()
    assert res == (min_value, max_value)

    res = fmt._get_min_max_from_selected_cell_values(['a'], ['aa', 'bb'], df)
    assert len(cache) == 2 and (frozenset(['a']), frozenset(['aa', 'bb'])) in cache.keys()
    assert res == (min_value, max_value)
Example 17
def test_FmtHeatmap__get_min_max_from_selected_cell_values_without_cache():
    df_pn = df - 5.
    cache = None
    fmt = pbtf.FmtHeatmap(cache=cache)
    res = fmt._get_min_max_from_selected_cell_values(None, None, df_pn)
    assert cache is None
    assert res == (np.nanmin(df_pn), np.nanmax(df_pn))

    min_value, max_value = np.nanmin(df.loc[['a'], ['aa', 'bb']]), np.nanmax(df.loc[['a'], ['aa', 'bb']])
    res = fmt._get_min_max_from_selected_cell_values(['a'], ['aa', 'bb'], df)
    assert cache is None
    assert res == (min_value, max_value)

    res = fmt._get_min_max_from_selected_cell_values(['a'], ['aa', 'bb'], df)
    assert cache is None
    assert res == (min_value, max_value)
Example 18
def MostUnstable(self, t, p, q, td):
    # Determine psfc - 300mb
    minP = p[0] - 300.
    diff = p - minP
    ind = np.where(diff > 0)

    # Determine max theta-e
    vp = self.VaporPressure(td[ind])
    t_lcl = self.TempLCL(t[ind] + 273.15, td[ind])
    thetae = self.theta[ind] * np.exp((3.376/t_lcl - 0.00254)*1000*q[ind]*(1. + 0.81 * q[ind]))
    # thetae = self.theta[ind] * np.exp((3036/t_lcl - 1.78)*(q[ind]/1000.)*(1. + (0.448/1000.) * q[ind]))
    indmax = np.where(thetae == np.nanmax(thetae))

    # Define parcel
    self.t_parcel = t[indmax]
    self.q_parcel = q[indmax]
    self.theta_parcel = self.theta[indmax]
    self.td_parcel = td[indmax]
    self.p_parcel = p[0]

    # Lifted Parcel Temperature
Example 19
def depth_callback(self, data):
    try:
        self.depth_image = self.br.imgmsg_to_cv2(data, desired_encoding="passthrough")
    except CvBridgeError as e:
        print(e)

    # print "depth"
    depth_min = np.nanmin(self.depth_image)
    depth_max = np.nanmax(self.depth_image)
    depth_img = self.depth_image.copy()
    depth_img[np.isnan(self.depth_image)] = depth_min
    depth_img = ((depth_img - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
    cv2.imshow("Depth Image", depth_img)
    cv2.waitKey(5)

    # stream = open("/home/chentao/depth_test.yaml", "w")
    # data = {'img':depth_img.tolist()}
    # yaml.dump(data, stream)
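The NaN-aware normalization used in the callback, pulled out as a standalone sketch (synthetic depth values; no ROS required):

import numpy as np

depth = np.random.rand(4, 4) * 3.0
depth[0, 0] = np.nan                      # simulate a missing depth reading
dmin, dmax = np.nanmin(depth), np.nanmax(depth)
img = depth.copy()
img[np.isnan(img)] = dmin                 # fill holes with the minimum
img8 = ((img - dmin) / (dmax - dmin) * 255).astype(np.uint8)
print(img8.min(), img8.max())             # 0 255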
Example 20
def basemap_raster_mercator(lon, lat, grid, cmin, cmax, cmap_name):
    # longitude/latitude extent
    lons = (np.amin(lon), np.amax(lon))
    lats = (np.amin(lat), np.amax(lat))

    # construct spherical mercator projection for region of interest
    m = Basemap(projection='merc', llcrnrlat=lats[0], urcrnrlat=lats[1],
                llcrnrlon=lons[0], urcrnrlon=lons[1])

    # vmin,vmax = np.nanmin(grid),np.nanmax(grid)
    masked_grid = np.ma.array(grid, mask=np.isnan(grid))
    fig = plt.figure(frameon=False, figsize=(12, 8), dpi=72)
    plt.axis('off')
    cmap = mpl.cm.get_cmap(cmap_name)
    m.pcolormesh(lon, lat, masked_grid, latlon=True, cmap=cmap, vmin=cmin, vmax=cmax)

    str_io = StringIO.StringIO()
    plt.savefig(str_io, bbox_inches='tight', format='png', pad_inches=0, transparent=True)
    plt.close()

    numpy_bounds = [(lons[0], lats[0]), (lons[1], lats[0]), (lons[1], lats[1]), (lons[0], lats[1])]
    float_bounds = [(float(x), float(y)) for x, y in numpy_bounds]
    return str_io.getvalue(), float_bounds
Example 21
def basemap_barbs_mercator(u, v, lat, lon):
    # lon/lat extents
    lons = (np.amin(lon), np.amax(lon))
    lats = (np.amin(lat), np.amax(lat))

    # construct spherical mercator projection for region of interest
    m = Basemap(projection='merc', llcrnrlat=lats[0], urcrnrlat=lats[1],
                llcrnrlon=lons[0], urcrnrlon=lons[1])

    # vmin,vmax = np.nanmin(grid),np.nanmax(grid)
    fig = plt.figure(frameon=False, figsize=(12, 8), dpi=72*4)
    plt.axis('off')
    m.quiver(lon, lat, u, v, latlon=True)

    str_io = StringIO.StringIO()
    plt.savefig(str_io, bbox_inches='tight', format='png', pad_inches=0, transparent=True)
    plt.close()

    numpy_bounds = [(lons[0], lats[0]), (lons[1], lats[0]), (lons[1], lats[1]), (lons[0], lats[1])]
    float_bounds = [(float(x), float(y)) for x, y in numpy_bounds]
    return str_io.getvalue(), float_bounds
Example 22
def _check_vals(self, vals):
    """TODO Basic check of target elements (sequence of polygons).
    """
    if self.zdata is not None:
        lyr = self.zdata.src.ds.GetLayerByName('src')
        lyr.ResetReading()
        lyr.SetSpatialFilter(None)
        src_len = lyr.GetFeatureCount()
        assert len(vals) == src_len, \
            "Argument vals must be of length %d" % src_len
    else:
        imax = 0
        for i in self.ix:
            mx = np.nanmax(i)
            if imax < mx:
                imax = mx
        assert len(vals) > imax, \
            "Argument vals cannot be subscripted by given index values"
    return vals
Example 23
def _test_covariance_visual(self):
    cov = self.sc.covariance
    cov.epsilon = .02
    cov.subsampling = 10
    # l = self.sc.quadtree.leaves[0]
    d = []
    d.append(('Full', cov._calcCovarianceMatrix(method='full', nthreads=0)))
    d.append(('Focal', cov._calcCovarianceMatrix(method='focal')))

    fig, _ = plt.subplots(1, len(d))
    for i, (title, mat) in enumerate(d):
        print('%s Max %f' % (title, num.nanmax(mat)), mat.shape)  # print() for Python 3
        fig.axes[i].imshow(mat)
        fig.axes[i].set_title(title)
    plt.show()
Example 24
def setSymColormap(self):
    cmap = {'ticks':
            [[0, (106, 0, 31, 255)],
             [.5, (255, 255, 255, 255)],
             [1., (8, 54, 104, 255)]],
            'mode': 'rgb'}
    cmap = {'ticks':
            [[0, (172, 56, 56)],
             [.5, (255, 255, 255)],
             [1., (51, 53, 120)]],
            'mode': 'rgb'}
    lvl_min = lvl_max = 0
    for plot in self.plots:
        plt_min = num.nanmin(plot.data)
        plt_max = num.nanmax(plot.data)
        lvl_max = lvl_max if plt_max < lvl_max else plt_max
        lvl_min = lvl_min if plt_min > lvl_min else plt_min

    abs_range = max(abs(lvl_min), abs(lvl_max))

    self.gradient.restoreState(cmap)
    self.setLevels(-abs_range, abs_range)
Example 25
def setSymColormap(self):
    cmap = {'ticks':
            [[0., (0, 0, 0, 255)],
             [1e-3, (106, 0, 31, 255)],
             [.5, (255, 255, 255, 255)],
             [1., (8, 54, 104, 255)]],
            'mode': 'rgb'}
    cmap = {'ticks':
            [[0., (0, 0, 0)],
             [1e-3, (172, 56, 56)],
             [.5, (255, 255, 255)],
             [1., (51, 53, 120)]],
            'mode': 'rgb'}
    lvl_min = num.nanmin(self._plot.data)
    lvl_max = num.nanmax(self._plot.data)
    abs_range = max(abs(lvl_min), abs(lvl_max))

    self.gradient.restoreState(cmap)
    self.setLevels(-abs_range, abs_range)
Example 26
def setArray(self, incomingArray, copy=False):
    """
    You can use the self.array directly, but if you want to copy from one array
    into a raster we suggest you do it this way
    :param incomingArray:
    :return:
    """
    masked = isinstance(self.array, np.ma.MaskedArray)
    if copy:
        if masked:
            self.array = np.ma.copy(incomingArray)
        else:
            self.array = np.ma.masked_invalid(incomingArray, copy=True)
    else:
        if masked:
            self.array = incomingArray
        else:
            self.array = np.ma.masked_invalid(incomingArray)

    self.rows = self.array.shape[0]
    self.cols = self.array.shape[1]
    self.min = np.nanmin(self.array)
    self.max = np.nanmax(self.array)
Example 27
def _choose_cov(self, cov_type, **cov_config):
    """Return covariance estimator and reformatted clusters"""
    cov_est = self._cov_estimators[cov_type]
    if cov_type != 'clustered':
        return cov_est, cov_config

    cov_config_upd = {k: v for k, v in cov_config.items()}

    clusters = cov_config.get('clusters', None)
    if clusters is not None:
        clusters = self.reformat_clusters(clusters).copy()
        cluster_max = np.nanmax(clusters.values3d, axis=1)
        delta = cluster_max - np.nanmin(clusters.values3d, axis=1)
        if np.any(delta != 0):
            raise ValueError('clusters must not vary within an entity')

        index = clusters.panel.minor_axis
        reindex = clusters.entities
        clusters = pd.DataFrame(cluster_max.T, index=index, columns=clusters.vars)
        clusters = clusters.loc[reindex].astype(np.int64)
        cov_config_upd['clusters'] = clusters

    return cov_est, cov_config_upd
Example 28
def get_bbox(self):
    """
    Returns the bounding box for the coordinates. Useful for setting up
    the map extent for plotting on a map.
    :return tuple: corner coordinates (llcrnrlat, urcrnrlat, llcrnrlon, urcrnrlon)
    """
    x, y, z = zip(self)
    llcrnrlat = np.nanmin(y)
    urcrnrlat = np.nanmax(y)
    llcrnrlon = np.nanmin(x)
    urcrnrlon = np.nanmax(x)
    return (llcrnrlat, urcrnrlat, llcrnrlon, urcrnrlon)
Example 29
def plot_counts(ax, data):
    """
    """
    hourloc = mpl.dates.HourLocator()
    xtickformat = mpl.dates.DateFormatter('%H:%M')
    ax.xaxis.set_major_formatter(xtickformat)
    ax.xaxis.set_major_locator(hourloc)

    cnts = data['CPC378_counts'][:]
    ix = np.where(data['WOW_IND'][:].ravel() == 1)[0]
    cnts[ix, :] = np.nan
    ax.plot_date(data['mpl_timestamp'][:].ravel(), cnts.ravel(), '-')
    ax.set_ylim((0, np.nanmax(cnts)))
    ax.set_ylabel('#')
    ax.set_xlabel('Time (utc)')
    ax.text(0.05, 0.98, 'CPC', axes_title_style, transform=ax.transAxes)
    return ax
Example 30
def visRenderedViews(self, outDir, nViews=0):
    pt = Imath.PixelType(Imath.PixelType.FLOAT)
    renders = sorted(glob.glob(outDir + '/render_*.png'))
    if (nViews > 0) and (nViews < len(renders)):
        renders = [renders[ix] for ix in range(nViews)]

    for render in renders:
        print(render)  # print() for Python 3
        rgbIm = scipy.misc.imread(render)
        dMap = loadDepth(render.replace('render_', 'depth_'))
        plt.figure(figsize=(12, 6))
        plt.subplot(121)
        plt.imshow(rgbIm)
        dMap[dMap >= 10] = np.nan
        plt.subplot(122)
        plt.imshow(dMap)
        print(np.nanmax(dMap), np.nanmin(dMap))
        plt.show()
Example 31
def derivcum2(segment, config):
    """
    Compute the second derivative of the cumulative function using
    Savitzky-Golay. Does not modify the segment's stream or traces in-place.

    IMPORTANT NOTES:

    - As this function is decorated for the gui visualization, any in-place
      modification to the segment's Stream or any of its Traces will affect
      subsequent plots.

    - Being decorated with '@gui.sideplot' or '@gui.customplot', this function
      must return a numeric sequence y taken at successive equally spaced
      points in any of these forms:

      - a Trace object
      - a Stream object
      - the tuple (x0, dx, y) or (x0, dx, y, label), where

        - x0 (numeric, `datetime` or `UTCDateTime`) is the abscissa of the first point
        - dx (numeric or `timedelta`) is the sampling period
        - y (numpy array or numeric list) are the sequence values
        - label (string, optional) is the sequence name to be displayed on the plot legend

        (if x0 is numeric and `dx` is a `timedelta` object, then x0 will be
        converted to `UTCDateTime(x0)`; if x0 is a `datetime` or `UTCDateTime`
        object and `dx` is numeric, then `dx` will be converted to
        `timedelta(seconds=dx)`)

      - a dict of any of the above types, where the keys (string) will denote
        each sequence name to be displayed on the plot legend.

    :return: the tuple (starttime, timedelta, values)

    :raise: an Exception if `segment.stream()` is empty or has more than one
        trace (possible gaps/overlaps)
    """
    cum = cumulative(segment, config)
    sec_der = savitzky_golay(cum.data, 31, 2, deriv=2)
    sec_der_abs = np.abs(sec_der)
    sec_der_abs /= np.nanmax(sec_der_abs)  # FIXME: this should be sec_der_abs /= mmm
    # the stream object has surely only one trace (see 'cumulative')
    return segment.stream()[0].stats.starttime, segment.stream()[0].stats.delta, sec_der_abs
Example 32
def find_bbox(t):
    # given a table t, find the bounding box of the ellipses for the regions
    boxes = []
    for r in t:
        a = r['Maj'] / scale
        b = r['Min'] / scale
        th = (r['PA'] + 90) * np.pi / 180.0
        dx = np.sqrt((a*np.cos(th))**2.0 + (b*np.sin(th))**2.0)
        dy = np.sqrt((a*np.sin(th))**2.0 + (b*np.cos(th))**2.0)
        boxes.append([r['RA'] - dx/np.cos(r['DEC']*np.pi/180.0),
                      r['RA'] + dx/np.cos(r['DEC']*np.pi/180.0),
                      r['DEC'] - dy,
                      r['DEC'] + dy])

    boxes = np.array(boxes)
    minra = np.nanmin(boxes[:, 0])
    maxra = np.nanmax(boxes[:, 1])
    mindec = np.nanmin(boxes[:, 2])
    maxdec = np.nanmax(boxes[:, 3])

    ra = np.mean((minra, maxra))
    dec = np.mean((mindec, maxdec))
    size = 1.2 * 3600.0 * np.max((maxdec - mindec, (maxra - minra)*np.cos(dec*np.pi/180.0)))
    return ra, dec, size
Example 33
def VshGR(GRlog, itmin, itmax):  # using the GR log
    GRmin = np.nanmin(GRlog)
    GRminInt = GRlog[(GRlog <= (GRmin*(1 + itmin/100)))]  # GRmin values
    GRminm = np.mean(GRminInt)                            # mean of the GRmin values
    GRmax = np.nanmax(GRlog)
    GRmaxInt = GRlog[(GRlog >= (GRmax*(1 - itmax/100)))]  # GRmax values
    GRmaxm = np.mean(GRmaxInt)                            # mean of the GRmax values

    Vsh = 100*(GRlog - GRminm)/(GRmaxm - GRminm)          # clay volume
    for i in range(len(Vsh)):
        if Vsh[i] > 100:
            Vsh[i] = 100
        elif Vsh[i] < 0:
            Vsh[i] = 0

    print(GRmin, GRminm, GRmax, GRmaxm, np.nanmin(Vsh), np.nanmax(Vsh))  # print() for Python 3
    return Vsh
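A hypothetical run on synthetic gamma-ray values (log values and tolerances invented):

import numpy as np

GRlog = np.array([20., 25., 80., 120., 140.])
Vsh = VshGR(GRlog, itmin=10, itmax=10)  # 10% tolerance around the min/max
print(Vsh)  # clay volume in percent, clipped to [0, 100]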
Example 34
def sanitize_array(array):
    '''Replace NaN and Inf (there should not be any!)'''
    a = np.ravel(array)
    # list() so this also works on Python 3, where filter returns an iterator
    maxi = np.nanmax(list(filter(lambda x: x != float('inf'), a)))   # max except NaN and Inf
    mini = np.nanmin(list(filter(lambda x: x != float('-inf'), a)))  # min except NaN and Inf
    array[array == float('inf')] = maxi
    array[array == float('-inf')] = mini
    mid = (maxi + mini)/2
    array[np.isnan(array)] = mid
    return array
Example 35
def frame_to_series(self, field, frame, columns=None):
    """
    Convert a frame with a DatetimeIndex and sid columns into a series with
    a sid index, using the aggregator defined by the given field.
    """
    if isinstance(frame, pd.DataFrame):
        columns = frame.columns
        frame = frame.values

    if not len(frame):
        return pd.Series(
            data=(0 if field == 'volume' else np.nan),
            index=columns,
        ).values

    if field in ['price', 'close']:
        # shortcircuit for full last row
        vals = frame[-1]
        if np.all(~np.isnan(vals)):
            return vals
        return ffill(frame)[-1]
    elif field == 'open':
        return bfill(frame)[0]
    elif field == 'volume':
        return np.nansum(frame, axis=0)
    elif field == 'high':
        return np.nanmax(frame, axis=0)
    elif field == 'low':
        return np.nanmin(frame, axis=0)
    else:
        raise ValueError("Unknown field {}".format(field))
Example 36
def quickMinMax(self, data):
    """
    Estimate the min/max values of *data* by subsampling.
    """
    while data.size > 1e6:
        ax = np.argmax(data.shape)
        sl = [slice(None)] * data.ndim
        sl[ax] = slice(None, None, 2)
        data = data[tuple(sl)]  # tuple() keeps modern numpy from rejecting list indices
    return nanmin(data), nanmax(data)
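The same subsampling idea as a standalone sketch (the function name and size threshold here are our own):

import numpy as np

def quick_min_max(data, max_size=1e6):
    # halve the largest axis until the array is small enough to scan cheaply
    while data.size > max_size:
        ax = np.argmax(data.shape)
        sl = [slice(None)] * data.ndim
        sl[ax] = slice(None, None, 2)
        data = data[tuple(sl)]
    return np.nanmin(data), np.nanmax(data)

big = np.random.rand(4000, 4000)
print(quick_min_max(big))  # approximate (min, max) from a subsampled view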
Example 37
def dataBounds(self, ax, frac=1.0, orthoRange=None):
    if frac >= 1.0 and orthoRange is None and self.bounds[ax] is not None:
        return self.bounds[ax]

    # self.prepareGeometryChange()
    if self.data is None or len(self.data) == 0:
        return (None, None)

    if ax == 0:
        d = self.data['x']
        d2 = self.data['y']
    elif ax == 1:
        d = self.data['y']
        d2 = self.data['x']

    if orthoRange is not None:
        mask = (d2 >= orthoRange[0]) * (d2 <= orthoRange[1])
        d = d[mask]
        d2 = d2[mask]

    if frac >= 1.0:
        self.bounds[ax] = (np.nanmin(d) - self._maxSpotWidth*0.7072,
                           np.nanmax(d) + self._maxSpotWidth*0.7072)
        return self.bounds[ax]
    elif frac <= 0.0:
        raise Exception("Value for parameter 'frac' must be > 0. (got %s)" % str(frac))
    else:
        mask = np.isfinite(d)
        d = d[mask]
        return np.percentile(d, [50 * (1 - frac), 50 * (1 + frac)])
Example 38
def getMaxError(self):
    """
    get max error over all joints
    :return: maximum error
    """
    return numpy.nanmax(numpy.sqrt(numpy.square(self.gt - self.joints).sum(axis=2)))
Example 39
def getMaxErrorOverSeq(self):
    """
    get max error over all joints for each image of sequence
    :return: maximum error
    """
    return numpy.nanmax(numpy.sqrt(numpy.square(self.gt - self.joints).sum(axis=2)), axis=1)
Example 40
def getJointMaxError(self, jointID):
    """
    get maximum error of one joint
    :param jointID: joint ID
    :return: maximum joint error
    """
    return numpy.nanmax(numpy.sqrt(numpy.square(self.gt[:, jointID, :] - self.joints[:, jointID, :]).sum(axis=1)))
Example 41
def getNumFramesWithinMaxDist(self, dist):
    """
    calculate the number of frames where the maximum difference of a joint is within dist mm
    :param dist: distance between joint and GT
    :return: number of frames
    """
    return (numpy.nanmax(numpy.sqrt(numpy.square(self.gt - self.joints).sum(axis=2)), axis=1) <= dist).sum()
Example 42
def getMDscore(self, dist):
    """
    calculate the max-dist score, i.e. the integral from 0 to d of (|F < x| / |F|) dx,
    approximated here by a sum over integer thresholds
    :param dist: distance between joint and GT
    :return: score value [0-1]
    """
    vals = [(numpy.nanmax(numpy.sqrt(numpy.square(self.gt - self.joints).sum(axis=2)), axis=1) <= j).sum() / float(self.joints.shape[0]) for j in range(0, dist)]
    return numpy.asarray(vals).sum() / float(dist)
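A toy check of the error metrics in Examples 38-42, with random arrays standing in for self.gt and self.joints:

import numpy as np

gt = np.random.rand(100, 14, 3) * 50        # 100 frames, 14 joints, xyz in mm
joints = gt + np.random.randn(100, 14, 3)   # noisy predictions
err = np.sqrt(np.square(gt - joints).sum(axis=2))  # per-frame, per-joint error
print(np.nanmax(err))                        # max error over all joints and frames
print((np.nanmax(err, axis=1) <= 5).sum())   # frames whose worst joint is within 5 mm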
Example 43
def normalize_data(self, values):
    normalized_values = copy.deepcopy(values)
    data = np.array(values, dtype=float)[:, 0:5]
    data_min = np.nanmin(data, 0)
    data_max = np.nanmax(data, 0)
    print(data_min)  # print() for Python 3
    print(data_max)
    for i in range(len(values)):
        for j in range(5):
            normalized_values[i][j] = np.abs(values[i][j] - data_min[j]) / np.abs(data_max[j] - data_min[j])
    return normalized_values, data_min, data_max