The following are code examples showing how to use numpy.isfinite(). They are extracted from open source Python projects.
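Before the project examples, here is a minimal self-contained sketch (the array values are illustrative and not taken from any of the projects below) showing what numpy.isfinite returns and the two patterns that recur throughout the examples: boolean masking and the .all() check.

import numpy as np

a = np.array([1.0, np.nan, np.inf, -np.inf, 0.0])

# Elementwise: True for ordinary numbers, False for NaN and +/- infinity.
mask = np.isfinite(a)        # array([ True, False, False, False,  True])

# Pattern 1: keep only the finite entries.
finite_values = a[mask]      # array([1., 0.])

# Pattern 2: a single bool saying whether every entry is finite.
print(np.isfinite(a).all())  # False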
Example 1
def xyz_array_to_pointcloud2(points, stamp=None, frame_id=None):
    '''
    Create a sensor_msgs.PointCloud2 from an array of points.
    '''
    msg = PointCloud2()
    if stamp:
        msg.header.stamp = stamp
    if frame_id:
        msg.header.frame_id = frame_id
    if len(points.shape) == 3:
        msg.height = points.shape[1]
        msg.width = points.shape[0]
    else:
        msg.height = 1
        msg.width = len(points)
    msg.fields = [
        PointField('x', 0, PointField.FLOAT32, 1),
        PointField('y', 4, PointField.FLOAT32, 1),
        PointField('z', 8, PointField.FLOAT32, 1)]
    msg.is_bigendian = False
    msg.point_step = 12
    msg.row_step = 12*points.shape[0]
    msg.is_dense = int(np.isfinite(points).all())
    msg.data = np.asarray(points, np.float32).tostring()
    return msg
Example 2
def check_stoplimit_prices(price, label):
    """
    Check to make sure the stop/limit prices are reasonable and raise
    a BadOrderParameters exception if not.
    """
    try:
        if not isfinite(price):
            raise BadOrderParameters(
                msg="""Attempted to place an order with a {} price of {}.""".format(label, price)
            )
    # This catches arbitrary objects
    except TypeError:
        raise BadOrderParameters(
            msg="""Attempted to place an order with a {} price of {}.""".format(label, type(price))
        )

    if price < 0:
        raise BadOrderParameters(
            msg="""Can't place a {} order with a negative price.""".format(label)
        )
Example 3
def get_xyz_points(cloud_array, remove_nans=True):
    '''
    Pulls out x, y, and z columns from the cloud recordarray, and returns
    a 3xN matrix.
    '''
    # remove crap points
    if remove_nans:
        mask = np.isfinite(cloud_array['x']) & np.isfinite(cloud_array['y']) & np.isfinite(cloud_array['z'])
        cloud_array = cloud_array[mask]

    # pull out x, y, and z values
    points = np.zeros(list(cloud_array.shape) + [3], dtype=np.float)
    points[...,0] = cloud_array['x']
    points[...,1] = cloud_array['y']
    points[...,2] = cloud_array['z']

    return points
Example 4
def _build_gmm(self, data):
    """
    Build gmm from data
    """
    st = time.time()
    self.gmm = GMM(n_components=self.K, covariance_type='diag')
    self.gmm.fit(data)

    # Setup codebook for closest center lookup
    self.codebook = self.gmm.means_
    print 'Vocab construction from data %s (%s KB, %s) => GMM %s took %5.3f s' % \
        (data.shape, data.nbytes / 1024, data.dtype, self.gmm.means_.shape, time.time() - st)
    print 'GMM: %s' % ('GOOD' if np.isfinite(self.gmm.means_).all() else 'BAD')

    # Save codebook, and index
    self.index_codebook()
Example 5
def add(self, pts, ids=None, prune=True):
    # Add only if valid and non-zero
    if not len(pts):
        return

    # Retain valid points
    valid = np.isfinite(pts).all(axis=1)
    pts = pts[valid]

    # ID valid points
    max_id = np.max(self.ids) + 1 if len(self.ids) else 0
    tids = np.arange(len(pts), dtype=np.int64) + max_id if ids is None else ids[valid].astype(np.int64)

    # Add pts to track
    for tid, pt in zip(tids, pts):
        self.tracks_[tid].append(self.index_, pt)

    # If features are propagated
    if prune:
        self.prune()

    # Frame counter
    self.index_ += 1
Example 6
def initialize(self, length=None):
    """see ``__init__``"""
    if length is None:
        length = len(self.bounds)
    max_i = min((len(self.bounds) - 1, length - 1))
    self._lb = array([self.bounds[min((i, max_i))][0]
                      if self.bounds[min((i, max_i))][0] is not None
                      else -np.Inf
                      for i in range(length)], copy=False)
    self._ub = array([self.bounds[min((i, max_i))][1]
                      if self.bounds[min((i, max_i))][1] is not None
                      else np.Inf
                      for i in range(length)], copy=False)
    lb = self._lb
    ub = self._ub
    # define added values for lower and upper bound
    self._al = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(lb[i])) / 20])
                      if isfinite(lb[i]) else 1 for i in rglen(lb)], copy=False)
    self._au = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(ub[i])) / 20])
                      if isfinite(ub[i]) else 1 for i in rglen(ub)], copy=False)
Example 7
def __getitem__(self, item: str) -> Any:
    if self._query_values or item in self._values:
        return self._values.get(item)

    hyperparameter = self.configuration_space._hyperparameters[item]
    item_idx = self.configuration_space._hyperparameter_idx[item]

    if not np.isfinite(self._vector[item_idx]):
        raise KeyError()

    value = hyperparameter._transform(self._vector[item_idx])
    # Truncate the representation of the float to be of constant
    # length for a python version
    if isinstance(hyperparameter, FloatHyperparameter):
        value = float(repr(value))
    # TODO make everything faster, then it'll be possible to init all values
    # at the same time and use an OrderedDict instead of only a dict here to
    # support iterating that dict in the same order as the actual order of
    # hyperparameters
    self._values[item] = value
    return self._values[item]
Example 8
def test_posterior_zeros(self):
    p = np.asarray([.5, 0., 0.]).reshape((1, 3))
    posterior = self.eval(self.posterior, p)
    print 'posterior', posterior
    posterior_grad = self.eval(self.posterior_grad, p)
    print 'posterior grad', posterior_grad

    kl = self.eval(self.posterior_kl, p)
    print kl
    self.assertGreater(kl.sum(), 0)
    self.assertFalse(np.isnan(kl).any())
    self.assertTrue(np.isfinite(kl).all())

    grad = self.eval(self.posterior_kl_grad, p)
    print grad
    self.assertFalse(np.isnan(grad).any())
    self.assertTrue(np.isfinite(grad).all())
Example 9
def to_cartesian(r_dev, pos, normal):
    """ Transform radial deviations from an ellipsoidal grid to Cartesian

    Parameters
    ----------
    r_dev : ndarray, shape (N, )
        Array containing the N radial deviations from the ellipse. r < 0
        means inside the ellipse.
    pos : ndarray, shape (2, N)
        The N (y, x) positions of the ellipse (as given by ``ellipse_grid``)
    normal : ndarray, shape (2, N)
        The N (y, x) unit normals of the ellipse (as given by ``ellipse_grid``)
    """
    coord_new = pos + r_dev * normal
    coord_new = coord_new[:, np.isfinite(coord_new).all(0)]
    return coord_new
Example 10
def estimate_theta(self, samples):
    '''
    Estimates the theta parameters from the given samples.

    Parameters
    ----------
    samples : array_like
        n-by-2 matrix of samples where n is the number of samples.
    '''
    if self.theta is not None:
        bnds = self.theta_bounds()

        def cost(theta):
            '''
            Calculates the cost of a given `theta` parameter.
            '''
            self.theta = np.asarray(theta)
            vals = self.logpdf(samples)
            # For optimization, filter out infinity values
            return -np.sum(vals[np.isfinite(vals)])

        result = minimize(cost, self.theta, method='TNC', bounds=bnds)
        self.theta = result.x
Example 11
def test_complex_nan_comparisons():
    nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)]
    fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1),
            complex(1, 1), complex(-1, -1), complex(0, 0)]

    with np.errstate(invalid='ignore'):
        for x in nans + fins:
            x = np.array([x])
            for y in nans + fins:
                y = np.array([y])

                if np.isfinite(x) and np.isfinite(y):
                    continue

                assert_equal(x < y, False, err_msg="%r < %r" % (x, y))
                assert_equal(x > y, False, err_msg="%r > %r" % (x, y))
                assert_equal(x <= y, False, err_msg="%r <= %r" % (x, y))
                assert_equal(x >= y, False, err_msg="%r >= %r" % (x, y))
                assert_equal(x == y, False, err_msg="%r == %r" % (x, y))
Example 12
def __ipow__(self, other):
    """
    Raise self to the power other, in place.
    """
    other_data = getdata(other)
    other_mask = getmask(other)
    with np.errstate(divide='ignore', invalid='ignore'):
        self._data.__ipow__(np.where(self._mask, self.dtype.type(1),
                                     other_data))
    invalid = np.logical_not(np.isfinite(self._data))
    if invalid.any():
        if self._mask is not nomask:
            self._mask |= invalid
        else:
            self._mask = invalid
        np.copyto(self._data, self.fill_value, where=invalid)
    new_mask = mask_or(other_mask, invalid)
    self._mask = mask_or(self._mask, new_mask)
    return self
Example 13
def step(self, action):
    self.forward_dynamics(action)
    comvel = self.get_body_comvel("torso")
    forward_reward = comvel[0]
    lb, ub = self.action_bounds
    scaling = (ub - lb) * 0.5
    ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
    contact_cost = 0.5 * 1e-3 * np.sum(
        np.square(np.clip(self.model.data.cfrc_ext, -1, 1))),
    survive_reward = 0.05
    reward = forward_reward - ctrl_cost - contact_cost + survive_reward
    state = self._state
    notdone = np.isfinite(state).all() \
        and state[2] >= 0.2 and state[2] <= 1.0
    done = not notdone
    ob = self.get_current_obs()
    return Step(ob, float(reward), done)
Example 14
def _preprocess(t, v):
    """
    Raises an exception if any of the inputs are not valid.
    Otherwise, returns a list of Points, ordered by t.
    """
    # Validate the inputs.
    if len(t) != len(v):
        raise ValueError('`t` and `v` must have the same length.')
    t_arr, v_arr = np.array(t), np.array(v)
    if not np.all(np.isfinite(t)):
        raise ValueError('All values in `t` must be finite.')
    finite_mask = np.isfinite(v_arr)
    if np.sum(finite_mask) < 2:
        raise ValueError('`v` must have at least 2 finite values.')
    t_arr, v_arr = t_arr[finite_mask], v_arr[finite_mask]
    if len(np.unique(t_arr)) != len(t_arr):
        raise ValueError('All `t` values must be unique.')

    # Order both arrays by t-values.
    sort_order = np.argsort(t_arr)
    t_arr, v_arr = t_arr[sort_order], v_arr[sort_order]
    return t_arr, v_arr
Example 15
def _zscore(a):
    """ Calculating z-score of data on the first axis.

    If the numbers in any column are all equal, scipy.stats.zscore
    will return NaN for this column. We shall correct them all to be zeros.

    Parameters
    ----------
    a: numpy array

    Returns
    -------
    zscore: numpy array
        The z-scores of input "a", with any columns including non-finite
        numbers replaced by all zeros.
    """
    assert a.ndim > 1, 'a must have more than one dimensions'
    zscore = scipy.stats.zscore(a, axis=0)
    zscore[:, np.logical_not(np.all(np.isfinite(zscore), axis=0))] = 0
    return zscore
Example 16
def test_funcs(self):
    data = self.data['binary']
    truth = self.truths['binary']
    nlp = self.truths_to_nlp(truth)
    params = self.joker_params['binary']

    p = np.concatenate((nlp, [truth['K'].value], [self.fd.v0.value]))
    mcmc_p = to_mcmc_params(p)
    p2 = from_mcmc_params(mcmc_p)
    assert np.allclose(p, p2.reshape(p.shape))  # test roundtrip

    lp = ln_prior(p, params)
    assert np.isfinite(lp)

    ll = ln_likelihood(p, params, data)
    assert np.isfinite(ll).all()

    # remove jitter from params passed in to mcmc_p
    mcmc_p = list(mcmc_p)
    mcmc_p.pop(5)  # log-jitter is 5th index in mcmc packed

    lnpost = ln_posterior(mcmc_p, params, data)
    assert np.isfinite(lnpost)
    assert np.allclose(lnpost, lp+ll.sum())
Example 17
def test_sample_prior(self):
    rnd1 = np.random.RandomState(42)
    joker1 = TheJoker(self.joker_params['binary'], random_state=rnd1)

    rnd2 = np.random.RandomState(42)
    joker2 = TheJoker(self.joker_params['triple'], random_state=rnd2)

    samples1 = joker1.sample_prior(8)
    samples2 = joker2.sample_prior(8)

    for key in samples1.keys():
        assert quantity_allclose(samples1[key], samples2[key])

    samples, ln_vals = joker2.sample_prior(8, return_logprobs=True)
    assert np.isfinite(ln_vals).all()
Example 18
def get_blazar_redshifts(blazar_type):
    table = Table.read(filename, hdu='LAT_Point_Source_Catalog')
    known_redshift_mask = np.isfinite(table['Redshift'])
    known_redshift_table = table[known_redshift_mask]

    if blazar_type == "bll":
        class_1 = known_redshift_table['CLASS'] == "bll "
        class_2 = known_redshift_table['CLASS'] == "BLL "
    if blazar_type == "fsrq":
        class_1 = known_redshift_table['CLASS'] == "fsrq "
        class_2 = known_redshift_table['CLASS'] == "FSRQ "
    if blazar_type == "bcu":
        class_1 = known_redshift_table['CLASS'] == "bcu "
        class_2 = known_redshift_table['CLASS'] == "BCU "

    class_type_mask = np.logical_or.reduce((class_1, class_2))
    sub_table = known_redshift_table[class_type_mask]

    return sub_table["Redshift"]
Example 19
def get_data(self, element, ranges, style):
    if self.geographic:
        vdim = element.vdims[0] if element.vdims else None
        value = element.level
        if vdim is not None and (value is not None and np.isfinite(value)):
            self._norm_kwargs(element, ranges, style, vdim)
            style['clim'] = style.pop('vmin'), style.pop('vmax')
            style['array'] = np.array([value])
        return ([element.data], element.crs), style, {}
    else:
        SkipRendering('Shape can only be plotted on geographic plot, '
                      'supply a coordinate reference system.')


########################################
#  Geographic features and annotations #
########################################
Example 20
def get_extents(self, element, ranges):
    """
    Subclasses the get_extents method using the GeoAxes
    set_extent method to project the extents to the
    Elements coordinate reference system.
    """
    extents = super(GeoPlot, self).get_extents(element, ranges)
    if not getattr(element, 'crs', None) or not self.geographic:
        return extents
    elif any(e is None or not np.isfinite(e) for e in extents):
        extents = None
    else:
        try:
            extents = project_extents(extents, element.crs, DEFAULT_PROJ)
        except:
            extents = None
    return (np.NaN,)*4 if not extents else extents
Example 21
def test_convert_xy(x, y):
    assume(x != 0 and y != 0)
    assume(np.isfinite(x) and np.isfinite(y))
    assume(abs(x) < 1E6 and abs(y) < 1E6)
    assume(abs(x) > 0.01 and abs(y) > 0.01)

    # Test radians
    r, theta = to_polar(x, y)
    x_new, y_new = to_cartesian(r, theta)
    assert np.allclose(x, x_new)
    assert np.allclose(y, y_new)

    # Test degrees
    r, theta = to_polar(x, y, theta_units="degrees")
    x_new, y_new = to_cartesian(r, theta, theta_units="degrees")
    assert np.allclose(x, x_new)
    assert np.allclose(y, y_new)
Example 22
def _get_viewpoint_estimation_labels(viewpoint_data, clss, num_classes):
    """Bounding-box regression targets are stored in a compact form in the
    roidb.

    This function expands those targets into the 4-of-4*K representation
    used by the network (i.e. only one class has non-zero targets). The loss
    weights are similarly expanded.

    Returns:
        view_target_data (ndarray): N x 3K blob of regression targets
        view_loss_weights (ndarray): N x 3K blob of loss weights
    """
    view_targets = np.zeros((clss.size, 3 * num_classes), dtype=np.float32)
    view_loss_weights = np.zeros(view_targets.shape, dtype=np.float32)
    inds = np.where(
        (clss > 0) &
        np.isfinite(viewpoint_data[:,0]) &
        np.isfinite(viewpoint_data[:,1]) &
        np.isfinite(viewpoint_data[:,2])
        )[0]
    for ind in inds:
        cls = clss[ind]
        start = 3 * cls
        end = start + 3
        view_targets[ind, start:end] = viewpoint_data[ind, :]
        view_loss_weights[ind, start:end] = [1., 1., 1.]

    assert not np.isinf(view_targets).any(), 'viewpoint undefined'
    return view_targets, view_loss_weights
Example 23
def correlations(A,B,pc_n=100):
    p = (1 - distance.correlation(A.flatten(),B.flatten()))
    spear = spearmanr(A.flatten(),B.flatten())
    dist_genes = np.zeros(A.shape[0])
    for i in range(A.shape[0]):
        dist_genes[i] = 1 - distance.correlation(A[i],B[i])
    pg = (np.average(dist_genes[np.isfinite(dist_genes)]))
    dist_sample = np.zeros(A.shape[1])
    for i in range(A.shape[1]):
        dist_sample[i] = 1 - distance.correlation(A[:,i],B[:,i])
    ps = (np.average(dist_sample[np.isfinite(dist_sample)]))
    pc_dist = []
    if pc_n > 0:
        u0,s0,vt0 = np.linalg.svd(A)
        u,s,vt = np.linalg.svd(B)
        for i in range(pc_n):
            pc_dist.append(abs(1 - distance.cosine(u0[:,i],u[:,i])))
        pc_dist = np.array(pc_dist)
    return p,spear[0],pg,ps,pc_dist
Example 24
def check_stoplimit_prices(price, label):
    """
    Check to make sure the stop/limit prices are reasonable and raise
    a BadOrderParameters exception if not.
    """
    try:
        if not isfinite(price):
            raise BadOrderParameters(
                msg="Attempted to place an order with a {} price "
                    "of {}.".format(label, price)
            )
    # This catches arbitrary objects
    except TypeError:
        raise BadOrderParameters(
            msg="Attempted to place an order with a {} price "
                "of {}.".format(label, type(price))
        )

    if price < 0:
        raise BadOrderParameters(
            msg="Can't place a {} order with a negative price.".format(label)
        )
Example 25
def sanitize_array(array):
    """
    Replace NaN and Inf (there should not be any!)
    :param array:
    :return:
    """
    a = np.ravel(array)
    #maxi = np.nanmax((filter(lambda x: x != float('inf'), a))
    #                 )  # Max except NaN and Inf
    #mini = np.nanmin((filter(lambda x: x != float('-inf'), a))
    #                 )  # Mini except NaN and Inf
    maxi = np.nanmax(a[np.isfinite(a)])
    mini = np.nanmin(a[np.isfinite(a)])
    array[array == float('inf')] = maxi
    array[array == float('-inf')] = mini
    mid = (maxi + mini) / 2
    array[np.isnan(array)] = mid
    return array
Example 26
def _calculate(self, X, y, categorical, metafeatures, helpers):
    skews = helpers.get_value("Skewnesses")
    std = np.nanstd(skews) if len(skews) > 0 else 0
    return std if np.isfinite(std) else 0


# @metafeatures.define("cancor1")
# def cancor1(X, y):
#     pass

# @metafeatures.define("cancor2")
# def cancor2(X, y):
#     pass

################################################################################
# Information-theoretic metafeatures
Example 27
def getROIHeight(self):
    """Returns height of ROI.

    Returns:
        float: Height of ROI.
    """
    if np.isfinite(self.zmax):
        zMax=self.zmax
    else:
        dump,zMax=self.getMeshIdxZExtend()

    if np.isfinite(self.zmin):
        zMin=self.zmin
    else:
        zMin,dump=self.getMeshIdxZExtend()

    return abs(zMax-zMin)
Example 28
def test_contextual_optimizers_follow_standard_protocol():
    for name, ContextualOptimizer in ALL_CONTEXTUALOPTIMIZERS:
        opt = ContextualOptimizer()
        n_params = 1
        n_context_dims = 1
        opt.init(n_params, n_context_dims)
        context = opt.get_desired_context()
        if context is None:
            context = np.zeros(n_context_dims)
        opt.set_context(context)
        assert_false(opt.is_behavior_learning_done())
        params = np.empty(n_params)
        opt.get_next_parameters(params)
        assert_true(np.isfinite(params).all())
        opt.set_evaluation_feedback(np.array([0.0]))
        policy = opt.best_policy()
        assert_true(np.isfinite(policy(context)).all())
        assert_pickle(name, opt)
Example 29
def test_random_behavior():
    beh = RandomBehavior(random_state=0)
    beh.init(4, 5)
    assert_equal(beh.get_n_params(), 0)
    assert_array_equal(beh.get_params(), np.array([]))
    outputs = np.empty(5)
    outputs[:] = np.nan
    beh.get_outputs(outputs)
    assert_true(np.isfinite(outputs).all())
    assert_raises_regexp(
        NotImplementedError, "does not accept any meta parameters",
        beh.set_meta_parameters, ["key"], [0.0])
    beh.reset()
    assert_raises_regexp(
        ValueError, "Length of parameter vector must be 0",
        beh.set_params, np.zeros(2))
    beh.set_params(np.array([]))
Example 30
def get_bon_thresh(normalized,power): #same
    """
    Calculate the bonferroni correction threshold.

    Divide the power by the sum of all finite values (all non-nan values).

    :param normalized: an array of all normalized p-values. Normalized p-values are -log10(p) where p is the p-value.
    :param power: the threshold power being used (usually 0.05)
    :type normalized: numpy array
    :type power: float

    :returns: The bonferroni correction
    :rtype: float

    """
    return power/sum(np.isfinite(normalized))
Example 31
def get_fdr_thresh(p_values, power):
    """
    Calculate the false discovery rate threshold.

    :param p_values: a list of p-values obtained by executing the regression
    :param power: the threshold power being used (usually 0.05)
    :type p_values: numpy array
    :type power: float

    :returns: the false discovery rate
    :rtype: float
    """
    sn = np.sort(p_values)
    sn = sn[np.isfinite(sn)]
    sn = sn[::-1]
    for i in range(len(sn)):
        thresh=0.05*i/len(sn)
        if sn[i]<=power:
            break
    return sn[i]
Example 32
def get_bon_thresh(normalized, power):  # same
    """
    Calculate the bonferroni correction threshold.

    Divide the power by the sum of all finite values (all non-nan values).

    :param normalized: an array of all normalized p-values. Normalized p-values are -log10(p) where p is the p-value.
    :param power: the threshold power being used (usually 0.05)
    :type normalized: numpy array
    :type power: float

    :returns: The bonferroni correction
    :rtype: float

    """
    return power / sum(np.isfinite(normalized))
Example 33
def get_fdr_thresh(p_values, power):
    """
    Calculate the false discovery rate threshold.

    :param p_values: a list of p-values obtained by executing the regression
    :param power: the threshold power being used (usually 0.05)
    :type p_values: numpy array
    :type power: float

    :returns: the false discovery rate
    :rtype: float
    """
    sn = np.sort(p_values)
    sn = sn[np.isfinite(sn)]
    sn = sn[::-1]
    for i in range(len(sn)):
        thresh = 0.05 * i / len(sn)
        if sn[i] <= power:
            break
    return sn[i]
Example 34
def censor_diagnosis(genotype_file,phenotype_file,final_pfile, field ='na',start_time=float('nan'),end_time=float('nan')):
    import pandas as pd
    import numpy as np
    genotypes = pd.read_csv(genotype_file)
    phenotypes = pd.read_csv(phenotype_file)
    mg=pd.merge(phenotypes,genotypes,on='id')
    if np.isnan(start_time) and np.isnan(end_time):
        print("Choose appropriate time period")
    if field=='na':
        if np.isfinite(start_time) and np.isnan(end_time):
            final = mg[mg['AgeAtICD']>start_time]
        elif np.isnan(start_time) and np.isfinite(end_time):
            final = mg[mg['AgeAtICD']<end_time]
        else:
            final = mg[(mg['AgeAtICD']>start_time)&(mg['AgeAtICD']<end_time)]
    else:
        mg['diff']=mg[field]-mg['AgeAtICD']
        if np.isfinite(start_time) and np.isnan(end_time):
            final = mg[(mg['diff']>start_time)|(np.isnan(mg['diff']))]
        elif np.isnan(start_time) and np.isfinite(end_time):
            final = mg[(mg['diff']<end_time)|(np.isnan(mg['diff']))]
        else:
            final = mg[(mg['diff']>start_time)&(mg['diff']<end_time)|(np.isnan(mg['diff']))]
    final[['id','icd9','AgeAtICD']].to_csv(final_pfile)
Example 35
def get_fdr_thresh(p_values, power):
    """
    Calculate the false discovery rate threshold.

    :param p_values: a list of p-values obtained by executing the regression
    :param power: the threshold power being used (usually 0.05)
    :type p_values: numpy array
    :type power: float

    :returns: the false discovery rate
    :rtype: float
    """
    sn = np.sort(p_values)
    sn = sn[np.isfinite(sn)]
    sn = sn[::-1]
    for i in range(len(sn)):
        thresh = power * i / len(sn)
        if sn[i] <= thresh:
            break
    return sn[i]
Example 36
def get_bhy_thresh(p_values, power):
    """
    Calculate the false discovery rate threshold.

    :param p_values: a list of p-values obtained by executing the regression
    :param power: the threshold power being used (usually 0.05)
    :type p_values: numpy array
    :type power: float

    :returns: the false discovery rate
    :rtype: float
    """
    sn = np.sort(p_values)
    sn = sn[np.isfinite(sn)]
    sn = sn[::-1]
    for i in range(len(sn)):
        thresh = power * i / (8.1*len(sn))
        if sn[i] <= thresh:
            break
    return sn[i]
Example 37
def __call__(self, params, params_args, obj, idxs, alpha, prop_mode):
    params_dict = unflatten_dict(params, params_args)
    f, grad_dict = obj.objective_function(
        params_dict, idxs, alpha=alpha, prop_mode=prop_mode)
    g, _ = flatten_dict(grad_dict)
    g_is_fin = np.isfinite(g)
    if np.all(g_is_fin):
        self.previous_x = params
        return f, g
    else:
        print("Warning: inf or nan in gradient: replacing with zeros")
        return f, np.where(g_is_fin, g, 0.)


# def objective_wrapper(params, params_args, obj, idxs, alpha):
#     params_dict = unflatten_dict(params, params_args)
#     f, grad_dict = obj.objective_function(
#         params_dict, idxs, alpha=alpha)
#     g, _ = flatten_dict(grad_dict)
#     g_is_fin = np.isfinite(g)
#     if np.all(g_is_fin):
#         return f, g
#     else:
#         print("Warning: inf or nan in gradient: replacing with zeros")
#         return f, np.where(g_is_fin, g, 0.)
Example 38
def calc_state(self):
    self.theta, theta_dot = self.j1.current_position()
    x, vx = self.slider.current_position()
    #assert( np.isfinite(x) )

    if not np.isfinite(x):
        print("x is inf")
        x = 0

    if not np.isfinite(vx):
        print("vx is inf")
        vx = 0

    if not np.isfinite(self.theta):
        print("theta is inf")
        self.theta = 0

    if not np.isfinite(theta_dot):
        print("theta_dot is inf")
        theta_dot = 0

    return np.array([
        x, vx,
        np.cos(self.theta), np.sin(self.theta), theta_dot
        ])
Example 39
def residual_multigauss(param, dataimage, nonfinite = 0.0, ravelresidual=True, showimages=False, verbose=False):
    """
    Calculating the residual between the multigaussian model with the parameters 'param' and the data.

    --- INPUT ---
    param          Parameters of multi-gaussian model to generate. See modelimage_multigauss() header for details
    dataimage      Data image to take residual
    nonfinite      Value to replace non-finite entries in residual with
    ravelresidual  To np.ravel() the residual image set this to True. Needed by scipy.optimize.leastsq() optimizer function
    showimages     To show model and residual images set to True
    verbose        Toggle verbosity

    --- EXAMPLE OF USE ---
    import tdose_model_FoV as tmf
    param   = [18,31,1*0.3,2.1*0.3,1.2*0.3,30*0.3, 110,90,200*0.5,20.1*0.5,15.2*0.5,0*0.5]
    dataimg = pyfits.open('/Users/kschmidt/work/TDOSE/mock_cube_sourcecat161213_tdose_mock_cube.fits')[0].data[0,:,:]
    residual = tmf.residual_multigauss(param, dataimg, showimages=True)

    """
    if verbose: print(' - Estimating residual (= model - data) between model and data image')
    imgsize      = dataimage.shape
    xgrid, ygrid = tu.gen_gridcomponents(imgsize)
    modelimg     = tmf.modelimage_multigauss((xgrid, ygrid),param,imgsize,showmodelimg=showimages, verbose=verbose)

    residualimg  = modelimg - dataimage

    if showimages:
        plt.imshow(residualimg,interpolation='none', vmin=1e-5, vmax=np.max(residualimg), norm=mpl.colors.LogNorm())
        plt.title('Residual (= model - data) image')
        plt.show()

    if nonfinite is not None:
        residualimg[~np.isfinite(residualimg)] = 0.0

    if ravelresidual:
        residualimg = np.ravel(residualimg)

    return residualimg
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
Example 40
def test_server_logprob_shape(model):
    table = TINY_TABLE
    server = TreeCatServer(model)
    logprobs = server.logprob(table.data)
    N = table.num_rows
    assert logprobs.dtype == np.float32
    assert logprobs.shape == (N, )
    assert np.isfinite(logprobs).all()
Example 41
def test_ensemble_logprob_shape(ensemble):
    table = TINY_TABLE
    server = EnsembleServer(ensemble)
    logprobs = server.logprob(table.data)
    N = table.num_rows
    assert logprobs.dtype == np.float32
    assert logprobs.shape == (N, )
    assert np.isfinite(logprobs).all()