The following code examples show how to use numpy.atleast_1d(). They are extracted from open source Python projects.
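Every example below relies on numpy.atleast_1d, which promotes scalars and 0-d arrays to 1-d arrays and returns anything that already has one or more dimensions unchanged. A minimal sketch of that behaviour:

import numpy as np

np.atleast_1d(3.0)            # -> array([3.])
np.atleast_1d(np.array(5))    # -> array([5])
np.atleast_1d([1, 2]).shape   # -> (2,)
a = np.arange(6).reshape(2, 3)
np.atleast_1d(a).shape        # -> (2, 3); input with ndim >= 1 passes through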
Example 1
def array_stream(func):
    """
    Decorates streaming functions to make sure that the stream
    is a stream of ndarrays. Objects that are not arrays are transformed
    into arrays. If the stream is in fact a single ndarray, this ndarray
    is repackaged into a sequence of length 1.

    The first argument of the decorated function is assumed to be an iterable of
    arrays, or an iterable of objects that can be cast to arrays.
    """
    @wraps(func)    # thanks functools
    def decorated(arrays, *args, **kwargs):
        if isinstance(arrays, ndarray):
            arrays = (arrays,)
        return func(map(atleast_1d, arrays), *args, **kwargs)
    return decorated
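A short usage sketch for the decorator above; the summing function and its inputs are hypothetical, not taken from the source project. The decorated function receives an iterable (here a map object) of arrays that are each at least 1-d:

import numpy as np

@array_stream
def total(arrays):
    # arrays is guaranteed to be an iterable of ndarrays, each at least 1-d
    return sum(arr.sum() for arr in arrays)

total(np.ones((2, 2)))               # a single ndarray is wrapped: -> 4.0
total([1.0, np.array([2.0, 3.0])])   # scalars are cast to 1-d: -> 6.0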
Example 2
def predict(self, states):
    """ Returns values for each state
    :param states states as feature -> value dict
    """
    previous_workspace = workspace.CurrentWorkspace()
    workspace.SwitchWorkspace(self._workspace_id)
    for input_blob in states:
        workspace.FeedBlob(
            input_blob,
            np.atleast_1d(states[input_blob]).astype(np.float32)
        )
    workspace.RunNet(self._net)
    result = {
        output: workspace.FetchBlob(output)
        for output in self._output_blobs
    }
    workspace.SwitchWorkspace(previous_workspace)
    return result
Example 3
def print_resul(sol):
    #==========================================================================
    # Print the inversion results
    #==========================================================================
    pm, model, filename = sol.pm, sol.model, sol.filename
    print('\n\nInversion success!')
    print('Name of file:', filename)
    print('Model used:', model)
    try:
        pm.pop("cond_std")
        pm.pop("tau_i_std")
        pm.pop("m_i_std")
    except:
        pass
    e_keys = sorted([s for s in list(pm.keys()) if "_std" in s])
    v_keys = [e.replace("_std", "") for e in e_keys]
    labels = ["{:<8}".format(x+":") for x in v_keys]
    np.set_printoptions(formatter={'float': lambda x: format(x, '6.3E')})
    for l, v, e in zip(labels, v_keys, e_keys):
        if "noise" not in l:
            print(l, np.atleast_1d(pm[v]), '+/-', np.atleast_1d(pm[e]),
                  np.char.mod('(%.2f%%)', abs(100*pm[e]/pm[v])))
        else:
            print(l, np.atleast_1d(pm[v]), '+/-', np.atleast_1d(pm[e]))
Example 4
def do(self, a, b):
    d = linalg.det(a)
    (s, ld) = linalg.slogdet(a)
    if asarray(a).dtype.type in (single, double):
        ad = asarray(a).astype(double)
    else:
        ad = asarray(a).astype(cdouble)
    ev = linalg.eigvals(ad)
    assert_almost_equal(d, multiply.reduce(ev, axis=-1))
    assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))

    s = np.atleast_1d(s)
    ld = np.atleast_1d(ld)
    m = (s != 0)
    assert_almost_equal(np.abs(s[m]), 1)
    assert_equal(ld[~m], -inf)
Example 5
def mlsl_gather_send(self, gather_send_id, x_nparr):
    gather_send_op = self.gather_send_nodes[gather_send_id]
    # todo: get real root_idx
    root_idx = 0
    # np.atleast_1d is used in cases when we need to reduce to a scalar value
    x_nparr = np.atleast_1d(x_nparr)
    if self.process_idx == root_idx:
        # todo: remove that workaround for non-symmetric case
        gather_send_op.arr = x_nparr
    else:
        send_buf = self.as_buffer(x_nparr)
        send_count = x_nparr.size
        recv_buf = None
        if gather_send_op.use_reduce:
            req = self.distribution.reduce(send_buf, send_buf, send_count,
                                           mlsl.DataType.FLOAT,
                                           mlsl.ReductionType.SUM,
                                           root_idx, mlsl.GroupType.DATA)
        else:
            req = self.distribution.gather(send_buf, send_count, recv_buf,
                                           mlsl.DataType.FLOAT,
                                           root_idx, mlsl.GroupType.DATA)
        self.mlsl_obj.wait(req)
Example 6
def NLS_annealing(F, xi, yi, p, N=100, n=10, sigma=5., factor=0.5):
    # N = size of population in one iteration
    # n = number of iterations
    # sigma = initial (multiplicative) standard deviation
    # factor = factor to reduce sigma per iteration
    print("initial", p)
    p = np.atleast_1d(p)
    dim = len(p)
    # make initial sigma act like multiplication by sigma^(+-1)
    sigma = np.log(sigma)*np.ones(dim)

    for k in range(n):
        # create new population by adding multiplicative gaussian noise
        P = p[None, :] * np.exp(np.random.randn(N, dim) * sigma[None, :])
        # compute mean square loss on population
        f = np.mean((F(xi[None, :], P) - yi)**2, 1)
        # replace p by new best guess
        p = P[np.argmin(f), :]
        # update sigma
        sigma *= factor

    print("parameters:", p)
    print("minimum", min(f))
    return tuple(p)
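A usage sketch for the annealing fit above, with a hypothetical exponential-decay model; the only contract is that F(xi[None, :], P) must broadcast over a population P of shape (N, dim):

import numpy as np

def F(x, P):
    # x has shape (1, n); P has shape (N, dim) -> result has shape (N, n)
    return P[:, 0:1] * np.exp(-P[:, 1:2] * x)

xi = np.linspace(0.0, 5.0, 50)
yi = 2.0 * np.exp(-0.7 * xi)
p_fit = NLS_annealing(F, xi, yi, p=[1.0, 1.0])  # should land near (2.0, 0.7)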
Example 7
def _idcheck(self, idpac):
    """Check the idpac parameter."""
    idpac = np.atleast_1d(idpac)
    self._csuro = True
    if not all([isinstance(k, int) for k in idpac]) and (len(idpac) != 3):
        raise ValueError("idpac must be a tuple/list of 3 integers.")
    else:
        # Ozkurt PAC case :
        if idpac[0] == 4:
            idpac = np.array([4, 0, 0])
            self._csuro = False
        if (idpac[1] == 0) or (idpac[2] == 0):
            self._csuro = False
            idpac = (idpac[0], 0, 0)
        self._idpac = idpac
        self.method, self.surro, self.norm = pacstr(idpac)
Example 8
def __init__(self, settings):
    Solver.__init__(self, settings)

    # setup solver
    if hasattr(self._model, "jacobian"):
        self._solver = ode(self._model.state_function,
                           jac=self._model.jacobian)
    else:
        self._solver = ode(self._model.state_function)

    self._solver.set_integrator(self._settings["Mode"],
                                method=self._settings["Method"],
                                rtol=self._settings["rTol"],
                                atol=self._settings["aTol"],
                                max_step=self._settings["step size"]
                                )
    self._solver.set_initial_value(np.atleast_1d(self._model.initial_state),
                                   t=self._settings["start time"])
Example 9
def _calc_module(self, module_name):
    """
    Calculates the output of a simulation module
    """
    if module_name in self._simulation_modules.keys():
        if self._counter[module_name] == \
                self._simulation_modules[module_name].tick_divider:
            self._current_outputs[module_name] = np.atleast_1d(
                self._simulation_modules[module_name].calc_output(
                    self._input_vector))
            self._counter[module_name] = 1
        else:
            self._counter[module_name] += 1

        # update input vector
        self._input_vector.update(
            {module_name: self._current_outputs[module_name]})
Example 10
def score_hmm_logprob(bst, hmm, normalize=False):
    """Score events in a BinnedSpikeTrainArray by computing the log
    probability under the model.

    Parameters
    ----------
    bst : BinnedSpikeTrainArray
    hmm : PoissonHMM
    normalize : bool, optional. Default is False.
        If True, log probabilities will be normalized by their sequence
        lengths.

    Returns
    -------
    logprob : array of size (n_events,)
        Log probabilities, one for each event in bst.
    """
    logprob = np.atleast_1d(hmm.score(bst))
    if normalize:
        logprob = np.atleast_1d(logprob) / bst.lengths
    return logprob
Example 11
def get_samples(desired_data):
    all_samples = []
    for data in desired_data:
        temperatures = np.atleast_1d(data['conditions']['T'])
        num_configs = np.array(data['solver'].get('sublattice_configurations'),
                               dtype=np.object).shape[0]
        site_fractions = data['solver'].get('sublattice_occupancies',
                                            [[1]] * num_configs)
        site_fraction_product = [reduce(operator.mul,
                                        list(itertools.chain(*[np.atleast_1d(f) for f in fracs])),
                                        1)
                                 for fracs in site_fractions]
        # TODO: Subtle sorting bug here, if the interactions aren't already in sorted order...
        interaction_product = []
        for fracs in site_fractions:
            interaction_product.append(float(reduce(operator.mul,
                                                    [f[0] - f[1] for f in fracs
                                                     if isinstance(f, list) and len(f) == 2],
                                                    1)))
        if len(interaction_product) == 0:
            interaction_product = [0]
        comp_features = zip(site_fraction_product, interaction_product)
        all_samples.extend(list(itertools.product(temperatures, comp_features)))
    return all_samples
Example 12
def get_loc_reads(bp, bamf, max_dp):
    loc = '%s:%d:%d' % (bp['chrom'], max(0, bp['start']), bp['end'])
    loc_reads = np.empty([0, len(dtypes.read_dtype)], dtype=dtypes.read_dtype)
    err_code = 0
    try:
        iter_loc = bamf.fetch(region=loc, until_eof=True)
        for x in iter_loc:
            read = read_to_array(x, bamf)
            if len(np.atleast_1d(read)) > 0:
                loc_reads = np.append(loc_reads, read)
            if len(loc_reads) > max_dp:
                print('Read depth too high at %s' % loc)
                err_code = 1
                return np.empty(0), err_code
        loc_reads = np.sort(loc_reads, axis=0, order=['query_name', 'ref_start'])
        loc_reads = np.unique(loc_reads)  # remove duplicates
        return loc_reads, err_code
    except ValueError:
        print('Fetching reads failed for loc: %s' % loc)
        err_code = 2
        return np.empty(0), err_code
Example 13
def transform(self, X, n_components):
    """ Fit the dataset to the number of principal components
    specified in the constructor and return the transformed dataset """
    covariance_matrix = calculate_covariance_matrix(X)

    # Where (eigenvector[:, 0] corresponds to eigenvalue[0])
    eigenvalues, eigenvectors = np.linalg.eig(covariance_matrix)

    # Sort the eigenvalues and corresponding eigenvectors from largest
    # to smallest eigenvalue and select the first n_components
    idx = eigenvalues.argsort()[::-1]
    eigenvalues = eigenvalues[idx][:n_components]
    eigenvectors = np.atleast_1d(eigenvectors[:, idx])[:, :n_components]

    # Project the data onto principal components
    X_transformed = X.dot(eigenvectors)

    return X_transformed
Example 14
def fit(self, X, y):
    # Separate data by class
    X1 = X[y == 0]
    X2 = X[y == 1]

    # Calculate the covariance matrices of the two datasets
    cov1 = calculate_covariance_matrix(X1)
    cov2 = calculate_covariance_matrix(X2)
    cov_tot = cov1 + cov2

    # Calculate the mean of the two datasets
    mean1 = X1.mean(0)
    mean2 = X2.mean(0)
    mean_diff = np.atleast_1d(mean1 - mean2)

    # Determine the vector which when X is projected onto it best separates the
    # data by class. w = (mean1 - mean2) / (cov1 + cov2)
    self.w = np.linalg.pinv(cov_tot).dot(mean_diff)
Example 15
def __call__(self, alpha):
    """ Posterior distribution

    Returns
    ---------
    lnprob: float
        Natural log of posterior probability
    """
    lp = self.lnprior(alpha)
    if np.isinf(lp):
        return -np.inf
    else:
        return np.atleast_1d(lp + self.lnlike(alpha))[0]
Example 16
def convolve_lsf(flux, lsf):
    if len(flux) < len(np.atleast_1d(lsf)):
        # Add padding to make sure to return the same length in flux.
        padding = np.ones(len(lsf) - len(flux) + 1)
        flux = np.hstack([padding, flux])

        conv_flux = 1 - np.convolve(1 - flux, lsf, mode='same') / np.sum(lsf)
        return conv_flux[len(padding):]
    else:
        # convolve 1-flux to remove edge effects without using padding
        return 1 - np.convolve(1 - flux, lsf, mode='same') / np.sum(lsf)

###############################################################################
# Convergence
###############################################################################
Example 17
def check_subjects(subjects_info):
    "Ensure subjects are provided and their data exist."

    if isinstance(subjects_info, str):
        if not pexists(subjects_info):
            raise IOError('path to subject list does not exist: {}'.format(subjects_info))
        subjects_list = np.genfromtxt(subjects_info, dtype=str)
    elif isinstance(subjects_info, collections.Iterable):
        if len(subjects_info) < 1:
            raise ValueError('Empty subject list.')
        subjects_list = subjects_info
    else:
        raise ValueError('Invalid value provided for subject list. \n '
                         'Must be a list of paths, or path to file containing '
                         'list of paths, one for each subject.')

    subject_id_list = np.atleast_1d(subjects_list)
    num_subjects = subject_id_list.size
    if num_subjects < 1:
        raise ValueError('Input subject list is empty.')

    num_digits_id_size = len(str(num_subjects))
    max_id_width = max(map(len, subject_id_list))

    return subject_id_list, num_subjects, max_id_width, num_digits_id_size
Example 18
def __init__(self, alpha, color):
    # alpha for the whole image:
    assert alpha.ndim == 2
    self.alpha = alpha
    [n, m] = alpha.shape[:2]

    color = np.atleast_1d(np.array(color)).astype('uint8')
    # color for the image:
    if color.ndim == 1:  # constant color for whole layer
        ncol = color.size
        if ncol == 1:  # grayscale layer
            self.color = color * np.ones((n, m, 3), 'uint8')
        if ncol == 3:
            self.color = np.ones((n, m, 3), 'uint8') * color[None, None, :]
    elif color.ndim == 2:  # grayscale image
        self.color = np.repeat(color[:, :, None], repeats=3, axis=2).copy().astype('uint8')
    elif color.ndim == 3:  # rgb image
        self.color = color.copy().astype('uint8')
    else:
        print(color.shape)
        raise Exception("color datatype not understood")
Example 19
def cT_helper(x, y, z, in_srs, out_srs):
    """Helper function that wraps osr CoordinateTransformation
    """
    x, y, z = np.atleast_1d(x), np.atleast_1d(y), np.atleast_1d(z)
    # Handle cases where z is 0 - probably a better way to use broadcasting for this
    if x.shape[0] != z.shape[0]:
        # Watch out for masked array input here
        orig_z = z[0]
        z = np.zeros_like(x)
        z[:] = orig_z
    orig_shape = x.shape
    cT = osr.CoordinateTransformation(in_srs, out_srs)
    # x2, y2, z2 = zip(*[cT.TransformPoint(*xyz) for xyz in zip(x, y, z)])
    x2, y2, z2 = list(zip(*[cT.TransformPoint(*xyz)
                            for xyz in zip(np.ravel(x), np.ravel(y), np.ravel(z))]))
    if len(x2) == 1:
        x2, y2, z2 = x2[0], y2[0], z2[0]
    else:
        x2 = np.array(x2).reshape(orig_shape)
        y2 = np.array(y2).reshape(orig_shape)
        z2 = np.array(z2).reshape(orig_shape)
    return x2, y2, z2
Example 20
def plotGPGO(gpgo, param):
    param_value = list(param.values())[0][1]
    x_test = np.linspace(param_value[0], param_value[1], 1000).reshape((1000, 1))
    hat = gpgo.GP.predict(x_test, return_std=True)
    y_hat, y_std = hat[0], np.sqrt(hat[1])
    l, u = y_hat - 1.96 * y_std, y_hat + 1.96 * y_std
    fig = plt.figure()
    r = fig.add_subplot(2, 1, 1)
    r.set_title('Fitted Gaussian process')
    plt.fill_between(x_test.flatten(), l, u, alpha=0.2)
    plt.plot(x_test.flatten(), y_hat, color='red', label='Posterior mean')
    plt.legend(loc=0)
    a = np.array([-gpgo._acqWrapper(np.atleast_1d(x)) for x in x_test]).flatten()
    r = fig.add_subplot(2, 1, 2)
    r.set_title('Acquisition function')
    plt.plot(x_test, a, color='green')
    gpgo._optimizeAcq(method='L-BFGS-B', n_start=1000)
    plt.axvline(x=gpgo.best, color='black', label='Found optima')
    plt.legend(loc=0)
    plt.tight_layout()
    plt.savefig(os.path.join(os.getcwd(), 'mthesis_text/figures/chapter3/sine/{}.pdf'.format(i)))
    plt.show()
Example 21
def _hz_to_mel(frequencies, htk=False):
    frequencies = np.atleast_1d(frequencies)

    if htk:
        return 2595.0 * np.log10(1.0 + frequencies / 700.0)

    # Fill in the linear part
    f_min = 0.0
    f_sp = 200.0 / 3

    mels = (frequencies - f_min) / f_sp

    # Fill in the log-scale part
    min_log_hz = 1000.0                         # beginning of log region (Hz)
    min_log_mel = (min_log_hz - f_min) / f_sp   # same (Mels)
    logstep = np.log(6.4) / 27.0                # step size for log region

    log_t = (frequencies >= min_log_hz)
    mels[log_t] = min_log_mel + np.log(frequencies[log_t] / min_log_hz) / logstep

    return mels
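This follows the Slaney-style mel scale (linear below 1 kHz, logarithmic above) with an optional HTK variant; librosa uses the same convention. A quick check of both branches, assuming _hz_to_mel is defined as above (values approximate):

import numpy as np

_hz_to_mel(np.array([200.0, 1000.0, 8000.0]))  # -> array([3., 15., ~45.25])
_hz_to_mel(440.0, htk=True)                    # -> array([~549.6])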
Example 22
def integral(x, y, I, k=10):
    """
    Integrate y = f(x) for x = 0 to a such that the integral = I
    I can be an array.

    Note that the cumulative integral is normalized by its final value
    before the roots are found, so I is effectively a fraction of the
    total integral over the given x range.

    Returns the values a that are found.
    """
    I = np.atleast_1d(I)
    f = UnivariateSpline(x, y, s=k)

    # Integrate as a function of x
    F = f.antiderivative()
    Y = F(x)

    a = []
    for intval in I:
        F2 = UnivariateSpline(x, Y/Y[-1] - intval, s=0)
        a.append(F2.roots())

    return np.hstack(a)
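A small usage sketch, assuming integral is defined as above. With f(x) = 1 on [0, 10] the normalized cumulative integral is x/10, so the fractions 0.2 and 0.5 are reached at x = 2 and x = 5:

import numpy as np
from scipy.interpolate import UnivariateSpline

x = np.linspace(0.0, 10.0, 200)
y = np.ones_like(x)
integral(x, y, I=[0.2, 0.5])  # -> approximately array([2., 5.])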
Example 23
def simulate_stock_path(self, X0, T):
    r"""
    Simulates the sequence of employment and unemployment stocks

    Parameters
    ------------
    X0 : array
        Contains initial values (E0, U0)
    T : int
        Number of periods to simulate

    Returns
    ---------
    X : iterator
        Contains sequence of employment and unemployment stocks
    """
    X = np.atleast_1d(X0)  # Recast as array just in case
    for t in range(T):
        yield X
        X = self.A @ X
Example 24
def simulate_rate_path(self, x0, T):
    r"""
    Simulates the sequence of employment and unemployment rates.

    Parameters
    ------------
    x0 : array
        Contains initial values (e0, u0)
    T : int
        Number of periods to simulate

    Returns
    ---------
    x : iterator
        Contains sequence of employment and unemployment rates
    """
    x = np.atleast_1d(x0)  # Recast as array just in case
    for t in range(T):
        yield x
        x = self.A_hat @ x
Example 25
def make_2d(array):
    """
    tiny tool to expand 1D arrays the way i want

    Parameters
    ----------
    array : array-like

    Returns
    -------
    np.array with ndim = 2
    """
    array = np.asarray(array)
    if array.ndim < 2:
        msg = 'Expected 2D input data array, but found {}D. '\
              'Expanding to 2D.'.format(array.ndim)
        warnings.warn(msg)
        array = np.atleast_1d(array)[:, None]
    return array
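A quick demonstration of the expansion, assuming make_2d is defined as above; a UserWarning is emitted whenever the input has to be expanded:

import numpy as np
import warnings

make_2d([1.0, 2.0, 3.0]).shape  # -> (3, 1), with a warning about expansion
make_2d(np.ones((4, 2))).shape  # -> (4, 2), returned unchanged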
Example 26
def round_to_n_decimal_places(array, n=3):
    """
    tool to round a float to n decimal places. n=3 by default

    Parameters
    ----------
    array : np.array
    n : int. number of decimal places to keep

    Returns
    -------
    array : rounded np.array
    """
    # check if in scientific notation
    if issubclass(array.__class__, float) and '%.e' % array == str(array):
        return array  # do nothing
    shape = np.shape(array)
    out = ((np.atleast_1d(array) * 10**n).round().astype('int') / (10.**n))
    return out.reshape(shape)
Example 27
def ylogydu(y, u):
    """
    tool to give desired output for the limit as y -> 0, which is 0

    Parameters
    ----------
    y : array-like of len(n)
    u : array-like of len(n)

    Returns
    -------
    np.array of len(n)
    """
    mask = (np.atleast_1d(y) != 0.)
    out = np.zeros_like(u)
    out[mask] = y[mask] * np.log(y[mask] / u[mask])
    return out
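A quick check of the limit handling, assuming ylogydu is defined as above; entries where y == 0 map to 0 instead of the nan that 0 * np.log(0) would produce:

import numpy as np

y = np.array([0.0, 0.5, 2.0])
u = np.array([1.0, 1.0, 1.0])
ylogydu(y, u)  # -> array([ 0.        , -0.34657359,  1.38629436])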
Example 28
def atal(x, order, num_coefs):
    x = np.atleast_1d(x)
    n = x.size
    if x.ndim > 1:
        raise ValueError("Only rank 1 input supported for now.")
    if not np.isrealobj(x):
        raise ValueError("Only real input supported for now.")
    a, e, kk = lpc(x, order)
    c = np.zeros(num_coefs)
    c[0] = a[0]
    for m in range(1, order+1):
        c[m] = -a[m]
        for k in range(1, m):
            c[m] += (float(k)/float(m) - 1)*a[k]*c[m-k]
    for m in range(order+1, num_coefs):
        for k in range(1, order+1):
            c[m] += (float(k)/float(m) - 1)*a[k]*c[m-k]
    return c
Example 29
def return_real_part(to_return):
    """
    Check if the imaginary part of to_return vanishes and return the real part

    :param to_return:
    :return:
    """
    if not isinstance(to_return, (Number, list, np.ndarray)):
        raise TypeError
    if isinstance(to_return, (list, np.ndarray)):
        if not all([isinstance(num, Number) for num in to_return]):
            raise TypeError

    maybe_real = np.atleast_1d(np.real_if_close(to_return))

    if maybe_real.dtype == 'complex':
        raise ValueError("Something went wrong, the imaginary part does not vanish")
    else:
        if maybe_real.shape == (1,):
            maybe_real = maybe_real[0]

    return maybe_real
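A few illustrative calls, assuming return_real_part is defined as above; np.real_if_close only drops imaginary parts within roughly 100 machine epsilons:

import numpy as np
from numbers import Number

return_real_part(3.0 + 1e-15j)           # -> 3.0 (scalar in, scalar out)
return_real_part([1 + 0j, 2 + 1e-15j])   # -> array([1., 2.])
return_real_part(1 + 1j)                 # raises ValueError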
Example 30
def __init__(self, bounds=None, num=None, step=None, points=None):
    if points is not None:
        # points are given, easy one
        self._values = np.atleast_1d(points)
        self._limits = (points.min(), points.max())
        self._num = points.size
        # TODO check for evenly spaced entries
        # for now just use provided information
        self._step = step
    elif bounds and num:
        self._limits = bounds
        self._num = num
        self._values, self._step = np.linspace(bounds[0], bounds[1], num,
                                               retstep=True)
        if step is not None and not np.isclose(self._step, step):
            raise ValueError("could not satisfy both redundant requirements "
                             "for num and step!")
    elif bounds and step:
        self._limits = bounds
        # calculate number of needed points but save correct step size
        self._num = int((bounds[1] - bounds[0]) / step + 1.5)
        self._values, self._step = np.linspace(bounds[0], bounds[1],
                                               self._num, retstep=True)
        if np.abs(step - self._step) > 1e-1:
            warnings.warn("desired step-size {} doesn't fit to given interval,"
                          " changing to {}".format(step, self._step))
    else:
        raise ValueError("not enough arguments provided!")
Example 31
def power_series(z, t, C, spatial_der_order=0):
    if not all([isinstance(item, (Number, np.ndarray)) for item in [z, t]]):
        raise TypeError
    z = np.atleast_1d(z)
    t = np.atleast_1d(t)
    if not all([len(item.shape) == 1 for item in [z, t]]):
        raise ValueError

    x = np.nan*np.zeros((len(t), len(z)))
    for i in range(len(z)):
        sum_x = np.zeros(t.shape[0])
        for j in range(len(C) - spatial_der_order):
            sum_x += C[j + spatial_der_order][0, :]*z[i]**j/sm.factorial(j)
        x[:, i] = sum_x

    if any([dim == 1 for dim in x.shape]):
        x = x.flatten()

    return x
Example 32
def _check_domain(self, value):
    """
    checks if value fits into domain

    :param value: point(s) where function shall be evaluated
    :raises: ValueError if value not in domain
    """
    in_domain = False
    value = np.atleast_1d(value)
    for interval in self.domain:
        if all(value >= interval[0]) and all(value <= interval[1]):
            in_domain = True
            break

    if not in_domain:
        raise ValueError("Function evaluated outside its domain!")
Example 33
def test_scalar_vector(self):
    x0 = 0.5
    jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0,
                                   method='2-point',
                                   as_linear_operator=True)
    jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0,
                                   as_linear_operator=True)
    jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0,
                                   method='cs',
                                   as_linear_operator=True)
    jac_true = self.jac_scalar_vector(np.atleast_1d(x0))
    np.random.seed(1)
    for i in range(10):
        p = np.random.uniform(-10, 10, size=(1,))
        assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), rtol=1e-5)
        assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), rtol=5e-6)
        assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), rtol=5e-6)
Example 34
def test_vector_scalar(self):
    x0 = np.array([100.0, -0.5])
    jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
                                   method='2-point',
                                   as_linear_operator=True)
    jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0,
                                   as_linear_operator=True)
    jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
                                   method='cs',
                                   as_linear_operator=True)
    jac_true = self.jac_vector_scalar(x0)
    np.random.seed(1)
    for i in range(10):
        p = np.random.uniform(-10, 10, size=x0.shape)
        assert_allclose(jac_diff_2.dot(p), np.atleast_1d(jac_true.dot(p)),
                        rtol=1e-5)
        assert_allclose(jac_diff_3.dot(p), np.atleast_1d(jac_true.dot(p)),
                        rtol=5e-6)
        assert_allclose(jac_diff_4.dot(p), np.atleast_1d(jac_true.dot(p)),
                        rtol=1e-7)
Example 35
def _getterChannels(self, channels, gcsFunction, valueArrayClass):
    chArray = np.atleast_1d(channels)
    value = valueArrayClass([0] * len(chArray))
    gcsFunction.argtypes = [c_int, CIntArray, valueArrayClass, c_int]
    self._convertErrorToException(
        gcsFunction(self._id, CIntArray(chArray), value, len(chArray)))
    return value.toNumpyArray()
Example 36
def _setterChannels(self, channels, value, gcsFunction, valueArrayClass):
    valueArray = np.atleast_1d(value)
    assert len(channels) == len(valueArray)
    gcsFunction.argtypes = [c_int, CIntArray, valueArrayClass, c_int]
    self._convertErrorToException(
        gcsFunction(self._id, CIntArray(channels),
                    valueArrayClass(valueArray), len(channels)))
Example 37
def _setterAxes(self, axesString, value, gcsFunction, valueArrayClass):
    nCh = len(axesString.split())
    valueArray = np.atleast_1d(value)
    assert nCh == len(valueArray)
    gcsFunction.argtypes = [c_int, c_char_p, valueArrayClass]
    self._convertErrorToException(
        gcsFunction(self._id, axesString, valueArrayClass(valueArray)))
Example 38
def setServoControlMode(self, axesString, controlMode):
    self._setterAxes(
        axesString, np.atleast_1d(controlMode).astype('int'),
        self._lib.PI_SVO, CIntArray)
Example 39
def _arrayToDict(self, dicto, keys, values):
    valueArray = np.atleast_1d(values)
    assert len(keys) == len(valueArray), \
        "%d %d" % (len(keys), len(valueArray))
    for i in range(len(keys)):
        dicto[keys[i]] = valueArray[i]
Example 40
def __init__(self, loss_fn=None, initial_position=None, test_model=None,
             batch_size=None, burn_in=0, step_sizes=.0001,
             step_probabilities=1., **kwargs):
    """
    Creates a new MCMC_sampler object.

    :param loss_fn: Target loss function without regularisation terms
    :param initial_position: Initial network weights as a 2-d array of shape
        [number of chains, number of weights]
    :param test_model: The model used on the test data. Default=None
    :param batch_size: Batch size used for stochastic sampling methods. Default=None
    :param burn_in: Number of burn-in samples. Default=0
    :param step_sizes: Step size or a list of step sizes. Default=.0001
    :param step_probabilities: Probabilities to choose a step from step_sizes,
        must sum to 1. Default=1
    """
    super().__init__(**kwargs)
    self.loss_fn = loss_fn
    self.test_model = test_model

    self.initial_position = np.asarray(initial_position, dtype=np.float32)
    self.position_shape = self.initial_position.shape
    self.position_size = self.initial_position.shape[1]  # total number of parameters of one network

    # data and parameter shapes
    self.chains_num = self.initial_position.shape[0]  # number of chains to run in parallel
    self.batch_size = batch_size if batch_size is not None else self.train_size
    self.batch_x_shape = (self.batch_size, self.input_dim)
    self.batch_y_shape = (self.batch_size, self.output_dim)

    # common parameters
    self.step_sizes = np.atleast_1d(np.asarray(step_sizes, dtype=np.float32))
    self.step_probabilities = np.atleast_1d(np.asarray(step_probabilities, dtype=np.float32))
    self.burn_in = burn_in
    self.step_multiplier = np.ones(shape=(self.chains_num,), dtype=np.float32)

    # monitor acceptance rate for reporting
    self.avg_acceptance_rate = np.ones(shape=(self.chains_num,), dtype=np.float32)
    self.avg_acceptance_rate_lambda = 0.99
    self._has_burned_in = False
Example 41
def vector_norm(data, axis=None, out=None):
    """Return length, i.e. Euclidean norm, of ndarray along axis.

    >>> v = numpy.random.random(3)
    >>> n = vector_norm(v)
    >>> numpy.allclose(n, numpy.linalg.norm(v))
    True
    >>> v = numpy.random.rand(6, 5, 3)
    >>> n = vector_norm(v, axis=-1)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
    True
    >>> n = vector_norm(v, axis=1)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
    True
    >>> v = numpy.random.rand(5, 4, 3)
    >>> n = numpy.empty((5, 3), dtype=numpy.float64)
    >>> vector_norm(v, axis=1, out=n)
    >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
    True
    >>> vector_norm([])
    0.0
    >>> vector_norm([1.0])
    1.0

    """
    data = numpy.array(data, dtype=numpy.float64, copy=True)
    if out is None:
        if data.ndim == 1:
            return math.sqrt(numpy.dot(data, data))
        data *= data
        out = numpy.atleast_1d(numpy.sum(data, axis=axis))
        numpy.sqrt(out, out)
        return out
    else:
        data *= data
        numpy.sum(data, axis=axis, out=out)
        numpy.sqrt(out, out)
Example 42
def chunk_control_matrices(self, control_ipds_fn, control_ipds_N_fn, control_kmers_fn):
    """
    """
    kmers = np.atleast_1d(np.loadtxt(control_kmers_fn, dtype="str"))
    fns = [control_ipds_fn, control_ipds_N_fn]
    n_chunks = 99
    chunksize = int(math.ceil(float(len(kmers)/n_chunks)))
    cols_chunks = list(chunks(range(len(kmers)), chunksize))
    args = []
    for i, cols_chunk in enumerate(cols_chunks):
        cut_CMDs = []
        for fn in fns:
            cut_cols = "%s-%s" % ((cols_chunk[0]+1), (cols_chunk[-1]+1))
            in_fn = fn
            out_fn = fn + ".sub.%s" % i
            cut_CMD = "cut -d$\'\\t\' -f%s %s > %s" % (cut_cols, in_fn, out_fn)
            cut_CMDs.append(cut_CMD)
        args.append((i, cut_CMDs, kmers, cols_chunk, n_chunks,
                     self.opts.min_motif_count))
    results = mbin.launch_pool(self.opts.procs, process_contig_chunk, args)

    logging.info("Combining motifs from all chunks of control data...")
    not_found = 0
    control_means = {}
    for i, result in enumerate(results):
        not_found += result[1]
        for motif in result[0].keys():
            control_means[motif] = result[0][motif]
    logging.info("Done.")

    return control_means, not_found
Example 43
def predict(self, states, actions):
    """ Returns values for each state
    :param states states as feature -> value dict
    """
    previous_workspace = workspace.CurrentWorkspace()
    workspace.SwitchWorkspace(self._workspace_id)
    for name, value in states.items():
        workspace.FeedBlob(name, np.atleast_1d(value).astype(np.float32))
    for name, value in actions.items():
        workspace.FeedBlob(name, np.atleast_1d(value).astype(np.float32))
    workspace.RunNet(self._net)
    result = {'Q': workspace.FetchBlob(self._output_blobs[0])}
    workspace.SwitchWorkspace(previous_workspace)
    return result
Example 44
def normalize(a, axis=-1, order=2):
    l2 = np.atleast_1d(np.linalg.norm(a, order, axis))
    l2[l2 == 0] = 1
    return a / np.expand_dims(l2, axis)
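A quick demonstration, assuming normalize is defined as above; rows are scaled to unit L2 norm, and all-zero rows are left untouched rather than dividing by zero:

import numpy as np

X = np.array([[3.0, 4.0],
              [0.0, 0.0]])
normalize(X)
# -> array([[0.6, 0.8],
#           [0. , 0. ]])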