The following are code examples showing how to use numpy.diag. They are extracted from open source Python projects.
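As a quick refresher before the examples (a minimal sketch, not taken from any of the projects below): numpy.diag either builds a diagonal matrix from a 1-D array or extracts a diagonal from a 2-D array, with k selecting an off-diagonal.

import numpy as np

v = np.array([1, 2, 3])
M = np.diag(v)            # 3x3 matrix with v on the main diagonal
print(np.diag(M))         # [1 2 3] -- extracting round-trips
print(np.diag(M, k=1))    # [0 0]   -- first superdiagonal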
Example 1
def rhoA(self):
    # rhoA
    rhoA = pd.DataFrame(0, index=np.arange(1), columns=self.latent)

    for i in range(self.lenlatent):
        weights = pd.DataFrame(self.outer_weights[self.latent[i]])
        weights = weights[(weights.T != 0).any()]
        result = pd.DataFrame.dot(weights.T, weights)
        result_ = pd.DataFrame.dot(weights, weights.T)

        S = self.data_[self.Variables['measurement'][
            self.Variables['latent'] == self.latent[i]]]
        S = pd.DataFrame.dot(S.T, S) / S.shape[0]
        numerador = (
            np.dot(np.dot(weights.T, (S - np.diag(np.diag(S)))), weights))
        denominador = (
            (np.dot(np.dot(weights.T, (result_ - np.diag(np.diag(result_)))),
                    weights)))
        rhoA_ = ((result)**2) * (numerador / denominador)
        if (np.isnan(rhoA_.values)):
            rhoA[self.latent[i]] = 1
        else:
            rhoA[self.latent[i]] = rhoA_.values

    return rhoA.T
Example 2
def annopred_inf(beta_hats, pr_sigi, n=1000, reference_ld_mats=None, ld_window_size=100):
    """
    Infinitesimal model with SNP-specific heritability derived from annotation.
    Used as the initial values for MCMC of the non-infinitesimal model.
    """
    num_betas = len(beta_hats)
    updated_betas = sp.empty(num_betas)
    m = len(beta_hats)

    for i, wi in enumerate(range(0, num_betas, ld_window_size)):
        start_i = wi
        stop_i = min(num_betas, wi + ld_window_size)
        curr_window_size = stop_i - start_i
        Li = 1.0 / pr_sigi[start_i: stop_i]
        D = reference_ld_mats[i]
        A = (n / (1)) * D + sp.diag(Li)
        A_inv = linalg.pinv(A)
        updated_betas[start_i: stop_i] = sp.dot(
            A_inv / (1.0 / n), beta_hats[start_i: stop_i])  # Adjust the beta_hats

    return updated_betas
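For a quick sanity check, here is a hedged, self-contained sketch that calls annopred_inf with identity LD matrices, so the per-window update reduces to simple shrinkage of each beta toward zero. It assumes an older SciPy where sp.empty/sp.diag/sp.dot still alias the NumPy functions (as the snippet above relies on); on a modern SciPy, replace sp with numpy.

import numpy as np

beta_hats = np.array([0.5, -0.2, 0.1, 0.3])
pr_sigi = np.full(4, 0.01)         # per-SNP prior variances (made-up values)
ld_mats = [np.eye(2), np.eye(2)]   # one identity LD matrix per window
betas = annopred_inf(beta_hats, pr_sigi, n=1000,
                     reference_ld_mats=ld_mats, ld_window_size=2)
# With D = I, each beta is scaled by n / (n + 1/sigma_i) = 1000/1100
print(betas / beta_hats)           # ~0.909 for every entry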
Example 3
def alpha(self):
    # Cronbach Alpha
    alpha = pd.DataFrame(0, index=np.arange(1), columns=self.latent)

    for i in range(self.lenlatent):
        block = self.data_[self.Variables['measurement']
                           [self.Variables['latent'] == self.latent[i]]]
        p = len(block.columns)

        if (p != 1):
            p_ = len(block)
            correction = np.sqrt((p_ - 1) / p_)
            soma = np.var(np.sum(block, axis=1))
            cor_ = pd.DataFrame.corr(block)

            denominador = soma * correction**2
            numerador = 2 * np.sum(np.tril(cor_) - np.diag(np.diag(cor_)))

            alpha_ = (numerador / denominador) * (p / (p - 1))
            alpha[self.latent[i]] = alpha_
        else:
            alpha[self.latent[i]] = 1

    return alpha.T
Example 4
def _compute_process_and_covariance_matrices(self, dt):
    """Computes the transition and covariance matrices of the process
    model and measurement model.

    Args:
        dt (float): Timestep of the discrete transition.

    Returns:
        F (numpy.ndarray): Transition matrix.
        Q (numpy.ndarray): Process covariance matrix.
        R (numpy.ndarray): Measurement covariance matrix.
    """
    F = np.array(np.bmat([[np.eye(3), dt * np.eye(3)],
                          [np.zeros((3, 3)), np.eye(3)]]))
    self.process_matrix = F

    q_p = self.process_covariance_position
    q_v = self.process_covariance_velocity
    Q = np.diag([q_p, q_p, q_p, q_v, q_v, q_v]) ** 2 * dt
    r = self.measurement_covariance
    R = r * np.eye(4)

    self.process_covariance = Q
    self.measurement_covariance = R

    return F, Q, R
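The transition matrix above is the standard 3-D constant-velocity model. A minimal standalone sketch with made-up covariances (q_p = q_v = 1.0, dt = 0.1) that builds the same F and Q without the class, using np.block in place of the older np.bmat:

import numpy as np

dt, q_p, q_v = 0.1, 1.0, 1.0
F = np.block([[np.eye(3), dt * np.eye(3)],
              [np.zeros((3, 3)), np.eye(3)]])
Q = np.diag([q_p, q_p, q_p, q_v, q_v, q_v]) ** 2 * dt
print(F.shape, Q.shape)                  # (6, 6) (6, 6)
# Positions are propagated as x + dt * v, velocities stay put:
x = np.array([0., 0., 0., 1., 2., 3.])   # [position, velocity]
print(F @ x)                             # [0.1 0.2 0.3 1. 2. 3.]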
Example 5
def get_covariance(self):
    """Compute data covariance with the generative model.

    ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
    where S**2 contains the explained variances.

    Returns
    -------
    cov : array, shape=(n_features, n_features)
        Estimated covariance of data.
    """
    components_ = self.components_
    exp_var = self.explained_variance_
    if self.whiten:
        components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
    exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
    cov = np.dot(components_.T * exp_var_diff, components_)
    cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
    return cov
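This method appears in scikit-learn's PCA. Assuming scikit-learn is installed, a short usage sketch that compares the model covariance against the empirical one (they should agree closely when most of the variance is captured):

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
X = rng.standard_normal((500, 5)) @ rng.standard_normal((5, 5))
pca = PCA(n_components=3).fit(X)
cov = pca.get_covariance()                 # (5, 5) model covariance
emp = np.cov(X, rowvar=False, bias=True)   # empirical covariance
print(cov.shape, np.abs(cov - emp).max())  # small residual from the 2 dropped components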
Example 6
def get_scores(self):
    """Returns accuracy score evaluation result.
      - overall accuracy
      - mean accuracy
      - mean IU
      - fwavacc
    """
    hist = self.confusion_matrix
    acc = np.diag(hist).sum() / hist.sum()
    acc_cls = np.diag(hist) / hist.sum(axis=1)
    acc_cls = np.nanmean(acc_cls)
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    cls_iu = dict(zip(range(self.n_classes), iu))

    return {'Overall Acc: \t': acc,
            'Mean Acc : \t': acc_cls,
            'FreqW Acc : \t': fwavacc,
            'Mean IoU : \t': mean_iu}, cls_iu
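To see what these metrics mean, a tiny hand-checked run on a 2-class confusion matrix (rows are ground truth, columns are predictions):

import numpy as np

hist = np.array([[5., 1.],
                 [2., 4.]])
acc = np.diag(hist).sum() / hist.sum()                  # (5+4)/12 = 0.75
acc_cls = np.nanmean(np.diag(hist) / hist.sum(axis=1))  # mean(5/6, 4/6) = 0.75
iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
print(acc, acc_cls, iu)                                 # iu = [5/8, 4/7]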
Example 7
def scale_variance(Theta, eps):
    """Scale a precision matrix so that its corresponding covariance
    has unit variance on the diagonal.

    Parameters
    ----------
    Theta: ndarray
        Precision matrix
    eps: float
        Values below this threshold are set to zero

    Returns
    -------
    Theta: ndarray
        Precision of the rescaled Sigma
    Sigma: ndarray
        Sigma with ones on the diagonal
    """
    Sigma = np.linalg.inv(Theta)
    V = np.diag(np.sqrt(np.diag(Sigma) ** -1))
    Sigma = V.dot(Sigma).dot(V.T)  # = VSV
    Theta = np.linalg.inv(Sigma)
    Theta[np.abs(Theta) <= eps] = 0.
    return Theta, Sigma
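A usage sketch on a small hand-made precision matrix, checking that the rescaled covariance really has a unit diagonal:

import numpy as np

Theta = np.array([[2.0, 0.5],
                  [0.5, 1.5]])
Theta_s, Sigma = scale_variance(Theta, eps=1e-10)
print(np.diag(Sigma))                              # [1. 1.]
print(np.allclose(np.linalg.inv(Theta_s), Sigma))  # True (nothing was thresholded)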
Example 8
def sampson_error(F, pts1, pts2):
    """
    Computes the sampson error for F, and points pts1, pts2. Sampson
    error is the first order approximation to the geometric error.
    Remember that this is a squared error.

    (x'^{T} * F * x)^2
    -----------------
    (F * x)_1^2 + (F * x)_2^2 + (F^T * x')_1^2 + (F^T * x')_2^2

    where (F * x)_i^2 is the square of the i-th entry of the vector Fx
    """
    x1, x2 = unproject_points(pts1).T, unproject_points(pts2).T
    Fx1 = np.dot(F, x1)
    Fx2 = np.dot(F, x2)

    # Sampson distance as error measure
    denom = Fx1[0]**2 + Fx1[1]**2 + Fx2[0]**2 + Fx2[1]**2
    return (np.diag(x1.T.dot(Fx2)))**2 / denom
Example 9
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = compute_xopt(self.rseed, dim)
        self.rotation = compute_rotation(self.rseed + 1e6, dim)
        self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
        self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
        # decouple scaling from function definition
        self.linearTF = dot(self.linearTF, self.rotation)

    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(self.xopt, curshape)
Example 10
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = compute_xopt(self.rseed, dim)
        self.rotation = compute_rotation(self.rseed + 1e6, dim)
        self.scales = self.condition ** linspace(0, 1, dim)
        self.linearTF = dot(compute_rotation(self.rseed, dim),
                            diag(((self.condition / 10.) ** .5) ** linspace(0, 1, dim)))

    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(self.xopt, curshape)
Example 11
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = compute_xopt(self.rseed, dim)
        self.rotation = compute_rotation(self.rseed + 1e6, dim)
        self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
        self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
        self.linearTF = dot(self.linearTF, self.rotation)

    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(self.xopt, curshape)
Example 12
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = compute_xopt(self.rseed, dim)
        self.rotation = compute_rotation(self.rseed + 1e6, dim)
        self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
        self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
        # decouple scaling from function definition
        self.linearTF = dot(self.linearTF, self.rotation)

    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(self.xopt, curshape)
        self.arrexpo = resize(self.beta * linspace(0, 1, dim), curshape)
Example 13
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = compute_xopt(self.rseed, dim)
        self.rotation = compute_rotation(self.rseed + 1e6, dim)
        self.scales = (1. / self.condition ** .5) ** linspace(0, 1, dim)  # CAVE?
        self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
        # decouple scaling from function definition
        self.linearTF = dot(self.linearTF, self.rotation)
        K = np.arange(0, 12)
        self.aK = np.reshape(0.5 ** K, (1, 12))
        self.bK = np.reshape(3. ** K, (1, 12))
        self.f0 = np.sum(self.aK * np.cos(2 * np.pi * self.bK * 0.5))  # optimal value

    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(self.xopt, curshape)
Example 14
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = compute_xopt(self.rseed, dim)
        self.rotation = compute_rotation(self.rseed + 1e6, dim)
        self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
        self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
        # decouple scaling from function definition
        self.linearTF = dot(self.linearTF, self.rotation)

    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(self.xopt, curshape)
Example 15
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = .5 * self._mu1 * sign(gauss(dim, self.rseed))
        self.rotation = compute_rotation(self.rseed + 1e6, dim)
        self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
        self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
        # decouple scaling from function definition
        self.linearTF = dot(self.linearTF, self.rotation)

    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        # self.arrxopt = resize(self.xopt, curshape)
        self.arrscales = resize(2. * sign(self.xopt), curshape)  # makes up for xopt
Example 16
def quaternion_matrix(quaternion):
    """Return homogeneous rotation matrix from quaternion.

    >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
    >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
    True
    >>> M = quaternion_matrix([1, 0, 0, 0])
    >>> numpy.allclose(M, numpy.identity(4))
    True
    >>> M = quaternion_matrix([0, 1, 0, 0])
    >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
    True

    """
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    n = numpy.dot(q, q)
    if n < _EPS:
        return numpy.identity(4)
    q *= math.sqrt(2.0 / n)
    q = numpy.outer(q, q)
    return numpy.array([
        [1.0-q[2, 2]-q[3, 3],     q[1, 2]-q[3, 0],     q[1, 3]+q[2, 0], 0.0],
        [    q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3],     q[2, 3]-q[1, 0], 0.0],
        [    q[1, 3]-q[2, 0],     q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],
        [                0.0,                 0.0,                 0.0, 1.0]])
Example 17
def distribution_parameters(self, parameter_name):
    if parameter_name == 'trend':
        E = dot(dot(self.derivative_matrix.T,
                    inv(diag(self.parameters.list['omega'].current_value))),
                self.derivative_matrix)
        mean = dot(inv(eye(self.size) + E), self.data)
        cov = (self.parameters.list['sigma2'].current_value) * inv(eye(self.size) + E)
        return {'mean': mean, 'cov': cov}
    elif parameter_name == 'sigma2':
        E = dot(dot(self.derivative_matrix.T,
                    inv(diag(self.parameters.list['omega'].current_value))),
                self.derivative_matrix)
        pos = self.size
        loc = 0
        scale = 0.5 * dot((self.data - dot(eye(self.size), self.parameters.list['trend'].current_value)).T,
                          (self.data - dot(eye(self.size), self.parameters.list['trend'].current_value))) \
            + 0.5 * dot(dot(self.parameters.list['trend'].current_value.T, E),
                        self.parameters.list['trend'].current_value)
    elif parameter_name == 'lambda2':
        pos = self.size - self.total_variation_order - 1 + self.alpha
        loc = 0.5 * (norm(dot(self.derivative_matrix,
                              self.parameters.list['trend'].current_value), ord=1)) \
            / self.parameters.list['sigma2'].current_value + self.rho
        scale = 1
    elif parameter_name == str('omega'):
        pos = [sqrt(((self.parameters.list['lambda2'].current_value**2)
                     * self.parameters.list['sigma2'].current_value) / (dj**2))
               for dj in dot(self.derivative_matrix,
                             self.parameters.list['trend'].current_value)]
        loc = 0
        scale = self.parameters.list['lambda2'].current_value**2
    return {'pos': pos, 'loc': loc, 'scale': scale}
Example 18
def quaternion_matrix(quaternion):
    """Return homogeneous rotation matrix from quaternion.

    >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
    >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
    True
    >>> M = quaternion_matrix([1, 0, 0, 0])
    >>> numpy.allclose(M, numpy.identity(4))
    True
    >>> M = quaternion_matrix([0, 1, 0, 0])
    >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
    True

    """
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    n = numpy.dot(q, q)
    if n < _EPS:
        return numpy.identity(4)
    q *= math.sqrt(2.0 / n)
    q = numpy.outer(q, q)
    return numpy.array([
        [1.0-q[2, 2]-q[3, 3],     q[1, 2]-q[3, 0],     q[1, 3]+q[2, 0], 0.0],
        [    q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3],     q[2, 3]-q[1, 0], 0.0],
        [    q[1, 3]-q[2, 0],     q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],
        [                0.0,                 0.0,                 0.0, 1.0]])
Example 19
def test_psi(adjcube):
    """Tests retrieval of the wave functions and eigenvalues.
    """
    from pydft.bases.fourier import psi, O, H
    cell = adjcube
    V = QHO(cell)
    W = W4(cell)
    Ns = W.shape[1]

    Psi, epsilon = psi(V, W, cell, forceR=False)

    # Make sure that the eigenvalues are real.
    assert np.sum(np.imag(epsilon)) < 1e-13

    checkI = np.dot(Psi.conjugate().T, O(Psi, cell))
    assert abs(np.sum(np.diag(checkI)) - Ns) < 1e-13  # Should be the identity
    assert np.abs(np.sum(checkI) - Ns) < 1e-13

    checkD = np.dot(Psi.conjugate().T, H(V, Psi, cell))
    diagsum = np.sum(np.diag(checkD))
    assert np.abs(np.sum(checkD) - diagsum) < 1e-12  # Should be diagonal

    # Should match the diagonal elements of previous matrix
    assert np.allclose(np.diag(checkD), epsilon)
Example 20
def computePolarVecs(self, karg=False):
    N = len(self.times)
    L = np.reshape(self.L, (3, N))

    if karg is False:
        A = self.computeRotMatrix()
    elif np.size(karg) == 3:  # the original compared with `is`, which only
        A = self.computeRotMatrix(karg)
    elif np.size(karg) == 9:  # works by accident for small ints; `==` is correct
        A = karg

    q = np.zeros((6, N))
    for pp in range(0, N):
        Lpp = np.diag(L[:, pp])
        p = np.dot(A, np.dot(Lpp, A.T))
        q[:, pp] = np.r_[p[:, 0], p[[1, 2], 1], p[2, 2]]

    return q
Example 21
def toa_normalize(x0, y0):
    xdim = x0.shape[0]
    m = x0.shape[1]
    n = y0.shape[1]  # the original assigned x0.shape[1] to both m and n

    t = -x0[:, 1]
    x = x0 + np.tile(t, (1, m))
    y = y0 + np.tile(t, (1, n))

    qr_a = x[:, 2:(1 + xdim)]
    q, r = scipy.linalg.qr(qr_a)
    x = np.dot(q.conj().T, x)  # the original used `*`, which is matrix
    y = np.dot(q.conj().T, y)  # multiplication only for np.matrix inputs

    M = np.diag(np.sign(np.diag(qr_a)))
    x1 = np.dot(M, x)
    y1 = np.dot(M, y)
    return x1, y1
Example 22
def update_per_row(self, y_i, phi_i, J, mu0, c, v, r_prev_i, u_prev_i,
                   phi_r_i, phi_u):
    # Params:
    #   J - column indices

    nnz_i = len(J)
    residual_i = y_i - mu0 - c[J]

    prior_Phi = np.diag(np.concatenate(([phi_r_i], phi_u)))
    v_T = np.hstack((np.ones((nnz_i, 1)), v[J, :]))
    post_Phi_i = prior_Phi + \
        np.dot(v_T.T,
               np.tile(phi_i[:, np.newaxis], (1, 1 + self.num_factor)) * v_T)  # Weighted sum of v_j * v_j.T
    post_mean_i = np.squeeze(np.dot(phi_i * residual_i, v_T))

    C, lower = scipy.linalg.cho_factor(post_Phi_i)
    post_mean_i = scipy.linalg.cho_solve((C, lower), post_mean_i)

    # Generate Gaussian, recycling the Cholesky factorization from the
    # posterior mean computation.
    ru_i = math.sqrt(1 - self.relaxation ** 2) * scipy.linalg.solve_triangular(
        C, np.random.randn(len(post_mean_i)), lower=lower)
    ru_i += post_mean_i + self.relaxation * (post_mean_i -
                                             np.concatenate(([r_prev_i], u_prev_i)))
    r_i = ru_i[0]
    u_i = ru_i[1:]

    return r_i, u_i
Example 23
def update_per_col(self, y_j, phi_j, I, mu0, r, u, c_prev_j, v_prev_j,
                   phi_c_j, phi_v):
    prior_Phi = np.diag(np.concatenate(([phi_c_j], phi_v)))
    nnz_j = len(I)
    residual_j = y_j - mu0 - r[I]
    u_T = np.hstack((np.ones((nnz_j, 1)), u[I, :]))
    post_Phi_j = prior_Phi + \
        np.dot(u_T.T,
               np.tile(phi_j[:, np.newaxis], (1, 1 + self.num_factor)) * u_T)  # Weighted sum of u_i * u_i.T
    post_mean_j = np.squeeze(np.dot(phi_j * residual_j, u_T))

    C, lower = scipy.linalg.cho_factor(post_Phi_j)
    post_mean_j = scipy.linalg.cho_solve((C, lower), post_mean_j)

    # Generate Gaussian, recycling the Cholesky factorization from the
    # posterior mean computation.
    cv_j = math.sqrt(1 - self.relaxation ** 2) * scipy.linalg.solve_triangular(
        C, np.random.randn(len(post_mean_j)), lower=lower)
    cv_j += post_mean_j + self.relaxation * (post_mean_j -
                                             np.concatenate(([c_prev_j], v_prev_j)))
    c_j = cv_j[0]
    v_j = cv_j[1:]

    return c_j, v_j
Example 24
def diag(v, k=0):
    """
    Extract a diagonal or construct a diagonal array.

    This function is the equivalent of `numpy.diag` that takes masked
    values into account, see `numpy.diag` for details.

    See Also
    --------
    numpy.diag : Equivalent function for ndarrays.

    """
    output = np.diag(v, k).view(MaskedArray)
    if getmask(v) is not nomask:
        output._mask = np.diag(v._mask, k)
    return output
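This is the masked-array version from numpy.ma, normally reached as np.ma.diag. A quick demonstration of the mask being carried onto the diagonal:

import numpy.ma as ma

v = ma.array([1, 2, 3], mask=[False, True, False])
D = ma.diag(v)      # 3x3 diagonal matrix; the (1, 1) entry stays masked
print(D)
print(ma.diag(D))   # extracting round-trips back to the masked vector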
Example 25
def predict_log_likelihood(self, paths, latents):
    if self.recurrent:
        observations = np.array([p["observations"][:, self.obs_regressed] for p in paths])
        actions = np.array([p["actions"][:, self.act_regressed] for p in paths])
        obs_actions = np.concatenate([observations, actions], axis=2)
        # latents must match first 2 dims: (batch, time)
    else:
        observations = np.concatenate([p["observations"][:, self.obs_regressed] for p in paths])
        actions = np.concatenate([p["actions"][:, self.act_regressed] for p in paths])
        obs_actions = np.concatenate([observations, actions], axis=1)
        latents = np.concatenate(latents, axis=0)
    if self.noisify_traj_coef:
        noise = np.random.multivariate_normal(
            mean=np.zeros_like(np.mean(obs_actions, axis=0)),
            cov=np.diag(np.mean(np.abs(obs_actions), axis=0) * self.noisify_traj_coef),
            size=np.shape(obs_actions)[0])
        obs_actions += noise
    if self.use_only_sign:
        obs_actions = np.sign(obs_actions)
    return self._regressor.predict_log_likelihood(obs_actions, latents)  # see difference with fit above...
Example 26
def lowb_mutual(self, paths, times=(0, None)):
    if self.recurrent:
        observations = np.array([p["observations"][times[0]:times[1], self.obs_regressed] for p in paths])
        actions = np.array([p["actions"][times[0]:times[1], self.act_regressed] for p in paths])
        obs_actions = np.concatenate([observations, actions], axis=2)
        latents = np.array([p['agent_infos']['latents'][times[0]:times[1]] for p in paths])
    else:
        observations = np.concatenate([p["observations"][times[0]:times[1], self.obs_regressed] for p in paths])
        actions = np.concatenate([p["actions"][times[0]:times[1], self.act_regressed] for p in paths])
        obs_actions = np.concatenate([observations, actions], axis=1)
        latents = np.concatenate([p['agent_infos']["latents"][times[0]:times[1]] for p in paths])
    if self.noisify_traj_coef:
        obs_actions += np.random.multivariate_normal(
            mean=np.zeros_like(np.mean(obs_actions, axis=0)),
            cov=np.diag(np.mean(np.abs(obs_actions), axis=0) * self.noisify_traj_coef),
            size=np.shape(obs_actions)[0])
    if self.use_only_sign:
        obs_actions = np.sign(obs_actions)
    H_latent = self.policy.latent_dist.entropy(self.policy.latent_dist_info)  # sum of entropies of latents
    return H_latent + np.mean(self._regressor.predict_log_likelihood(obs_actions, latents))
Example 27
def quaternion_matrix(quaternion):
    """Return homogeneous rotation matrix from quaternion.

    >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
    >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
    True
    >>> M = quaternion_matrix([1, 0, 0, 0])
    >>> numpy.allclose(M, numpy.identity(4))
    True
    >>> M = quaternion_matrix([0, 1, 0, 0])
    >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
    True

    """
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    n = numpy.dot(q, q)
    if n < _EPS:
        return numpy.identity(4)
    q *= math.sqrt(2.0 / n)
    q = numpy.outer(q, q)
    return numpy.array([
        [1.0-q[2, 2]-q[3, 3],     q[1, 2]-q[3, 0],     q[1, 3]+q[2, 0], 0.0],
        [    q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3],     q[2, 3]-q[1, 0], 0.0],
        [    q[1, 3]-q[2, 0],     q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],
        [                0.0,                 0.0,                 0.0, 1.0]])
Example 28
def gaussianWeight(kernelSize, even=False):
    if even == True:
        weight = np.ones([kernelSize, kernelSize])
        weight = weight.reshape((1, kernelSize**2))
        weight = np.array(weight)[0]
        weight = np.diag(weight)
        return weight

    SIGMA = 1        # the standard deviation of your normal curve
    CORRELATION = 0  # see wiki for multivariate normal distributions
    weight = np.zeros([kernelSize, kernelSize])
    cpt = kernelSize % 2 + kernelSize // 2  # gets the center point
    for i in range(len(weight)):
        for j in range(len(weight)):
            ptx = i + 1
            pty = j + 1
            weight[i, j] = 1 / (2 * np.pi * SIGMA**2) / (1 - CORRELATION**2)**.5 * \
                np.exp(-1 / (2 * (1 - CORRELATION**2)) *
                       ((ptx - cpt)**2 + (pty - cpt)**2) / (SIGMA**2))
            # weight[i,j] = 1/SIGMA/(2*np.pi)**.5*np.exp(-(pt-cpt)**2/(2*SIGMA**2))
    weight = weight.reshape((1, kernelSize**2))
    weight = np.array(weight)[0]  # convert to a 1D array
    weight = np.diag(weight)      # convert to n**2 x n**2 diagonal matrix
    return weight
    # return np.diag(weight)
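A short usage note (assuming numpy is imported as np, as above): for a kernel of size k the function returns a k-squared by k-squared diagonal matrix, with the Gaussian weights laid out along the diagonal.

W = gaussianWeight(5)
print(W.shape)                               # (25, 25)
print(np.allclose(W, np.diag(np.diag(W))))   # True: only the diagonal is populated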
Example 29
def get_sigma(self):
    """Returns the co-variance matrix of the spline

    Return:
        (np.ndarray): Co-variance matrix for the EOS, shape is (n, n)
            where n is the dof of the model
    """
    sigma = self.get_option('spline_sigma')

    return np.diag((sigma * self.prior.get_dof())**2)

    # alpha = 3/self.shape()\
    #     * np.log(self.get_option('spline_max')
    #              /self.get_option('spline_min'))
    # beta = np.log(1 + self.get_option('spline_sigma'))
    # return self.gram
Example 30
def test_model_pq(self):
    """Test the model PQ matrix generation
    """
    new_model = self.bayes.update(models={
        'simp': self.model.update_dof([4, 2])})

    P, q = new_model._get_model_pq()

    epsilon = np.array([2, 1])
    sigma = inv(np.diag(np.ones(2)))

    P_true = sigma
    q_true = -np.dot(epsilon, sigma)

    npt.assert_array_almost_equal(P, P_true, decimal=8)
    npt.assert_array_almost_equal(q, q_true, decimal=8)
Example 31
def get_sigma(self, models):
    r"""Returns the variance matrix

    variance is

    .. math::

        \Sigma_i = \sigma_t^2 + \frac{\sigma_x^2}{V_{CJ}}

    **Where**
        - :math:`\sigma_t` is the error in time measurements
        - :math:`\sigma_x` is the error in sensor position
        - :math:`V_{CJ}` is the detonation velocity

    see :py:meth:`F_UNCLE.Utils.Experiment.Experiment.get_sigma`
    """
    eos = self.check_models(models)[0]

    return np.diag(np.ones(self.shape()))
Example 32
def supcell(file, scale, outmode):
    from ababe.io.io import GeneralIO
    import os
    import numpy as np

    basefname = os.path.basename(file)

    gcell = GeneralIO.from_file(file)
    scale_matrix = np.diag(np.array(scale))
    sc = gcell.supercell(scale_matrix)
    out = GeneralIO(sc)

    print("PROCESSING: {:}".format(file))
    if outmode == 'stdio':
        out.write_file(fname=None, fmt='vasp')
    else:
        ofname = "{:}_SUPC.{:}".format(basefname.split('.')[0], outmode)
        out.write_file(ofname)
Example 33
def do_seg_tests(net, iter, save_format, dataset, layer='score', gt='label'):
    n_cl = net.blobs[layer].channels
    if save_format:
        save_format = save_format.format(iter)
    hist, loss = compute_hist(net, save_format, dataset, layer, gt)
    # mean loss
    print '>>>', datetime.now(), 'Iteration', iter, 'loss', loss
    # overall accuracy
    acc = np.diag(hist).sum() / hist.sum()
    print '>>>', datetime.now(), 'Iteration', iter, 'overall accuracy', acc
    # per-class accuracy
    acc = np.diag(hist) / hist.sum(1)
    print '>>>', datetime.now(), 'Iteration', iter, 'mean accuracy', np.nanmean(acc)
    # per-class IU
    iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
    print '>>>', datetime.now(), 'Iteration', iter, 'mean IU', np.nanmean(iu)
    freq = hist.sum(1) / hist.sum()
    print '>>>', datetime.now(), 'Iteration', iter, 'fwavacc', \
        (freq[freq > 0] * iu[freq > 0]).sum()
    return hist
Example 34
def label_accuracy_score(label_trues, label_preds, n_class):
    """Returns accuracy score evaluation result.
      - overall accuracy
      - mean accuracy
      - mean IU
      - fwavacc
    """
    hist = np.zeros((n_class, n_class))
    for lt, lp in zip(label_trues, label_preds):
        hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    acc = np.diag(hist).sum() / hist.sum()
    acc_cls = np.diag(hist) / hist.sum(axis=1)
    acc_cls = np.nanmean(acc_cls)
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    return acc, acc_cls, mean_iu, fwavacc

# -----------------------------------------------------------------------------
# Visualization
# -----------------------------------------------------------------------------
Example 35
def fit(self, X):
    '''Required for featurewise_center, featurewise_std_normalization
    and zca_whitening.

    # Arguments
        X: Numpy array, the data to fit on.
    '''
    if self.featurewise_center:
        self.mean = np.mean(X, axis=0)
        X -= self.mean

    if self.featurewise_std_normalization:
        self.std = np.std(X, axis=0)
        X /= (self.std + 1e-7)

    if self.zca_whitening:
        flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3]))
        sigma = np.dot(flatX.T, flatX) / flatX.shape[1]
        U, S, V = linalg.svd(sigma)
        self.principal_components = np.dot(
            np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
Example 36
def __init__(self):
    """Initialize variables used by the Kalman Filter class

    Args:
        None

    Return:
        None
    """
    self.dt = 0.005  # delta time

    self.A = np.array([[1, 0], [0, 1]])  # matrix in observation equations
    self.u = np.zeros((2, 1))  # previous state vector

    # (x, y) tracking object center
    self.b = np.array([[0], [255]])  # vector of observations

    self.P = np.diag((3.0, 3.0))  # covariance matrix
    self.F = np.array([[1.0, self.dt], [0.0, 1.0]])  # state transition mat

    self.Q = np.eye(self.u.shape[0])  # process noise matrix
    self.R = np.eye(self.b.shape[0])  # observation noise matrix
    self.lastResult = np.array([[0], [255]])
Example 37
def draw(vmean, vlogstd):
    from scipy import stats
    plt.cla()
    xlimits = [-2, 2]
    ylimits = [-4, 2]

    def log_prob(z):
        z1, z2 = z[:, 0], z[:, 1]
        return stats.norm.logpdf(z2, 0, 1.35) + \
            stats.norm.logpdf(z1, 0, np.exp(z2))

    plot_isocontours(ax, lambda z: np.exp(log_prob(z)), xlimits, ylimits)

    def variational_contour(z):
        return stats.multivariate_normal.pdf(
            z, vmean, np.diag(np.exp(vlogstd)))

    plot_isocontours(ax, variational_contour, xlimits, ylimits)
    plt.draw()
    plt.pause(1.0 / 30.0)
Example 38
def get_neg_log_post(Phi, sigma_J_list, ROI_list, G, MMT, q, Sigma_E, GL,
                     nu, V, prior_on=False):
    eps = 1E-13
    p = Phi.shape[0]
    n_ROI = len(sigma_J_list)
    Qu = Phi.dot(Phi.T)
    G_Sigma_G = np.zeros(MMT.shape)
    for i in range(n_ROI):
        G_Sigma_G += sigma_J_list[i]**2 * np.dot(G[:, ROI_list[i]], G[:, ROI_list[i]].T)
    cov = Sigma_E + G_Sigma_G + GL.dot(Qu).dot(GL.T)
    inv_cov = np.linalg.inv(cov)
    eigs = np.real(np.linalg.eigvals(cov)) + eps
    log_det_cov = np.sum(np.log(eigs))
    result = q * log_det_cov + np.trace(MMT.dot(inv_cov))
    if prior_on:
        inv_Q = np.linalg.inv(Qu)
        # det_Q = np.linalg.det(Qu)
        log_det_Q = np.sum(np.log(np.diag(Phi)**2))
        # np.float in the original; it was removed in NumPy 1.24
        result = result + float(nu + p + 1) * log_det_Q + np.trace(V.dot(inv_Q))
    return result

# ==============================================================================
# update both Qu and Sigma_J, gradient of Qu and Sigma J
Example 39
def fit(self, x):
    s = x.shape
    x = x.copy().reshape((s[0], np.prod(s[1:])))
    m = np.mean(x, axis=0)
    x -= m
    sigma = np.dot(x.T, x) / x.shape[0]
    U, S, V = linalg.svd(sigma)
    tmp = np.dot(U, np.diag(1. / np.sqrt(S + self.regularization)))
    tmp2 = np.dot(U, np.diag(np.sqrt(S + self.regularization)))
    self.ZCA_mat = th.shared(np.dot(tmp, U.T).astype(th.config.floatX))
    self.inv_ZCA_mat = th.shared(np.dot(tmp2, U.T).astype(th.config.floatX))
    self.mean = th.shared(m.astype(th.config.floatX))
Example 40
def htmt(self):
    htmt_ = pd.DataFrame(pd.DataFrame.corr(self.data_),
                         index=self.manifests, columns=self.manifests)

    mean = []
    allBlocks = []
    for i in range(self.lenlatent):
        block_ = self.Variables['measurement'][
            self.Variables['latent'] == self.latent[i]]
        allBlocks.append(list(block_.values))
        block = htmt_.ix[block_, block_]
        mean_ = (block - np.diag(np.diag(block))).values
        mean_[mean_ == 0] = np.nan
        mean.append(np.nanmean(mean_))

    comb = [[k, j] for k in range(self.lenlatent)
            for j in range(self.lenlatent)]

    comb_ = [(np.sqrt(mean[comb[i][1]] * mean[comb[i][0]]))
             for i in range(self.lenlatent ** 2)]

    comb__ = []
    for i in range(self.lenlatent ** 2):
        block = (htmt_.ix[allBlocks[comb[i][1]],
                          allBlocks[comb[i][0]]]).values
        # block[block == 1] = np.nan
        comb__.append(np.nanmean(block))

    htmt__ = np.divide(comb__, comb_)
    where_are_NaNs = np.isnan(htmt__)
    htmt__[where_are_NaNs] = 0

    htmt = pd.DataFrame(np.tril(htmt__.reshape(
        (self.lenlatent, self.lenlatent)), k=-1),
        index=self.latent, columns=self.latent)

    return htmt
Example 41
def _solve_equation_least_squares(self, A, B):
    """Solve the system of linear equations A X = B.

    The pseudo-inverse is currently used because it also handles
    singular matrices.

    Args:
        A (numpy.ndarray): Left-hand side of equation.
        B (numpy.ndarray): Right-hand side of equation.

    Returns:
        X (numpy.ndarray): Solution of equation.
    """
    # Pseudo-inverse
    X = np.dot(np.linalg.pinv(A), B)

    # LU decomposition
    # lu, piv = scipy.linalg.lu_factor(A)
    # X = scipy.linalg.lu_solve((lu, piv), B)

    # Vanilla least-squares from numpy
    # X, _, _, _ = np.linalg.lstsq(A, B)

    # QR decomposition
    # Q, R, P = scipy.linalg.qr(A, mode='economic', pivoting=True)
    # # Find first zero element in R
    # out = np.where(np.diag(R) == 0)[0]
    # if out.size == 0:
    #     i = R.shape[0]
    # else:
    #     i = out[0]
    # B_prime = np.dot(Q.T, B)
    # X = np.zeros((A.shape[1], B.shape[1]), dtype=A.dtype)
    # X[P[:i], :] = scipy.linalg.solve_triangular(R[:i, :i], B_prime[:i, :])

    return X
Example 42
def get_whitening_matrix(X, fudge=1E-18):
    from numpy.linalg import eigh
    Xcov = numpy.dot(X.T, X) / X.shape[0]
    d, V = eigh(Xcov)
    D = numpy.diag(1. / numpy.sqrt(d + fudge))
    W = numpy.dot(numpy.dot(V, D), V.T)
    return W
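A hedged check of the ZCA property: for centered data X, multiplying by W should leave a sample covariance close to the identity (a small sketch, assuming the function above is in scope):

import numpy

rng = numpy.random.default_rng(0)
X = rng.standard_normal((2000, 4)) @ rng.standard_normal((4, 4))
X -= X.mean(axis=0)   # the function assumes centered data
W = get_whitening_matrix(X)
cov_white = numpy.dot((X @ W).T, X @ W) / X.shape[0]
print(numpy.allclose(cov_white, numpy.eye(4), atol=1e-6))  # True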
Example 43
def fit(self, X, C, y, regions, kernelType, reml=True, maxiter=100):
    # construct a list of kernel names (one for each region)
    if (kernelType == 'adapt'):
        kernelNames = self.buildKernelAdapt(X, C, y, regions, reml, maxiter)
    else:
        kernelNames = [kernelType] * len(regions)

    # perform optimization
    kernelObj, hyp_kernels, sig2e, fixedEffects = self.optimize(
        X, C, y, kernelNames, regions, reml, maxiter)

    # compute posterior distribution
    Ktraintrain = kernelObj.getTrainKernel(hyp_kernels)
    post = self.infExact_scipy_post(Ktraintrain, C, y, sig2e, fixedEffects)

    # fix intercept if phenotype is binary
    if (len(np.unique(y)) == 2):
        controls = (y < y.mean())
        cases = ~controls
        meanVec = C.dot(fixedEffects)
        mu, var = self.getPosteriorMeanAndVar(np.diag(Ktraintrain), Ktraintrain,
                                              post, meanVec)
        fixedEffects[0] -= optimize.minimize_scalar(
            self.getNegLL, args=(mu, np.sqrt(sig2e + var), controls, cases),
            method='brent').x

    # construct trainObj
    trainObj = dict([])
    trainObj['sig2e'] = sig2e
    trainObj['hyp_kernels'] = hyp_kernels
    trainObj['fixedEffects'] = fixedEffects
    trainObj['kernelNames'] = kernelNames
    return trainObj
Example 44
def getPosteriorMeanAndVar(self, diagKTestTest, KtrainTest, post, intercept=0):
    L = post['L']
    if (np.size(L) == 0):
        raise Exception('L is an empty array')  # possible to compute it here
    Lchol = np.all((np.all(np.tril(L, -1) == 0, axis=0) & (np.diag(L) > 0)) &
                   np.isreal(np.diag(L)))
    ns = diagKTestTest.shape[0]
    nperbatch = 5000
    nact = 0

    # allocate mem
    fmu = np.zeros(ns)  # column vector (of length ns) of predictive latent means
    fs2 = np.zeros(ns)  # column vector (of length ns) of predictive latent variances

    while (nact < (ns - 1)):
        id = np.arange(nact, np.minimum(nact + nperbatch, ns))
        kss = diagKTestTest[id]
        Ks = KtrainTest[:, id]
        if (len(post['alpha'].shape) == 1):
            try:
                Fmu = intercept[id] + Ks.T.dot(post['alpha'])
            except:
                Fmu = intercept + Ks.T.dot(post['alpha'])
            fmu[id] = Fmu
        else:
            try:
                Fmu = intercept[id][:, np.newaxis] + Ks.T.dot(post['alpha'])
            except:
                Fmu = intercept + Ks.T.dot(post['alpha'])
            fmu[id] = Fmu.mean(axis=1)
        if Lchol:
            V = la.solve_triangular(L, Ks * np.tile(post['sW'], (id.shape[0], 1)).T,
                                    trans=1, check_finite=False, overwrite_b=True)
            fs2[id] = kss - np.sum(V**2, axis=0)  # predictive variances
        else:
            fs2[id] = kss + np.sum(Ks * (L.dot(Ks)), axis=0)  # predictive variances
        fs2[id] = np.maximum(fs2[id], 0)  # remove numerical noise, i.e. negative variances
        nact = id[-1]  # set counter to index of last processed data point

    return fmu, fs2
Example 45
def getTestKernelDiag(self, params, Xtest):
    self.checkParams(params)
    if (len(Xtest) != len(self.kernels)):
        raise Exception('Xtest should be a list with length equal to #kernels!')
    diag = 0
    params_ind = 0
    for k_i, k in enumerate(self.kernels):
        numHyp = k.getNumParams()
        diag += k.getTestKernelDiag(params[params_ind:params_ind + numHyp], Xtest[k_i])
        params_ind += numHyp
    return diag

# the only parameter here is the bias term c...
Example 46
def getTestKernelDiag(self, params, Xtest):
    self.checkParams(params)
    b = np.exp(params[0])
    k = np.exp(params[1])
    M = 1.0 / (b + np.exp(k * self.D))
    Xtest_scaled = Xtest / np.sqrt(Xtest.shape[1])
    return np.diag((Xtest_scaled).dot(M).dot(Xtest_scaled.T))

# LD kernel with exponential decay but no bias term
Example 47
def getTestKernelDiag(self, params, Xtest):
    self.checkParams(params)
    k = np.exp(params[0])
    M = 1.0 / (1.0 + k * self.D)
    Xtest_scaled = Xtest / np.sqrt(Xtest.shape[1])
    return np.diag((Xtest_scaled).dot(M).dot(Xtest_scaled.T))

# LD kernel with polynomial decay
Example 48
def getTestKernelDiag(self, params, Xtest):
    self.checkParams(params)
    p = np.exp(params[0])
    k = np.exp(params[1])
    M = 1.0 / (1.0 + k * (self.D**p))
    Xtest_scaled = Xtest / np.sqrt(Xtest.shape[1])
    return np.diag((Xtest_scaled).dot(M).dot(Xtest_scaled.T))
Example 49
def symmetrize(X):
    return X + X.T - np.diag(X.diagonal())

# this code is directly translated from the GPML Matlab package (see attached license)
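symmetrize mirrors a triangular matrix across the diagonal while keeping the diagonal itself intact; a quick check:

import numpy as np

U = np.array([[1., 2., 3.],
              [0., 4., 5.],
              [0., 0., 6.]])   # upper triangular
S = symmetrize(U)
print(np.allclose(S, S.T))     # True
print(np.diag(S))              # [1. 4. 6.] -- diagonal unchanged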
Example 50
def l1_norm_off_diag(A):
    "convenience method for l1 norm, excluding diagonal"
    # let's speed this up later
    # assume A symmetric, sparse too
    return np.linalg.norm(A - np.diag(A.diagonal()), ord=1)
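One caveat worth knowing, offered as an observation rather than part of the original: for a 2-D array, np.linalg.norm with ord=1 is the induced 1-norm (the maximum absolute column sum), not the entrywise sum of absolute values. A small example illustrating the difference on the off-diagonal part:

import numpy as np

A = np.array([[1., 2.],
              [3., 4.]])
off = A - np.diag(A.diagonal())   # [[0, 2], [3, 0]]
print(np.linalg.norm(off, ord=1)) # 3.0  (max column sum)
print(np.abs(off).sum())          # 5.0  (entrywise l1)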