The following code examples show how to use numpy.diag_indices_from(). They are extracted from open source Python projects.
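As a quick primer (a minimal sketch, not taken from any of the projects below): numpy.diag_indices_from(a) returns the row and column index arrays of the main diagonal of a square array a, which makes it easy to read or update the diagonal in place.

import numpy as np

a = np.zeros((3, 3))
rows, cols = np.diag_indices_from(a)    # (array([0, 1, 2]), array([0, 1, 2]))
a[rows, cols] = 1.0                     # equivalent to np.fill_diagonal(a, 1.0)
a[np.diag_indices_from(a)] += 0.5      # shift the diagonal in place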
Example 1
def f_electric(P):
    n = len(P)
    a = 1e-9*np.array([p.a for p in P])            # particle radii, scaled by 1e-9
    apa = a[:, None] + a[None, :]                  # pairwise sums of radii
    x = 1e-9*np.array([p.x.flatten() for p in P])
    R = x[:, None, :] - x[None, :, :]              # pairwise displacement vectors
    r = np.sqrt(np.sum(R**2, 2) + 1e-100)          # pairwise distances (regularized)
    R0 = R / (r**3)[:, :, None]
    q = np.array([float(p.charge) for p in P])
    const = qq**2 / (4.*np.pi*eperm*rpermw)        # qq, eperm, rpermw: module-level constants in the source project
    QQ = q[:, None] * q[None, :]
    F = const * QQ[:, :, None] * R0
    #F[np.diag_indices_from(r)] = 0.
    tooclose = r <= apa
    R0i = R / (np.maximum(a[:, None], a[None, :])**3)[:, :, None]
    F[tooclose] = (const * QQ[:, :, None] * R0i)[tooclose]
    f = np.sum(F, 1).T.reshape(3*n, 1)
    return f
Example 2
def kth_diag_indices(k, a):
    """
    Get diagonal indices of 2D array 'a' offset by 'k'

    Parameters
    ----------
    k : int
        Diagonal offset
    a : numpy.ndarray
        Input numpy 2D array

    Returns
    -------
    indices : tuple of two numpy.ndarray
        It contains indices of elements that are on the diagonal offset by 'k'.
    """
    rows, cols = np.diag_indices_from(a)
    if k < 0:
        return rows[:k], cols[-k:]
    elif k > 0:
        return rows[k:], cols[:-k]
    else:
        return rows, cols
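A short usage sketch (made-up matrix) to pin down the sign convention of the function above: positive k selects a diagonal below the main one, negative k a diagonal above it.

import numpy as np

a = np.arange(16).reshape(4, 4)
print(a[kth_diag_indices(1, a)])    # [ 4  9 14] -- first diagonal below the main
print(a[kth_diag_indices(-1, a)])   # [ 1  6 11] -- first diagonal above the main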
Example 3
def _kalman_correct(x, P, z, H, R, gain_factor, gain_curve):
    PHT = np.dot(P, H.T)

    S = np.dot(H, PHT) + R
    e = z - H.dot(x)
    L = cholesky(S, lower=True)
    inn = solve_triangular(L, e, lower=True)

    if gain_curve is not None:
        q = (np.dot(inn, inn) / inn.shape[0]) ** 0.5
        f = gain_curve(q)
        if f == 0:
            return inn
        L *= (q / f) ** 0.5

    K = cho_solve((L, True), PHT.T, overwrite_b=True).T
    if gain_factor is not None:
        K *= gain_factor[:, None]

    U = -K.dot(H)
    U[np.diag_indices_from(U)] += 1
    x += K.dot(z - H.dot(x))
    P[:] = U.dot(P).dot(U.T) + K.dot(R).dot(K.T)

    return inn
Example 4
def _solve_hessian(G, Y, thY, precon, lambda_min):
    N, T = Y.shape
    # Compute the derivative of the score
    psidY = ne.evaluate('(- thY ** 2 + 1.) / 2.')  # noqa
    # Build the diagonal of the Hessian, a.
    Y_squared = Y ** 2
    if precon == 2:
        a = np.inner(psidY, Y_squared) / float(T)
    elif precon == 1:
        sigma2 = np.mean(Y_squared, axis=1)
        psidY_mean = np.mean(psidY, axis=1)
        a = psidY_mean[:, None] * sigma2[None, :]
        diagonal_term = np.mean(Y_squared * psidY) + 1.
        a[np.diag_indices_from(a)] = diagonal_term
    else:
        raise ValueError('precon should be 1 or 2')
    # Compute the eigenvalues of the Hessian
    eigenvalues = 0.5 * (a + a.T - np.sqrt((a - a.T) ** 2 + 4.))
    # Regularize
    problematic_locs = eigenvalues < lambda_min
    np.fill_diagonal(problematic_locs, False)
    i_pb, j_pb = np.where(problematic_locs)
    a[i_pb, j_pb] += lambda_min - eigenvalues[i_pb, j_pb]
    # Invert the transform
    return (G * a.T - G.T) / (a * a.T - 1.)
Example 5
def block_covariance(self):
    "return average covariance within block"
    if self._block_covariance is None:
        if self.ndb <= 1:  # point kriging
            self._block_covariance = self.unbias
        else:
            cov = list()
            for x1, y1, z1 in izip(self.xdb, self.ydb, self.zdb):
                for x2, y2, z2 in izip(self.xdb, self.ydb, self.zdb):
                    # cov.append(self._cova3((x1, y1, z1), (x2, y2, z2)))
                    cov.append(cova3(
                        (x1, y1, z1), (x2, y2, z2),
                        self.rotmat, self.maxcov, self.nst,
                        self.it, self.cc, self.aa_hmax))
            cov = np.array(cov).reshape((self.ndb, self.ndb))
            cov[np.diag_indices_from(cov)] -= self.c0
            self._block_covariance = np.mean(cov)
    return self._block_covariance
Example 6
def run(self, verbose=True):
    """
    Conducts particle swarm optimization

    :param verbose: indicates whether or not to print progress regularly
    :return: best member of swarm and objective function value of best member of swarm
    """
    self._clear()
    for i in range(self.max_steps):
        self.cur_steps += 1

        if verbose and ((i + 1) % 100 == 0):
            print(self)

        u1 = zeros((self.swarm_size, self.swarm_size))
        u1[diag_indices_from(u1)] = [random() for x in range(self.swarm_size)]
        u2 = zeros((self.swarm_size, self.swarm_size))
        u2[diag_indices_from(u2)] = [random() for x in range(self.swarm_size)]

        vel_new = (self.c1 * self.vel) + \
                  (self.c2 * dot(u1, (self.best - self.pos))) + \
                  (self.c3 * dot(u2, (self.global_best - self.pos)))

        pos_new = self.pos + vel_new

        self._best(self.pos, pos_new)
        self.pos = pos_new
        self.scores = self._score(self.pos)
        self._global_best()

        if self._objective(self.global_best[0]) < (self.min_objective or 0):
            print("TERMINATING - REACHED MINIMUM OBJECTIVE")
            return self.global_best[0], self._objective(self.global_best[0])

    print("TERMINATING - REACHED MAXIMUM STEPS")
    return self.global_best[0], self._objective(self.global_best[0])
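A note on the u1/u2 construction above: filling the diagonal of a zero matrix with uniform randoms and then applying dot() just gives each particle its own random weight. A minimal sketch (plain NumPy, made-up sizes) showing that the diagonal-matrix form and direct row scaling agree:

import numpy as np

rng = np.random.default_rng(0)
n = 4
delta = rng.standard_normal((n, 3))     # stand-in for best - pos, 4 particles in 3D

u = np.zeros((n, n))                    # diagonal-matrix form, as in the example
u[np.diag_indices_from(u)] = rng.random(n)

assert np.allclose(u.dot(delta), u.diagonal()[:, None] * delta)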
Example 7
def build_base_operator(self, t):
    """
    :param t: Not used as mu and sigma are constant
    :return:
    """
    # Update drift and volatility
    self.build_moment_vectors(t)

    base_operator = np.zeros((self.d, self.d))

    nabla = linalg.block_diag(*[self.build_gradient_matrix(x)
                                for x in range(1, self.d - 1)])
    moments = np.zeros(2 * (self.d - 2))
    for i in range(0, self.d - 2):
        moments[2 * i] = self.drift[i + 1]
        moments[2 * i + 1] = self.volatility[i + 1]

    generator_elements = linalg.solve(nabla, moments)

    r_idx, c_idx = np.diag_indices_from(base_operator)
    base_operator[r_idx[1:-1], c_idx[:-2]] = generator_elements[::2]
    base_operator[r_idx[1:-1], c_idx[2:]] = generator_elements[1::2]
    np.fill_diagonal(base_operator, -np.sum(base_operator, axis=1))

    # -- Boundary Condition: Volatility Matching --
    nabla_0 = self.grid[1] - self.grid[0]
    base_operator[0, 0] = -1. * self.volatility[0] / (nabla_0 * nabla_0)
    base_operator[0, 1] = -base_operator[0, 0]

    nabla_d = self.grid[self.d - 1] - self.grid[self.d - 2]
    base_operator[self.d - 1, self.d - 1] = -1. * self.volatility[self.d - 1] / (nabla_d * nabla_d)
    base_operator[self.d - 1, self.d - 2] = -base_operator[self.d - 1, self.d - 1]
    # ----------------------------------------------

    self.sanity_check_base_operator(base_operator)

    return base_operator
Example 8
def solve(self):
    DI = np.diag_indices_from(self.SII)
    D = self.SII[DI]
    # for i in xrange(1, self.SII.shape[0]): self.SII[i:, i-1] = self.SII[i-1, i:]
    self.SII[DI] += np.mean(D)
    _, self.W, _ = sposv(self.SII, self.SIO,
                         overwrite_a=True, overwrite_b=False)  # , lower=1)
    # self.SII[DI] = D
Example 9
def divide_diagonal_by_2(CHI0, div_fact=2.):
    CHI = CHI0.copy()
    CHI[np.diag_indices_from(CHI)] /= div_fact
    return CHI
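A quick usage note (made-up matrix): because the function copies its input, the caller's array keeps its original diagonal.

import numpy as np

M = np.full((3, 3), 4.0)
halved = divide_diagonal_by_2(M)
print(halved.diagonal())    # [2. 2. 2.]
print(M.diagonal())         # [4. 4. 4.], the input is untouched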
Example 10
def _add_to_diag(A, z):
    B = A.copy()
    B[np.diag_indices_from(B)] += z  # add z to the diagonal of the copy
    return B
Example 11
def from_quat(quat):
    """Create a direction cosine matrix from a quaternion.

    First 3 elements of the quaternion form its vector part.

    Parameters
    ----------
    quat : array_like, shape (4,) or (n, 4)
        Quaternions.

    Returns
    -------
    dcm : ndarray, shape (3, 3) or (n, 3, 3)
        Direction cosine matrices.
    """
    q = np.asarray(quat)
    if q.ndim == 1:
        rho = q[:3]
        q4 = q[3]
        rho_skew = _skew_matrix_single(rho)
        dcm = 2 * (np.outer(rho, rho) + q4 * rho_skew)
        dcm[np.diag_indices_from(dcm)] += q4**2 - np.dot(rho, rho)
    else:
        rho = q[:, :3]
        q4 = q[:, 3]
        rho_skew = _skew_matrix_array(rho)
        dcm = 2 * (rho[:, None, :] * rho[:, :, None] +
                   q4[:, None, None] * rho_skew)
        diag = q4**2 - np.sum(rho**2, axis=1)
        dcm[:, np.arange(3), np.arange(3)] += diag[:, None]

    return dcm
Example 12
def fun(self, x):
    dx, dy, dz = self._compute_coordinate_deltas(x)
    with np.errstate(divide='ignore'):
        dm1 = (dx**2 + dy**2 + dz**2) ** -0.5
    dm1[np.diag_indices_from(dm1)] = 0

    return 0.5 * np.sum(dm1)
Example 13
def grad(self, x):
    dx, dy, dz = self._compute_coordinate_deltas(x)

    with np.errstate(divide='ignore'):
        dm3 = (dx**2 + dy**2 + dz**2) ** -1.5
    dm3[np.diag_indices_from(dm3)] = 0

    grad_x = -np.sum(dx * dm3, axis=1)
    grad_y = -np.sum(dy * dm3, axis=1)
    grad_z = -np.sum(dz * dm3, axis=1)

    return np.hstack((grad_x, grad_y, grad_z))
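Examples 12 and 13 share one pattern: the pairwise inverse-distance matrix has infinities on its diagonal (each point's distance to itself is zero), which diag_indices_from zeroes out before summing. A self-contained sketch of that pattern with three made-up points:

import numpy as np

pts = np.array([[0., 0.], [1., 0.], [0., 1.]])
diff = pts[:, None, :] - pts[None, :, :]          # pairwise coordinate deltas
with np.errstate(divide='ignore'):
    inv_d = np.sum(diff**2, axis=-1) ** -0.5      # inf on the diagonal
inv_d[np.diag_indices_from(inv_d)] = 0            # drop self-interactions
energy = 0.5 * np.sum(inv_d)                      # each pair counted once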
Example 14
def tail_correlations(corrmat, tails, mask=None):
    """Compute the mean within and between tail correlations."""
    assert corrmat.shape == (len(tails), len(tails))
    if mask is not None:
        assert corrmat.shape == mask.shape

    def is_within(pair):
        a, b = pair
        return a == b

    # Identify cells in the matrix that represent within or between pairs
    pairs = product(tails, tails)
    wmat = np.reshape(list(map(is_within, pairs)), corrmat.shape)
    bmat = ~wmat

    # Possibly exclude cells with the mask
    if mask is not None:
        wmat &= mask
        bmat &= mask

    # Remove the diagonal from the within matrix
    wmat[np.diag_indices_from(wmat)] = False

    # Average the correlations for within and between pairs
    corr_w = corrmat[wmat].mean()
    corr_b = corrmat[bmat].mean()

    return corr_w, corr_b
Example 15
def block_covariance(self):
    "the block covariance"
    if self._block_covariance is None:
        self._block_covariance = 0
        if self.ndb <= 1:  # point kriging
            self._block_covariance = self.unbias
        else:  # block kriging
            cov = list()
            for x1, y1 in izip(self.xdb, self.ydb):
                for x2, y2 in izip(self.xdb, self.ydb):
                    cov.append(self._cova2(x1, y1, x2, y2))
            cov = np.array(cov).reshape((self.ndb, self.ndb))
            cov[np.diag_indices_from(cov)] -= self.c0
            self._block_covariance = np.mean(cov)
    return self._block_covariance
Example 16
def kth_diag_indices(array, diag_k):
    """Return a tuple of indices for retrieving the k'th diagonal
    of matrix a.

    :param array: Input matrix.
    :type array: :class:`numpy array <numpy.ndarray>`
    :param int diag_k: Diagonal to index. 0 is the centre, 1 is the \
            first diagonal below the centre, -1 is the first diagonal \
            above the centre.

    >>> my_matrix = np.array([[ 0, -1, -2, -3],
    ...                       [ 1,  0, -1, -2],
    ...                       [ 2,  1,  0, -1],
    ...                       [ 3,  2,  1,  0]])
    >>> matrix.kth_diag_indices(my_matrix, 1)
    (array([1, 2, 3]), array([0, 1, 2]))
    >>> my_matrix[
    ...     matrix.kth_diag_indices(my_matrix, 1)
    ... ]
    array([1, 1, 1])
    >>> my_matrix[
    ...     matrix.kth_diag_indices(my_matrix, -2)
    ... ]
    array([-2, -2])
    """
    rows, cols = np.diag_indices_from(array)
    if diag_k < 0:
        return rows[:diag_k], cols[-diag_k:]
    if diag_k > 0:
        return rows[diag_k:], cols[:-diag_k]
    return rows, cols
Example 17
def evaluate(self, x):
    try:
        return multivariate_normal.logpdf(x, self.mean, self.cov)
    except (np.linalg.LinAlgError, ValueError):
        # Singular covariance: add a small jitter to the diagonal and retry
        self.cov[np.diag_indices_from(self.cov)] += 1e-4
        return multivariate_normal.logpdf(x, self.mean, self.cov)
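The example above is the common "jitter" trick: if the covariance is numerically singular, add a small constant to its diagonal and retry. A standalone sketch with made-up data (a rank-1 covariance that is singular on purpose):

import numpy as np
from scipy.stats import multivariate_normal

mean = np.zeros(3)
cov = np.ones((3, 3))                             # rank 1, hence singular
try:
    lp = multivariate_normal.logpdf(np.zeros(3), mean, cov)
except (np.linalg.LinAlgError, ValueError):       # raised for singular covariances
    cov[np.diag_indices_from(cov)] += 1e-4        # jitter the diagonal
    lp = multivariate_normal.logpdf(np.zeros(3), mean, cov)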
Example 18
def _calculate_reduced_likelihood_params(self, thetas=None):
    """
    Calculate quantity with same maximum location as the log-likelihood
    for a given theta.

    Parameters
    ----------
    thetas : ndarray, optional
        Given input correlation coefficients. If none given, uses
        self.thetas from training.
    """
    if thetas is None:
        thetas = self.thetas

    X, Y = self.X, self.Y
    params = {}

    # Correlation Matrix
    distances = np.zeros((self.n_samples, self.n_dims, self.n_samples))
    for i in range(self.n_samples):
        distances[i, :, i + 1:] = np.abs(X[i, ...] - X[i + 1:, ...]).T
        distances[i + 1:, :, i] = distances[i, :, i + 1:].T

    R = np.exp(-thetas.dot(np.square(distances)))
    R[np.diag_indices_from(R)] = 1. + self.nugget

    [U, S, Vh] = linalg.svd(R)

    # Moore-Penrose Pseudo-Inverse:
    # Given A = USV^* and Ax = b, the least-squares solution is
    # x = V S^-1 U^* b.
    # Tikhonov regularization is used to make the solution significantly
    # more robust.
    h = 1e-8 * S[0]
    inv_factors = S / (S ** 2. + h ** 2.)

    alpha = Vh.T.dot(np.einsum('j,kj,kl->jl', inv_factors, U, Y))
    logdet = -np.sum(np.log(inv_factors))
    sigma2 = np.dot(Y.T, alpha).sum(axis=0) / self.n_samples
    reduced_likelihood = -(np.log(np.sum(sigma2)) + logdet / self.n_samples)

    params['alpha'] = alpha
    params['sigma2'] = sigma2 * np.square(self.Y_std)
    params['S_inv'] = inv_factors
    params['U'] = U
    params['Vh'] = Vh

    return reduced_likelihood, params
Example 19
def test_krr_cmat():

    test_dir = os.path.dirname(os.path.realpath(__file__))

    # Parse file containing PBE0/def2-TZVP heats of formation and xyz filenames
    data = get_energies(test_dir + "/data/hof_qm7.txt")

    # Generate a list of qml.Compound() objects
    mols = []
    for xyz_file in sorted(data.keys())[:1000]:
        # Initialize the qml.Compound() objects
        mol = qml.Compound(xyz=test_dir + "/qm7/" + xyz_file)

        # Associate a property (heat of formation) with the object
        mol.properties = data[xyz_file]

        # This is a Molecular Coulomb matrix sorted by row norm
        mol.generate_coulomb_matrix(size=23, sorting="row-norm")

        mols.append(mol)

    # Shuffle molecules
    np.random.seed(666)
    np.random.shuffle(mols)

    # Make training and test sets
    n_test = 300
    n_train = 700
    training = mols[:n_train]
    test = mols[-n_test:]

    # List of representations
    X = np.array([mol.representation for mol in training])
    Xs = np.array([mol.representation for mol in test])

    # List of properties
    Y = np.array([mol.properties for mol in training])
    Ys = np.array([mol.properties for mol in test])

    # Set hyper-parameters
    sigma = 10**(4.2)
    llambda = 10**(-10.0)

    # Generate training Kernel
    K = laplacian_kernel(X, X, sigma)

    # Solve alpha
    K[np.diag_indices_from(K)] += llambda
    alpha = cho_solve(K, Y)

    # Calculate prediction kernel
    Ks = laplacian_kernel(X, Xs, sigma)
    Yss = np.dot(Ks.transpose(), alpha)

    mae = np.mean(np.abs(Ys - Yss))
    print(mae)
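The line K[np.diag_indices_from(K)] += llambda above is the standard ridge step of kernel ridge regression: it turns K into K + lambda*I before the Cholesky solve. A self-contained sketch of that step, with qml's laplacian_kernel and cho_solve swapped for a Gaussian kernel and SciPy's Cholesky solver, and random stand-in data:

import numpy as np
from scipy.linalg import cho_factor, cho_solve

rng = np.random.default_rng(0)
X = rng.standard_normal((50, 5))
y = rng.standard_normal(50)

sigma, llambda = 1.0, 1e-8
sq_dists = np.sum((X[:, None, :] - X[None, :, :])**2, axis=-1)
K = np.exp(-sq_dists / (2 * sigma**2))            # Gaussian kernel matrix

K[np.diag_indices_from(K)] += llambda             # regularize: K + lambda*I
alpha = cho_solve(cho_factor(K), y)               # training weights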