The following are code examples showing how to use numpy.broadcast_to. They are extracted from open source Python projects.
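For orientation before the project examples, here is a minimal sketch (not taken from any of the projects below) of the basic call: numpy.broadcast_to stretches an array to a larger compatible shape without copying data, and the result is a read-only view, which is why several examples below call .copy() on the result before writing to it.

import numpy as np

x = np.array([1, 2, 3])
view = np.broadcast_to(x, (2, 3))   # read-only view with shape (2, 3); no data is copied
writable = view.copy()              # take a copy when the broadcast result must be modified
writable[0, 0] = 99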
Example 1
def get_local_wavenumbermesh(self, scaled=True, broadcast=False,
                             eliminate_highest_freq=False):
    kx = fftfreq(self.N[0], 1./self.N[0])
    ky = rfftfreq(self.N[1], 1./self.N[1])
    if eliminate_highest_freq:
        for i, k in enumerate((kx, ky)):
            if self.N[i] % 2 == 0:
                k[self.N[i]//2] = 0
    Ks = np.meshgrid(kx, ky[self.rank*self.Np[1]//2:(self.rank*self.Np[1]//2+self.Npf)],
                     indexing='ij', sparse=True)
    if scaled is True:
        Lp = 2*np.pi/self.L
        Ks[0] *= Lp[0]
        Ks[1] *= Lp[1]
    K = Ks
    if broadcast is True:
        K = [np.broadcast_to(k, self.complex_shape()) for k in Ks]
    return K
Example 2
def _broadcast_shape(*args):
    """Returns the shape of the arrays that would result from broadcasting the
    supplied arrays against each other.
    """
    if not args:
        raise ValueError('must provide at least one argument')
    # use the old-iterator because np.nditer does not handle size 0 arrays
    # consistently
    b = np.broadcast(*args[:32])
    # unfortunately, it cannot handle 32 or more arguments directly
    for pos in range(32, len(args), 31):
        # ironically, np.broadcast does not properly handle np.broadcast
        # objects (it treats them as scalars)
        # use broadcasting to avoid allocating the full array
        b = broadcast_to(0, b.shape)
        b = np.broadcast(b, *args[pos:(pos + 31)])
    return b.shape
Example 3
def get_scipy_batch_logpdf(self, idx):
    if not self.scipy_arg_fn:
        return
    dist_params = self.get_dist_params(idx, wrap_tensor=False)
    dist_params_wrapped = self.get_dist_params(idx)
    dist_params = self._convert_logits_to_ps(dist_params)
    test_data = self.get_test_data(idx, wrap_tensor=False)
    test_data_wrapped = self.get_test_data(idx)
    shape = self.pyro_dist.shape(test_data_wrapped, **dist_params_wrapped)
    batch_log_pdf = []
    for i in range(len(test_data)):
        batch_params = {}
        for k in dist_params:
            param = np.broadcast_to(dist_params[k], shape)
            batch_params[k] = param[i]
        args, kwargs = self.scipy_arg_fn(**batch_params)
        if self.is_discrete:
            batch_log_pdf.append(self.scipy_dist.logpmf(test_data[i], *args, **kwargs))
        else:
            batch_log_pdf.append(self.scipy_dist.logpdf(test_data[i], *args, **kwargs))
    return batch_log_pdf
Example 4
def test_max_unbounded(self):
    n_batch = 7
    ndim_action = 3
    mu = np.random.randn(n_batch, ndim_action).astype(np.float32)
    mat = np.broadcast_to(
        np.eye(ndim_action, dtype=np.float32)[None],
        (n_batch, ndim_action, ndim_action))
    v = np.random.randn(n_batch).astype(np.float32)
    q_out = action_value.QuadraticActionValue(
        chainer.Variable(mu),
        chainer.Variable(mat),
        chainer.Variable(v))
    v_out = q_out.max
    self.assertIsInstance(v_out, chainer.Variable)
    v_out = v_out.data
    np.testing.assert_almost_equal(v_out, v)
Example 5
def compute_convolution_nd(data, kernel, dimension: int,
                           mode=ConvolutionMode.valid, element_wise: bool=False):
    mode_string = __get_convolution_mode_string(mode)
    result = []
    data_prefix_shape = data.shape[:-dimension]
    kernel_prefix_shape = kernel.shape[:-dimension]
    if element_wise:
        final_shape = element_wise_shape(data_prefix_shape, kernel_prefix_shape)[0]
        data = numpy.broadcast_to(data, final_shape + data.shape[-2:])
        kernel = numpy.broadcast_to(kernel, final_shape + kernel.shape[-2:])
        if final_shape:
            for index in array_index_traversal(final_shape):
                result.append(__compute_convolution_nd(data[index], kernel[index], dimension, mode_string))
            return numpy.array(result).reshape(final_shape + result[0].shape)
        else:
            return __compute_convolution_nd(data, kernel, dimension, mode_string)
    else:
        if kernel_prefix_shape:
            final_shape = data_prefix_shape + kernel_prefix_shape + \
                basic_convolution_shape(data.shape[-dimension:], kernel.shape[-dimension:], dimension, mode_string)
            result = numpy.zeros(final_shape)
            for kernel_index in array_index_traversal(kernel_prefix_shape):
                sub_result_index = tuple(slice(None) for _ in data_prefix_shape) + kernel_index + \
                    tuple(slice(None) for _ in range(dimension))
                result[sub_result_index] = __compute_convolution_nd(data, kernel[kernel_index], dimension, mode_string)
            return result
        else:
            return __compute_convolution_nd(data, kernel, dimension, mode_string)
Example 6
def test_One(backend, M, N, K, alpha, beta, forward):
    x = indigo.util.rand64c(K, N)
    y = indigo.util.rand64c(M, N)
    B = backend()
    if getattr(B.onemm, '__isabstractmethod__', False):
        pytest.skip("backed <%s> doesn't implement onemm" % backend.__name__)
    if not hasattr(B, 'onemm'):
        pytest.skip("backend doesn't implement onemm")
    O = B.One((M, K), dtype=np.complex64)
    if forward:
        u, v = x, y
    else:
        v, u = x, y
    u_d = B.copy_array(u)
    v_d = B.copy_array(v)
    exp = beta * v + \
        np.broadcast_to(alpha*u.sum(axis=0, keepdims=True), v.shape)
    O.eval(v_d, u_d, alpha=alpha, beta=beta, forward=forward)
    act = v_d.to_host()
    np.testing.assert_allclose(act, exp, rtol=1e-5)
Example 7
def _broadcast_shape(*args):
    """Returns the shape of the arrays that would result from broadcasting the
    supplied arrays against each other.
    """
    if not args:
        raise ValueError('must provide at least one argument')
    if len(args) == 1:
        # a single argument does not work with np.broadcast
        return np.asarray(args[0]).shape
    # use the old-iterator because np.nditer does not handle size 0 arrays
    # consistently
    b = np.broadcast(*args[:32])
    # unfortunately, it cannot handle 32 or more arguments directly
    for pos in range(32, len(args), 31):
        # ironically, np.broadcast does not properly handle np.broadcast
        # objects (it treats them as scalars)
        # use broadcasting to avoid allocating the full array
        b = broadcast_to(0, b.shape)
        b = np.broadcast(b, *args[pos:(pos + 31)])
    return b.shape
Example 8
def broadcast_to(self, shape):
    """
    Performs the equivalent of np.broadcast_to for COO.

    Parameters
    ----------
    shape : tuple[int]
        The shape to broadcast the data to.

    Returns
    -------
    The broadcasted sparse array.

    Raises
    ------
    ValueError
        If the operand cannot be broadcast to the given shape.
    """
    result_shape = self._get_broadcast_shape(self.shape, shape, is_result=True)
    params = self._get_broadcast_parameters(self.shape, result_shape)
    coords, data = self._get_expanded_coords_data(self.coords, self.data, params, result_shape)

    return COO(coords, data, shape=result_shape, has_duplicates=self.has_duplicates,
               sorted=self.sorted)
Example 9
def _broadcast_shape(*args):
    """Returns the shape of the arrays that would result from broadcasting the
    supplied arrays against each other.
    """
    if not args:
        raise ValueError('must provide at least one argument')
    # use the old-iterator because np.nditer does not handle size 0 arrays
    # consistently
    b = np.broadcast(*args[:32])
    # unfortunately, it cannot handle 32 or more arguments directly
    for pos in range(32, len(args), 31):
        # ironically, np.broadcast does not properly handle np.broadcast
        # objects (it treats them as scalars)
        # use broadcasting to avoid allocating the full array
        b = broadcast_to(0, b.shape)
        b = np.broadcast(b, *args[pos:(pos + 31)])
    return b.shape
Example 10
def test_repeat_tile(self):
    initial_shape = (8, 4)

    repeats = ((3, 1, 1),
               (3, 3, 3),
               (1, 2, 1),
               (2, 2, 2, 2))

    def _generate_noncontiguous_input():
        out = np.broadcast_to(np.random.random((1, 4)), initial_shape)
        assert not (out.flags.c_contiguous or out.flags.f_contiguous)
        return out

    for repeat in repeats:
        for tensor in (torch.from_numpy(np.random.random(initial_shape)),
                       torch.from_numpy(_generate_noncontiguous_input()),):
            self.assertEqual(tensor.repeat(*repeat).numpy(),
                             np.tile(tensor.numpy(), repeat))
Example 11
def ordinal_loss(y, mask):
    xp = cuda.get_array_module(y.data)
    volatile = y.volatile
    b, c, n = y.data.shape
    max_y = F.broadcast_to(F.max(y, axis=1, keepdims=True), y.data.shape)
    y = y - max_y
    sum_y = F.broadcast_to(F.expand_dims(F.sum(y, axis=1), 1), y.data.shape)
    down_tri = np.tri(c, dtype=np.float32)
    up_tri = down_tri.T
    w1 = Variable(xp.asarray(down_tri.reshape(c, c, 1, 1)), volatile=volatile)
    w2 = Variable(xp.asarray(up_tri.reshape(c, c, 1, 1)), volatile=volatile)
    h = F.exp(F.expand_dims(y, -1))
    h1 = F.convolution_2d(h, w1)
    h1 = F.convolution_2d(F.log(h1), w1)
    h2 = F.convolution_2d(h, w2)
    h2 = F.convolution_2d(F.log(h2), w2)
    h = F.reshape(h1 + h2, (b, c, n))
    return F.sum((h - sum_y - y) * mask) / b
Example 12
def __forward(self, batch_x, batch_t, weight, train=True):
    xp = self.xp
    x = Variable(xp.asarray(batch_x), volatile=not train)
    t = Variable(xp.asarray(batch_t), volatile=not train)
    y = self.net(x, train=train)
    b, c, n = y.data.shape
    mask = Variable(xp.asarray(np.broadcast_to(weight.reshape(-1, 1, 1), (b, c, n)) *
                               loss_mask(batch_t, self.net.rating_num)), volatile=not train)
    if self.ordinal_weight == 0:
        loss = F.sum(-F.log_softmax(y) * mask) / b
    elif self.ordinal_weight == 1:
        loss = ordinal_loss(y, mask)
    else:
        loss = (1 - self.ordinal_weight) * F.sum(-F.log_softmax(y) * mask) / b + \
            self.ordinal_weight * ordinal_loss(y, mask)
    acc = self.__accuracy(y, t)
    return loss, acc
Example 13
def broadcast(vec: T.Tensor, matrix: T.Tensor) -> T.Tensor:
    """
    Broadcasts vec into the shape of matrix following numpy rules:

    vec ~ (N, 1) broadcasts to matrix ~ (N, M)
    vec ~ (1, N) and (N,) broadcast to matrix ~ (M, N)

    Args:
        vec: A vector (either flat, row, or column).
        matrix: A matrix (i.e., a 2D tensor).

    Returns:
        tensor: A tensor of the same size as matrix containing the elements
                of the vector.

    Raises:
        BroadcastError
    """
    try:
        return numpy.broadcast_to(vec, shape(matrix))
    except ValueError:
        raise BroadcastError('cannot broadcast vector of dimension {} '
                             'onto matrix of dimension {}'.format(shape(vec), shape(matrix)))
Example 14
def test_lmatvec(b0, b1, quad, format, axis, k0, k1):
    """Test matrix-vector product"""
    global c, c1, d, d1
    b0 = b0(N, quad=quad)
    b1 = b1(N, quad=quad)
    mat = shenfun.spectralbase.inner_product((b0, k0), (b1, k1))
    c = mat.matvec(a, c, format='csr')
    c1 = mat.matvec(a, c1, format=format)
    assert np.allclose(c, c1)

    d.fill(0)
    d1.fill(0)
    d = mat.matvec(b, d, format='csr', axis=axis)
    d1 = mat.matvec(b, d1, format=format, axis=axis)
    assert np.allclose(d, d1)

    # Test multidimensional with axis equals 1D case
    d1.fill(0)
    bc = [np.newaxis,]*3
    bc[axis] = slice(None)
    fj = np.broadcast_to(a[bc], (N,)*3).copy()
    d1 = mat.matvec(fj, d1, format=format, axis=axis)
    cc = [0,]*3
    cc[axis] = slice(None)
    assert np.allclose(c, d1[cc])
Example 15
def test_axis(ST, quad, axis):
    ST = ST(N, quad=quad, plan=True)
    points, weights = ST.points_and_weights(N)
    f_hat = np.random.random(N)
    B = inner_product((ST, 0), (ST, 0))
    c = np.zeros_like(f_hat)
    c = B.solve(f_hat, c)

    # Multidimensional version
    bc = [np.newaxis,]*3
    bc[axis] = slice(None)
    fk = np.broadcast_to(f_hat[bc], (N,)*3).copy()
    ST.plan((N,)*3, axis, np.float, {})
    if hasattr(ST, 'bc'):
        ST.bc.set_tensor_bcs(ST)  # To set Dirichlet boundary conditions on multidimensional array
    ck = np.zeros_like(fk)
    ck = B.solve(fk, ck, axis=axis)
    cc = [0,]*3
    cc[axis] = slice(None)
    assert np.allclose(ck[cc], c)

#test_axis(cbases.ShenDirichletBasis, "GC", 1)
Example 16
def get_local_mesh(self):
    """Returns the local decomposed physical mesh"""
    X = np.ogrid[self.rank*self.Np[0]:(self.rank+1)*self.Np[0],
                 :self.N[1], :self.N[2]]
    X[0] = (X[0]*self.L[0]/self.N[0]).astype(self.float)
    X[1] = (X[1]*self.L[1]/self.N[1]).astype(self.float)
    X[2] = (X[2]*self.L[2]/self.N[2]).astype(self.float)
    X = [np.broadcast_to(x, self.real_shape()) for x in X]
    return X
Example 17
def get_local_wavenumbermesh(self, scaled=False, broadcast=False,
                             eliminate_highest_freq=False):
    """Returns (scaled) local decomposed wavenumbermesh

    If scaled is True, then the wavenumbermesh is scaled with physical mesh
    size. This takes care of mapping the physical domain to a computational
    cube of size (2pi)**3.

    If eliminate_highest_freq is True, then the Nyquist frequency is set to zero.
    """
    kx, ky, kz = self.complex_local_wavenumbers()
    if eliminate_highest_freq:
        ky = fftfreq(self.N[1], 1./self.N[1])
        for i, k in enumerate((kx, ky, kz)):
            if self.N[i] % 2 == 0:
                k[self.N[i]//2] = 0
        ky = ky[self.complex_local_slice()[1]]

    Ks = np.meshgrid(kx, ky, kz, indexing='ij', sparse=True)
    if scaled:
        Lp = 2*np.pi/self.L
        for i in range(3):
            Ks[i] *= Lp[i]
    K = Ks
    if broadcast is True:
        K = [np.broadcast_to(k, self.complex_shape()) for k in Ks]
    return K
Example 18
def get_local_mesh(self):
    xzrank = self.comm0.Get_rank()  # Local rank in xz-plane
    xyrank = self.comm1.Get_rank()  # Local rank in xy-plane

    # Create the physical mesh
    x1 = slice(xzrank * self.N1[0], (xzrank+1) * self.N1[0], 1)
    x2 = slice(xyrank * self.N2[1], (xyrank+1) * self.N2[1], 1)
    X = np.ogrid[x1, x2, :self.N[2]]
    X[0] = (X[0]*self.L[0]/self.N[0]).astype(self.float)
    X[1] = (X[1]*self.L[1]/self.N[1]).astype(self.float)
    X[2] = (X[2]*self.L[2]/self.N[2]).astype(self.float)
    X = [np.broadcast_to(x, self.real_shape()) for x in X]
    return X
Example 19
def get_local_wavenumbermesh(self, scaled=False, broadcast=False,
                             eliminate_highest_freq=False):
    """Returns (scaled) local decomposed wavenumbermesh

    If scaled is True, then the wavenumbermesh is scaled with physical mesh
    size. This takes care of mapping the physical domain to a computational
    cube of size (2pi)**3
    """
    s = self.complex_local_slice()
    kx = fftfreq(self.N[0], 1./self.N[0]).astype(int)
    ky = fftfreq(self.N[1], 1./self.N[1]).astype(int)
    kz = rfftfreq(self.N[2], 1./self.N[2]).astype(int)
    if eliminate_highest_freq:
        for i, k in enumerate((kx, ky, kz)):
            if self.N[i] % 2 == 0:
                k[self.N[i]//2] = 0
    kx = kx[s[0]]
    kz = kz[s[2]]
    Ks = np.meshgrid(kx, ky, kz, indexing='ij', sparse=True)
    if scaled is True:
        Lp = 2*np.pi/self.L
        for i in range(3):
            Ks[i] = (Ks[i]*Lp[i]).astype(self.float)
    K = Ks
    if broadcast is True:
        K = [np.broadcast_to(k, self.complex_shape()) for k in Ks]
    return K
Example 20
def scalar_broadcast_match(a, b):
    """Returns arguments as np.array, if one is a scalar it will broadcast
    the other one's shape.
    """
    a, b = np.atleast_1d(a, b)
    if a.size == 1 and b.size != 1:
        a = np.broadcast_to(a, b.shape)
    elif b.size == 1 and a.size != 1:
        b = np.broadcast_to(b, a.shape)
    return a, b
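A short hedged illustration of how this helper behaves (the call below is hypothetical and not taken from the source project; it assumes numpy is imported as np and the definition above is in scope):

import numpy as np

a, b = scalar_broadcast_match(2.0, np.array([1.0, 2.0, 3.0]))
# a -> array([2., 2., 2.])   (the scalar is stretched to b's shape)
# b -> array([1., 2., 3.])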
Example 21
def predict(self, input_x):
    if isinstance(input_x, chainer.Variable):
        device = cuda.get_device(input_x.data)
    else:
        device = cuda.get_device(input_x)
    xp = self.predictor.xp
    with device:
        output = self.predictor(input_x)
        batch_size, input_channel, input_h, input_w = input_x.shape
        batch_size, _, grid_h, grid_w = output.shape
        x, y, w, h, conf, prob = F.split_axis(
            F.reshape(output, (batch_size, self.predictor.n_boxes,
                               self.predictor.n_classes+5, grid_h, grid_w)),
            (1, 2, 3, 4, 5), axis=2)
        x = F.sigmoid(x)
        y = F.sigmoid(y)
        conf = F.sigmoid(conf)
        prob = F.transpose(prob, (0, 2, 1, 3, 4))
        prob = F.softmax(prob)
        prob = F.transpose(prob, (0, 2, 1, 3, 4))

        # convert coordinates to those on the image
        x_shift = xp.asarray(np.broadcast_to(np.arange(grid_w, dtype=np.float32), x.shape))
        y_shift = xp.asarray(np.broadcast_to(np.arange(grid_h, dtype=np.float32).reshape(grid_h, 1), y.shape))
        w_anchor = xp.asarray(np.broadcast_to(np.reshape(
            np.array(self.anchors, dtype=np.float32)[:, 0],
            (self.predictor.n_boxes, 1, 1, 1)), w.shape))
        h_anchor = xp.asarray(np.broadcast_to(np.reshape(
            np.array(self.anchors, dtype=np.float32)[:, 1],
            (self.predictor.n_boxes, 1, 1, 1)), h.shape))
        box_x = (x + x_shift) / grid_w
        box_y = (y + y_shift) / grid_h
        box_w = F.exp(w) * w_anchor / grid_w
        box_h = F.exp(h) * h_anchor / grid_h

        return box_x, box_y, box_w, box_h, conf, prob
Example 22
def test_indexing_array_weird_strides(self):
    # See also gh-6221
    # the shapes used here come from the issue and create the correct
    # size for the iterator buffering size.
    x = np.ones(10)
    x2 = np.ones((10, 2))
    ind = np.arange(10)[:, None, None, None]
    ind = np.broadcast_to(ind, (10, 55, 4, 4))

    # single advanced index case
    assert_array_equal(x[ind], x[ind.copy()])

    # higher dimensional advanced index
    zind = np.zeros(4, dtype=np.intp)
    assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])
Example 23
def broadcast_to(array, shape, subok=False):
    """Broadcast an array to a new shape.

    Parameters
    ----------
    array : array_like
        The array to broadcast.
    shape : tuple
        The shape of the desired array.
    subok : bool, optional
        If True, then sub-classes will be passed-through, otherwise
        the returned array will be forced to be a base-class array (default).

    Returns
    -------
    broadcast : array
        A readonly view on the original array with the given shape. It is
        typically not contiguous. Furthermore, more than one element of a
        broadcasted array may refer to a single memory location.

    Raises
    ------
    ValueError
        If the array is not compatible with the new shape according to NumPy's
        broadcasting rules.

    Notes
    -----
    .. versionadded:: 1.10.0

    Examples
    --------
    >>> x = np.array([1, 2, 3])
    >>> np.broadcast_to(x, (3, 3))
    array([[1, 2, 3],
           [1, 2, 3],
           [1, 2, 3]])
    """
    return _broadcast_to(array, shape, subok=subok, readonly=True)
Example 24
def test_max_bounded(self):
    n_batch = 20
    ndim_action = 3
    mu = np.random.randn(n_batch, ndim_action).astype(np.float32)
    mat = np.broadcast_to(
        np.eye(ndim_action, dtype=np.float32)[None],
        (n_batch, ndim_action, ndim_action))
    v = np.random.randn(n_batch).astype(np.float32)
    min_action, max_action = -1.3, 1.3

    q_out = action_value.QuadraticActionValue(
        chainer.Variable(mu),
        chainer.Variable(mat),
        chainer.Variable(v),
        min_action, max_action)

    v_out = q_out.max
    self.assertIsInstance(v_out, chainer.Variable)
    v_out = v_out.data

    # If mu[i] is a valid action, v_out[i] should be v[i]
    mu_is_allowed = np.all(
        (min_action < mu) * (mu < max_action),
        axis=1)
    np.testing.assert_almost_equal(v_out[mu_is_allowed], v[mu_is_allowed])

    # Otherwise, v_out[i] should be less than v[i]
    mu_is_not_allowed = ~np.all(
        (min_action - 1e-2 < mu) * (mu < max_action + 1e-2),
        axis=1)
    np.testing.assert_array_less(
        v_out[mu_is_not_allowed],
        v[mu_is_not_allowed])
Example 25
def test_pool_average_3d(ndarray_1x1x4x4):
    x = np.broadcast_to(ndarray_1x1x4x4, (1, 1, 4, 4, 4))
    node = onnx.helper.make_node('AveragePool', inputs=['x'], outputs=['y'],
                                 kernel_shape=(2, 2, 2), strides=(2, 2, 2))
    y = np.array([[[13.5, 15.5],
                   [21.5, 23.5]],
                  [[13.5, 15.5],
                   [21.5, 23.5]]], dtype=np.float32).reshape(1, 1, 2, 2, 2)
    ng_results = convert_and_calculate(node, [x], [y])
    assert np.array_equal(ng_results, [y])
Example 26
def test_pool_global_average_3d(ndarray_1x1x4x4):
    x = np.broadcast_to(ndarray_1x1x4x4, (1, 1, 4, 4, 4))
    node = onnx.helper.make_node('GlobalAveragePool', inputs=['x'], outputs=['y'])
    y = np.array([18.5], dtype=np.float32).reshape(1, 1, 1, 1, 1)
    ng_results = convert_and_calculate(node, [x], [y])
    assert np.array_equal(ng_results, [y])
Example 27
def get_tgt_vec(self, r):
    """Computes the target vector `g` in the above description"""
    r0, r1 = r
    g0 = 1/self.rsqrt0*r0
    if self.wvar_pos:
        gout = 1/self.wsqrt*np.broadcast_to(self.b, self.shape1)
        g1 = 1/self.rsqrt1*r1
        g = np.hstack((gout.ravel(), g0.ravel(), g1.ravel()))
    else:
        g1 = 1/self.rsqrt1*(r1-self.b)
        g = np.hstack((g0.ravel(), g1.ravel()))
    return g
Example 28
def forward_cpu(self, inputs):
    x, t = inputs
    if chainer.is_debug():
        self._check_input_values(x, t)

    log_y = log_softmax._log_softmax(x, self.use_cudnn)
    if self.cache_score:
        self.y = numpy.exp(log_y)
    if self.class_weight is not None:
        if self.class_weight.shape != x.shape:
            shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
            self.class_weight = numpy.broadcast_to(
                self.class_weight.reshape(shape), x.shape)
        log_y *= self.class_weight
    log_yd = numpy.rollaxis(log_y, 1)
    log_yd = log_yd.reshape(len(log_yd), -1)
    log_p = log_yd[numpy.maximum(t.ravel(), 0), numpy.arange(t.size)]

    # deal with the case where the SoftmaxCrossEntropy is
    # unpickled from the old version
    if self.normalize:
        count = (t != self.ignore_label).sum()
    else:
        count = len(x)
    self._coeff = 1.0 / max(count, 1)

    y = (log_p * (t.ravel() != self.ignore_label)).sum(keepdims=True) \
        * (-self._coeff)
    return y.reshape(()),
Example 29
def forward_gpu(self, inputs):
    cupy = cuda.cupy
    x, t = inputs
    if chainer.is_debug():
        self._check_input_values(x, t)

    log_y = log_softmax._log_softmax(x, self.use_cudnn)
    if self.cache_score:
        self.y = cupy.exp(log_y)
    if self.class_weight is not None:
        shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
        log_y *= cupy.broadcast_to(
            self.class_weight.reshape(shape), x.shape)
    if self.normalize:
        coeff = cupy.maximum(1, (t != self.ignore_label).sum())
    else:
        coeff = max(1, len(t))
    self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

    log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
    ret = cuda.reduce(
        'S t, raw T log_y, int32 n_channel, raw T coeff', 'T out',
        't == -1 ? T(0) : log_y[_j * n_channel + t]',
        'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
    )(t, log_y.reduced_view(), log_y.shape[-1], self._coeff)
    return ret,
Example 30
def backward_cpu(self, inputs, grad_outputs):
    x, t = inputs
    gloss = grad_outputs[0]
    if hasattr(self, 'y'):
        y = self.y.copy()
    else:
        y = log_softmax._log_softmax(x, self.use_cudnn)
        numpy.exp(y, out=y)
    if y.ndim == 2:
        gx = y
        gx[numpy.arange(len(t)), numpy.maximum(t, 0)] -= 1
        if self.class_weight is not None:
            c = self.class_weight[
                numpy.arange(len(t)), numpy.maximum(t, 0)]
            gx *= numpy.broadcast_to(numpy.expand_dims(c, 1), gx.shape)
        gx *= (t != self.ignore_label).reshape((len(t), 1))
    else:
        # in the case where y.ndim is higher than 2,
        # we think that a current implementation is inefficient
        # because it yields two provisional arrays for indexing.
        n_unit = t.size // len(t)
        gx = y.reshape(y.shape[0], y.shape[1], -1)
        fst_index = numpy.arange(t.size) // n_unit
        trd_index = numpy.arange(t.size) % n_unit
        gx[fst_index, numpy.maximum(t.ravel(), 0), trd_index] -= 1
        if self.class_weight is not None:
            c = self.class_weight.reshape(gx.shape)
            c = c[fst_index, numpy.maximum(t.ravel(), 0), trd_index]
            c = c.reshape(y.shape[0], 1, -1)
            gx *= numpy.broadcast_to(c, gx.shape)
        gx *= (t != self.ignore_label).reshape((len(t), 1, -1))
        gx = gx.reshape(y.shape)
    gx *= gloss * self._coeff
    return gx, None
Example 31
def broadcast_mgrid(arrays):
    shape = tuple(map(len, arrays))
    ndim = len(shape)
    result = []
    for i, arr in enumerate(arrays, start=1):
        reshaped = np.broadcast_to(arr[[...] + [np.newaxis] * (ndim - i)],
                                   shape)
        result.append(reshaped)
    return result
Example 32
def numba_csgraph(csr, node_props=None):
    if node_props is None:
        node_props = np.broadcast_to(1., csr.shape[0])
        node_props.flags.writeable = True
    return CSGraph(csr.indptr, csr.indices, csr.data,
                   np.array(csr.shape, dtype=np.int32),
                   node_props)
Example 33
def _fd(self, xi, idx, delta):
    """Calculate the derivative along the given index using central finite difference.

    Parameters
    ----------
    xi : array_like
        The coordinates to evaluate, with shape (..., ndim)
    idx : int
        The index to calculate the derivative on.
    delta : float
        The finite difference step size.

    Returns
    -------
    val : np.multiarray.ndarray
        The derivatives at the given coordinates.
    """
    if idx < 0 or idx >= self.ndim:
        raise ValueError('Invalid derivative index: %d' % idx)

    xi = np.asarray(xi, dtype=float)
    if xi.shape[-1] != self.ndim:
        raise ValueError("The requested sample points xi have dimension %d, "
                         "but this interpolator has dimension %d" % (xi.shape[-1], self.ndim))

    # use broadcasting to evaluate two points at once
    xtest = np.broadcast_to(xi, (2,) + xi.shape).copy()
    xtest[0, ..., idx] += delta / 2.0
    xtest[1, ..., idx] -= delta / 2.0
    val = self(xtest)
    ans = (val[0] - val[1]) / delta  # type: np.ndarray

    if ans.size == 1 and not np.isscalar(ans):
        return ans[0]
    return ans
Example 34
def _fd_jacobian(self, xi, delta_list):
    """Calculate the Jacobian matrix using central finite difference.

    Parameters
    ----------
    xi : array_like
        The coordinates to evaluate, with shape (..., ndim)
    delta_list : List[float]
        list of finite difference step sizes for each input.

    Returns
    -------
    val : np.multiarray.ndarray
        The Jacobian matrices at the given coordinates.
    """
    xi = np.asarray(xi, dtype=float)
    if xi.shape[-1] != self.ndim:
        raise ValueError("The requested sample points xi have dimension %d, "
                         "but this interpolator has dimension %d" % (xi.shape[-1], self.ndim))

    # use broadcasting to evaluate all points at once
    xtest = np.broadcast_to(xi, (2 * self.ndim,) + xi.shape).copy()
    for idx, delta in enumerate(delta_list):
        xtest[2 * idx, ..., idx] += delta / 2.0
        xtest[2 * idx + 1, ..., idx] -= delta / 2.0

    val = self(xtest)
    ans = np.empty(xi.shape)
    for idx, delta in enumerate(delta_list):
        ans[..., idx] = (val[2 * idx, ...] - val[2 * idx + 1, ...]) / delta
    return ans
Example 35
def _fix_priors_shape(self):
    # If priors are numbers, this function will make them into a
    # matrix of proper shape
    self.weights_prior = np.broadcast_to(
        self.weights_prior, (self.n_components, self.n_mix)).copy()
    self.means_prior = np.broadcast_to(
        self.means_prior,
        (self.n_components, self.n_mix, self.n_features)).copy()
    self.means_weight = np.broadcast_to(
        self.means_weight,
        (self.n_components, self.n_mix)).copy()

    if self.covariance_type == "full":
        self.covars_prior = np.broadcast_to(
            self.covars_prior,
            (self.n_components, self.n_mix,
             self.n_features, self.n_features)).copy()
        self.covars_weight = np.broadcast_to(
            self.covars_weight, (self.n_components, self.n_mix)).copy()
    elif self.covariance_type == "tied":
        self.covars_prior = np.broadcast_to(
            self.covars_prior,
            (self.n_components, self.n_features, self.n_features)).copy()
        self.covars_weight = np.broadcast_to(
            self.covars_weight, self.n_components).copy()
    elif self.covariance_type == "diag":
        self.covars_prior = np.broadcast_to(
            self.covars_prior,
            (self.n_components, self.n_mix, self.n_features)).copy()
        self.covars_weight = np.broadcast_to(
            self.covars_weight,
            (self.n_components, self.n_mix, self.n_features)).copy()
    elif self.covariance_type == "spherical":
        self.covars_prior = np.broadcast_to(
            self.covars_prior, (self.n_components, self.n_mix)).copy()
        self.covars_weight = np.broadcast_to(
            self.covars_weight, (self.n_components, self.n_mix)).copy()
Example 36
def _grid_distance(self, index):
    """
    Calculate the distance grid for a single index position.

    This is pre-calculated for fast neighborhood calculations
    later on (see _calc_influence).
    """
    # Take every dimension but the first in reverse
    # then reverse that list again.
    dimensions = np.cumprod(self.map_dimensions[1::][::-1])[::-1]

    coord = []
    for idx, dim in enumerate(dimensions):
        if idx != 0:
            value = (index % dimensions[idx-1]) // dim
        else:
            value = index // dim
        coord.append(value)

    coord.append(index % self.map_dimensions[-1])

    for idx, (width, row) in enumerate(zip(self.map_dimensions, coord)):
        x = np.abs(np.arange(width) - row) ** 2
        dims = self.map_dimensions[::-1]
        if idx:
            dims = dims[:-idx]
        x = np.broadcast_to(x, dims).T
        if idx == 0:
            distance = np.copy(x)
        else:
            distance += x.T

    return distance
Example 37
def patCycles(s0, s1=50, return_x=False):
    arr = np.zeros(s0)
    p = 1
    t = 1
    c = 0
    x, y = [], []
    while True:
        arr[p:p+t] = 1
        p += t
        x.append(2*t)
        y.append(p+0.5*t)
        if c > s1:
            t += 1
        c += 2
        p += t
        if p > s0:
            break
    arr = arr[::-1]
    arr = np.broadcast_to(arr, (s1, s0))

    if return_x:
        # cycles/px:
        x = np.interp(np.arange(s0), y, x)
        return arr, 1/x[::-1]
    else:
        return arr
Example 38
def compute_max_unpooling_nd(data, pooling, size, step, dimension: int):
    result = []
    data_prefix_shape = data.shape[:-dimension]
    kernel_prefix_shape = pooling.shape[:-dimension]
    final_shape = element_wise_shape(data_prefix_shape, kernel_prefix_shape)[0]
    data = numpy.broadcast_to(data, final_shape + data.shape[-dimension:])
    pooling = numpy.broadcast_to(pooling, final_shape + pooling.shape[-dimension:])
    if final_shape:
        for key in array_index_traversal(final_shape):
            result.append(__compute_max_unpooling_nd(data[key], pooling[key], size, step, dimension))
        return numpy.array(result).reshape(final_shape + result[0].shape)
    else:
        return __compute_max_unpooling_nd(data, pooling, size, step, dimension)
Example 39
def compute(self, node, input_vals, output_val, use_numpy=True):
    assert len(input_vals) == 2
    if use_numpy:
        output_val[:] = np.broadcast_to(input_vals[0], input_vals[1].shape)
    else:
        gpu_op.broadcast_to(input_vals[0], output_val)
Example 40
def test_broadcast_to():
    ctx = ndarray.gpu(0)
    shape = (200, 300)
    to_shape = (130, 200, 300)
    x = np.random.uniform(-1, 1, shape).astype(np.float32)
    arr_x = ndarray.array(x, ctx=ctx)
    arr_y = ndarray.empty(to_shape, ctx=ctx)
    gpu_op.broadcast_to(arr_x, arr_y)
    y = arr_y.asnumpy()
    np.testing.assert_allclose(np.broadcast_to(x, to_shape), y)
Example 41
def numerical_check(test, graph, wrt_vars, order=1):
    backprop_graphs, numeric_grads = differentiate_n_times_num(graph, wrt_vars, order=order)
    for wrt_var, graph_grad, num_grad in zip(wrt_vars, backprop_graphs, numeric_grads):
        name = "num" + str(order) + "df_wrt_" + wrt_var.name
        if graph.name == "extra_exp_op":
            name += " as input to another op!!!"
        with test.subTest(name):
            print("---------- " + name + " ----------")
            print("Backprop grad:", graph_grad())
            print("Numeric grad:", num_grad)
            # not necessarily the same shape arrays
            broadcasted_grad = np.broadcast_to(graph_grad(), wrt_var().shape)
            arrays_allclose(broadcasted_grad, num_grad)
Example 42
def eval_graphs(my_graph, tf_graph, my_var, tf_var, n):
    tf_grads = 0
    if tf_graph is not None:
        tf_grads = tf_graph.eval()
    my_grads = my_graph()

    print("---------- " + str(n) + "df w.r.t. " + str(my_var) + " ----------")
    print("My_val:", my_grads)
    print("Tf_val:", tf_grads)
    my_val = np.broadcast_to(my_grads, my_var.shape)
    tf_val = np.broadcast_to(tf_grads, my_var.shape)
    arrays_allclose(my_val, tf_val)
Example 43
def _eval(self):
    arr = [op() for op in self.operands]

    for i, val in enumerate(arr):
        if isinstance(val, numbers.Number):
            shp = [l for let in Einsum.split_dots(self.opnames[i])
                   for l in self.letter_to_dim.get(let, [1])]
            arr[i] = np.broadcast_to(val, shp)

    return np.einsum(self.op_str, *arr)