Python numpy.multiply() Usage Examples

The following are code examples showing how to use numpy.multiply(). They are extracted from open source Python projects.
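
Before the project examples, here is a minimal sketch (assuming only that NumPy is imported as np) of the behaviour they all rely on: numpy.multiply is the function form of the * operator for arrays, it broadcasts its inputs, and it accepts an out= argument for writing the product in place.

import numpy as np

a = np.array([[1.0, 2.0, 3.0]])
b = np.array([[10.0], [20.0]])

# Element-wise product with a scalar; equivalent to a * 2.0.
print(np.multiply(a, 2.0))            # [[2. 4. 6.]]

# Broadcasting: shapes (1, 3) and (2, 1) combine to (2, 3).
print(np.multiply(a, b))              # [[10. 20. 30.] [20. 40. 60.]]

# In-place result via the out= argument.
out = np.empty((2, 3))
np.multiply(a, b, out=out)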

Example 1

def update(self,frame,events):
        falloff = self.falloff

        img = frame.img
        pts = [denormalize(pt['norm_pos'],frame.img.shape[:-1][::-1],flip_y=True) for pt in events.get('gaze_positions',[]) if pt['confidence']>=self.g_pool.min_data_confidence]

        overlay = np.ones(img.shape[:-1],dtype=img.dtype)

        # draw recent gaze positions as black dots on an overlay image.
        for gaze_point in pts:
            try:
                overlay[int(gaze_point[1]),int(gaze_point[0])] = 0
            except:
                pass

        out = cv2.distanceTransform(overlay,cv2.DIST_L2, 5)

        # fix for opencv binding inconsistency
        if type(out)==tuple:
            out = out[0]

        overlay =  1/(out/falloff+1)

        img[:] = np.multiply(img, cv2.cvtColor(overlay,cv2.COLOR_GRAY2RGB), casting="unsafe") 

Example 2

def svgd_kernel(self, h = -1):
        sq_dist = pdist(self.theta)
        pairwise_dists = squareform(sq_dist)**2
        if h < 0: # if h < 0, use the median trick
            h = np.median(pairwise_dists)  
            h = np.sqrt(0.5 * h / np.log(self.theta.shape[0]+1))

        # compute the rbf kernel
        
        Kxy = np.exp( -pairwise_dists / h**2 / 2)

        dxkxy = -np.matmul(Kxy, self.theta)
        sumkxy = np.sum(Kxy, axis=1)
        for i in range(self.theta.shape[1]):
            dxkxy[:, i] = dxkxy[:,i] + np.multiply(self.theta[:,i],sumkxy)
        dxkxy = dxkxy / (h**2)
        return (Kxy, dxkxy) 
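
Examples 2 and 5 build the RBF kernel matrix and its gradient used in Stein variational gradient descent (SVGD), picking the bandwidth h with the median trick when h < 0. The following standalone sketch (a rewrite for illustration, not the original class method) reproduces just the bandwidth and kernel computation on a toy particle set.

import numpy as np
from scipy.spatial.distance import pdist, squareform

theta = np.random.randn(50, 2)              # 50 particles in 2-D

sq_dist = pdist(theta)                      # condensed pairwise distances
pairwise_dists = squareform(sq_dist) ** 2   # squared distance matrix
# Median trick: tie the bandwidth to the median squared distance.
h = np.median(pairwise_dists)
h = np.sqrt(0.5 * h / np.log(theta.shape[0] + 1))

Kxy = np.exp(-pairwise_dists / h ** 2 / 2)  # (50, 50) RBF kernel matrix
print(Kxy.shape, Kxy.diagonal()[:3])        # diagonal entries are all 1.0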

Example 3

def EStep(self):
    P = np.zeros((self.M, self.N))

    for i in range(0, self.M):
      diff     = self.X - np.tile(self.TY[i, :], (self.N, 1))
      diff    = np.multiply(diff, diff)
      P[i, :] = P[i, :] + np.sum(diff, axis=1)

    c = (2 * np.pi * self.sigma2) ** (self.D / 2)
    c = c * self.w / (1 - self.w)
    c = c * self.M / self.N

    P = np.exp(-P / (2 * self.sigma2))
    den = np.sum(P, axis=0)
    den = np.tile(den, (self.M, 1))
    den[den==0] = np.finfo(float).eps

    self.P   = np.divide(P, den)
    self.Pt1 = np.sum(self.P, axis=0)
    self.P1  = np.sum(self.P, axis=1)
    self.Np  = np.sum(self.P1) 

Example 4

def derivative(self, input=None):
        """The derivative of sigmoid is
        
        .. math:: \\frac{dy}{dx} & = (1-\\varphi(x)) \\otimes \\varphi(x)  \\\\
                  & = \\frac{e^{-x}}{(1+e^{-x})^2} \\\\
                  & = \\frac{e^x}{(1+e^x)^2}
        
        Returns
        -------
        float32
            The derivative of sigmoid function.
        """
        last_forward = self.forward(input) if input else self.last_forward
        return np.multiply(last_forward, 1 - last_forward)


# sigmoid-end
# tanh-start 
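
The docstring of Example 4 states the identity sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)), which is why the derivative reduces to a single np.multiply of the cached forward pass with its complement. A quick numerical check of that identity (a standalone sketch, not part of the original library):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

x = np.linspace(-5, 5, 11)
analytic = np.multiply(sigmoid(x), 1 - sigmoid(x))

# Central finite difference of sigmoid as an independent reference.
eps = 1e-6
numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)

assert np.allclose(analytic, numeric, atol=1e-8)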

Example 5

def svgd_kernel(self, theta, h = -1):
        sq_dist = pdist(theta)
        pairwise_dists = squareform(sq_dist)**2
        if h < 0: # if h < 0, use the median trick
            h = np.median(pairwise_dists)  
            h = np.sqrt(0.5 * h / np.log(theta.shape[0]+1))

        # compute the rbf kernel
        Kxy = np.exp( -pairwise_dists / h**2 / 2)

        dxkxy = -np.matmul(Kxy, theta)
        sumkxy = np.sum(Kxy, axis=1)
        for i in range(theta.shape[1]):
            dxkxy[:, i] = dxkxy[:,i] + np.multiply(theta[:,i],sumkxy)
        dxkxy = dxkxy / (h**2)
        return (Kxy, dxkxy) 

Example 6

def gradientDescent(X, y, theta, alpha, iters):
    temp = np.matrix(np.zeros(theta.shape))
    params = int(theta.ravel().shape[1]) #flattens
    cost = np.zeros(iters)

    for i in range(iters):
        err = (X * theta.T) - y
        
        for j in range(params):
            term = np.multiply(err, X[:,j])
            temp[0, j] = theta[0, j] - ((alpha / len(X)) * np.sum(term))
        
        theta = temp
        cost[i] = computeCost(X, y, theta)
    
    return theta, cost 

Example 7

def computeCost(X, y, theta):
    inner = np.power(((X * theta.T) - y), 2)
    return np.sum(inner) / (2 * len(X))

#def gradientDescent(X, y, theta, alpha, iters):
#    temp = np.matrix(np.zeros(theta.shape))
#    params = int(theta.ravel().shape[1]) #flattens
#    cost = np.zeros(iters)
#
#    for i in range(iters):
#        err = (X * theta.T) - y
#        
#        for j in range(params):
#            term = np.multiply(err, X[:,j])
#            temp[0, j] = theta[0, j] - ((alpha / len(X)) * np.sum(term))
#        
#        theta = temp
#        cost[i] = computeCost(X, y, theta)
#    
#    return theta, cost 

Example 8

def _extract_images(filename):
        """???????????????????

        :param filename: ?????
        :return: 4??numpy??[index, y, x, depth]? ???np.float32
        """
        images = []
        print('Extracting {}'.format(filename))
        with gzip.GzipFile(fileobj=open(filename, 'rb')) as f:
            buf = f.read()
            index = 0
            magic, num_images, rows, cols = struct.unpack_from('>IIII', buf, index)
            if magic != 2051:
                raise ValueError('Invalid magic number {} in MNIST image file: {}'.format(magic, filename))
            index += struct.calcsize('>IIII')
            for i in range(num_images):
                img = struct.unpack_from('>784B', buf, index)
                index += struct.calcsize('>784B')
                img = np.array(img, dtype=np.float32)
                # Rescale pixel values from [0, 255] to [0, 1]
                img = np.multiply(img, 1.0 / 255.0)
                img = img.reshape(rows, cols, 1)
                images.append(img)
        return np.array(images, dtype=np.float32) 

Example 9

def get_max_q_values(
        self,
        next_states: np.ndarray,
        possible_next_actions: Optional[np.ndarray] = None,
        use_target_network: Optional[bool] = True
    ) -> np.ndarray:
        q_values = self.get_q_values_all_actions(
            next_states, use_target_network
        )

        if possible_next_actions is not None:
            mask = np.multiply(
                np.logical_not(possible_next_actions),
                self.ACTION_NOT_POSSIBLE_VAL
            )
            q_values += mask

        return np.max(q_values, axis=1, keepdims=True) 
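
Example 9 masks out impossible actions by multiplying the logical negation of the possible-action indicator with a large negative constant (ACTION_NOT_POSSIBLE_VAL) and adding the result to the Q-values, so the subsequent max can never select a forbidden action. A toy illustration of the same trick with plain arrays (the penalty value here is an assumption for illustration, not the project's constant):

import numpy as np

ACTION_NOT_POSSIBLE_VAL = -1e9                  # assumed penalty value
q_values = np.array([[1.0, 5.0, 3.0]])
possible_next_actions = np.array([[1, 0, 1]])   # action 1 is not allowed

mask = np.multiply(np.logical_not(possible_next_actions), ACTION_NOT_POSSIBLE_VAL)
q_values = q_values + mask                      # [[1.0, 5.0 - 1e9, 3.0]]

print(np.max(q_values, axis=1, keepdims=True))  # [[3.]]: the masked action loses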

Example 10

def gen_training_data(
    num_features,
    num_training_samples,
    num_outputs,
    noise_scale=0.1,
):
    np.random.seed(0)
    random.seed(1)
    input_distribution = stats.norm()
    training_inputs = input_distribution.rvs(
        size=(num_training_samples, num_features)
    ).astype(np.float32)
    weights = np.random.normal(size=(num_outputs, num_features)
                              ).astype(np.float32).transpose()
    noise = np.multiply(
        np.random.normal(size=(num_training_samples, num_outputs)), noise_scale
    )
    training_outputs = (np.dot(training_inputs, weights) +
                        noise).astype(np.float32)

    return training_inputs, training_outputs, weights, input_distribution 

Example 11

def make_tfidf(arr):
    '''input, numpy array with flavor counts for each recipe and compounds
    return numpy array adjusted as tfidf
    '''
    arr2 = arr.copy()
    N=arr2.shape[0]
    l2_rows = np.sqrt(np.sum(arr2**2, axis=1)).reshape(N, 1)
    l2_rows[l2_rows==0]=1
    arr2_norm = arr2/l2_rows

    arr2_freq = np.sum(arr2_norm>0, axis=0)
    arr2_idf = np.log(float(N+1) / (1.0 + arr2_freq)) + 1.0

    from sklearn.preprocessing import normalize
    tfidf = np.multiply(arr2_norm, arr2_idf)
    tfidf = normalize(tfidf, norm='l2', axis=1)
    print(tfidf.shape)
    return tfidf 
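
Examples 11 and 12 scale an L2-normalised count matrix by a smoothed inverse document frequency, np.log((N + 1) / (1 + df)) + 1, and then re-normalise the rows. A short usage sketch on a made-up 3x4 count matrix, assuming make_tfidf as defined above is available in the current scope:

import numpy as np

# Toy recipe-by-compound count matrix, purely illustrative.
counts = np.array([[3, 0, 1, 0],
                   [0, 2, 0, 1],
                   [1, 1, 1, 1]], dtype=float)

tfidf = make_tfidf(counts)
print(tfidf.shape)                      # (3, 4)
print(np.linalg.norm(tfidf, axis=1))    # every row has unit L2 norm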

Example 12

def make_tfidf(arr):
    '''input, numpy array with flavor counts for each recipe and compounds
    return numpy array adjusted as tfidf
    '''
    arr2 = arr.copy()
    N=arr2.shape[0]
    l2_rows = np.sqrt(np.sum(arr2**2, axis=1)).reshape(N, 1)
    l2_rows[l2_rows==0]=1
    arr2_norm = arr2/l2_rows

    arr2_freq = np.sum(arr2_norm>0, axis=0)
    arr2_idf = np.log(float(N+1) / (1.0 + arr2_freq)) + 1.0

    from sklearn.preprocessing import normalize
    tfidf = np.multiply(arr2_norm, arr2_idf)
    tfidf = normalize(tfidf, norm='l2', axis=1)
    print(tfidf.shape)
    return tfidf 

Example 13

def train(self, training_data_array):
        for data in training_data_array:
            # Forward propagation through the network
            y1 = np.dot(np.mat(self.theta1), np.mat(data.y0).T)
            sum1 = y1 + np.mat(self.input_layer_bias)
            y1 = self.sigmoid(sum1)

            y2 = np.dot(np.array(self.theta2), y1)
            y2 = np.add(y2, self.hidden_layer_bias)
            y2 = self.sigmoid(y2)

            # Back propagation: compute the output and hidden-layer errors
            actual_vals = [0] * 10
            actual_vals[data.label] = 1
            output_errors = np.mat(actual_vals).T - np.mat(y2)
            hidden_errors = np.multiply(np.dot(np.mat(self.theta2).T, output_errors), self.sigmoid_prime(sum1))

            # Update the weights and biases
            self.theta1 += self.LEARNING_RATE * np.dot(np.mat(hidden_errors), np.mat(data.y0))
            self.theta2 += self.LEARNING_RATE * np.dot(np.mat(output_errors), np.mat(y1).T)
            self.hidden_layer_bias += self.LEARNING_RATE * output_errors
            self.input_layer_bias += self.LEARNING_RATE * hidden_errors 

Example 14

def ct2lg(dX, dY, dZ, lat, lon):

    n = dX.size
    R = np.zeros((3, 3, n))

    R[0, 0, :] = -np.multiply(np.sin(np.deg2rad(lat)), np.cos(np.deg2rad(lon)))
    R[0, 1, :] = -np.multiply(np.sin(np.deg2rad(lat)), np.sin(np.deg2rad(lon)))
    R[0, 2, :] = np.cos(np.deg2rad(lat))
    R[1, 0, :] = -np.sin(np.deg2rad(lon))
    R[1, 1, :] = np.cos(np.deg2rad(lon))
    R[1, 2, :] = np.zeros((1, n))
    R[2, 0, :] = np.multiply(np.cos(np.deg2rad(lat)), np.cos(np.deg2rad(lon)))
    R[2, 1, :] = np.multiply(np.cos(np.deg2rad(lat)), np.sin(np.deg2rad(lon)))
    R[2, 2, :] = np.sin(np.deg2rad(lat))

    dxdydz = np.column_stack((np.column_stack((dX, dY)), dZ))

    RR = np.reshape(R[0, :, :], (3, n))
    dx = np.sum(np.multiply(RR, dxdydz.transpose()), axis=0)
    RR = np.reshape(R[1, :, :], (3, n))
    dy = np.sum(np.multiply(RR, dxdydz.transpose()), axis=0)
    RR = np.reshape(R[2, :, :], (3, n))
    dz = np.sum(np.multiply(RR, dxdydz.transpose()), axis=0)

    return dx, dy, dz 
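
Examples 14 to 18 rotate Earth-centred coordinate differences (dX, dY, dZ) into a local frame by stacking n rotation matrices into a 3x3xn array and summing np.multiply products row by row. As a side note (an equivalent formulation sketched for illustration, not code from the project), the three multiply-and-sum lines collapse into a single einsum over the stacked matrices:

import numpy as np

n = 4
R = np.random.randn(3, 3, n)      # stand-in for the rotation-matrix stack
dxdydz = np.random.randn(n, 3)    # stand-in for column_stack((dX, dY, dZ))

# Row-by-row multiply-and-sum, exactly as in ct2lg above.
manual = np.vstack([np.sum(np.multiply(R[i, :, :], dxdydz.transpose()), axis=0)
                    for i in range(3)])

# The same contraction expressed as a single einsum.
vectorised = np.einsum('ijk,kj->ik', R, dxdydz)

assert np.allclose(manual, vectorised)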

Example 15

def ct2lg(self, dX, dY, dZ, lat, lon):

        n = dX.size
        R = numpy.zeros((3, 3, n))

        R[0, 0, :] = -numpy.multiply(numpy.sin(numpy.deg2rad(lat)), numpy.cos(numpy.deg2rad(lon)))
        R[0, 1, :] = -numpy.multiply(numpy.sin(numpy.deg2rad(lat)), numpy.sin(numpy.deg2rad(lon)))
        R[0, 2, :] = numpy.cos(numpy.deg2rad(lat))
        R[1, 0, :] = -numpy.sin(numpy.deg2rad(lon))
        R[1, 1, :] = numpy.cos(numpy.deg2rad(lon))
        R[1, 2, :] = numpy.zeros((1, n))
        R[2, 0, :] = numpy.multiply(numpy.cos(numpy.deg2rad(lat)), numpy.cos(numpy.deg2rad(lon)))
        R[2, 1, :] = numpy.multiply(numpy.cos(numpy.deg2rad(lat)), numpy.sin(numpy.deg2rad(lon)))
        R[2, 2, :] = numpy.sin(numpy.deg2rad(lat))

        dxdydz = numpy.column_stack((numpy.column_stack((dX, dY)), dZ))

        RR = numpy.reshape(R[0, :, :], (3, n))
        dx = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)
        RR = numpy.reshape(R[1, :, :], (3, n))
        dy = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)
        RR = numpy.reshape(R[2, :, :], (3, n))
        dz = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)

        return dx, dy, dz 

Example 16

def ct2lg(dX, dY, dZ, lat, lon):

    n = dX.size
    R = np.zeros((3, 3, n))

    R[0, 0, :] = -np.multiply(np.sin(np.deg2rad(lat)), np.cos(np.deg2rad(lon)))
    R[0, 1, :] = -np.multiply(np.sin(np.deg2rad(lat)), np.sin(np.deg2rad(lon)))
    R[0, 2, :] = np.cos(np.deg2rad(lat))
    R[1, 0, :] = -np.sin(np.deg2rad(lon))
    R[1, 1, :] = np.cos(np.deg2rad(lon))
    R[1, 2, :] = np.zeros((1, n))
    R[2, 0, :] = np.multiply(np.cos(np.deg2rad(lat)), np.cos(np.deg2rad(lon)))
    R[2, 1, :] = np.multiply(np.cos(np.deg2rad(lat)), np.sin(np.deg2rad(lon)))
    R[2, 2, :] = np.sin(np.deg2rad(lat))

    dxdydz = np.column_stack((np.column_stack((dX, dY)), dZ))

    RR = np.reshape(R[0, :, :], (3, n))
    dx = np.sum(np.multiply(RR, dxdydz.transpose()), axis=0)
    RR = np.reshape(R[1, :, :], (3, n))
    dy = np.sum(np.multiply(RR, dxdydz.transpose()), axis=0)
    RR = np.reshape(R[2, :, :], (3, n))
    dz = np.sum(np.multiply(RR, dxdydz.transpose()), axis=0)

    return dx, dy, dz 

Example 17

def ct2lg(self, dX, dY, dZ, lat, lon):

        n = dX.size
        R = numpy.zeros((3, 3, n))

        R[0, 0, :] = -numpy.multiply(numpy.sin(numpy.deg2rad(lat)), numpy.cos(numpy.deg2rad(lon)))
        R[0, 1, :] = -numpy.multiply(numpy.sin(numpy.deg2rad(lat)), numpy.sin(numpy.deg2rad(lon)))
        R[0, 2, :] = numpy.cos(numpy.deg2rad(lat))
        R[1, 0, :] = -numpy.sin(numpy.deg2rad(lon))
        R[1, 1, :] = numpy.cos(numpy.deg2rad(lon))
        R[1, 2, :] = numpy.zeros((1, n))
        R[2, 0, :] = numpy.multiply(numpy.cos(numpy.deg2rad(lat)), numpy.cos(numpy.deg2rad(lon)))
        R[2, 1, :] = numpy.multiply(numpy.cos(numpy.deg2rad(lat)), numpy.sin(numpy.deg2rad(lon)))
        R[2, 2, :] = numpy.sin(numpy.deg2rad(lat))

        dxdydz = numpy.column_stack((numpy.column_stack((dX, dY)), dZ))

        RR = numpy.reshape(R[0, :, :], (3, n))
        dx = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)
        RR = numpy.reshape(R[1, :, :], (3, n))
        dy = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)
        RR = numpy.reshape(R[2, :, :], (3, n))
        dz = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)

        return dx, dy, dz 

Example 18

def ct2lg(self, dX, dY, dZ, lat, lon):

        n = dX.size
        R = numpy.zeros((3, 3, n))

        R[0, 0, :] = -numpy.multiply(numpy.sin(numpy.deg2rad(lat)), numpy.cos(numpy.deg2rad(lon)))
        R[0, 1, :] = -numpy.multiply(numpy.sin(numpy.deg2rad(lat)), numpy.sin(numpy.deg2rad(lon)))
        R[0, 2, :] = numpy.cos(numpy.deg2rad(lat))
        R[1, 0, :] = -numpy.sin(numpy.deg2rad(lon))
        R[1, 1, :] = numpy.cos(numpy.deg2rad(lon))
        R[1, 2, :] = numpy.zeros((1, n))
        R[2, 0, :] = numpy.multiply(numpy.cos(numpy.deg2rad(lat)), numpy.cos(numpy.deg2rad(lon)))
        R[2, 1, :] = numpy.multiply(numpy.cos(numpy.deg2rad(lat)), numpy.sin(numpy.deg2rad(lon)))
        R[2, 2, :] = numpy.sin(numpy.deg2rad(lat))

        dxdydz = numpy.column_stack((numpy.column_stack((dX, dY)), dZ))

        RR = numpy.reshape(R[0, :, :], (3, n))
        dx = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)
        RR = numpy.reshape(R[1, :, :], (3, n))
        dy = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)
        RR = numpy.reshape(R[2, :, :], (3, n))
        dz = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)

        return dx, dy, dz 

Example 19

def __init__(self, images, labels, fake_data=False):
    if fake_data:
      self._num_examples = 10000
    else:
      assert images.shape[0] == labels.shape[0], (
          "images.shape: %s labels.shape: %s" % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]
      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0 

Example 20

def __init__(self, images, labels, fake_data=False):
    if fake_data:
      self._num_examples = 10000
    else:
      assert images.shape[0] == labels.shape[0], (
          "images.shape: %s labels.shape: %s" % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]

      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0 

Example 21

def phaseSensitive(self):
		"""
			Computation of Phase Sensitive Mask. As appears in :
			H Erdogan, John R. Hershey, Shinji Watanabe, and Jonathan Le Roux,
	   		"Phase-sensitive and recognition-boosted speech separation using deep recurrent neural networks,"
	   		in ICASSP 2015, Brisbane, April, 2015.

		Args:
			mTarget:   (2D ndarray) Magnitude Spectrogram of the target component
			pTarget:   (2D ndarray) Phase Spectrogram of the output component
			mY:        (2D ndarray) Magnitude Spectrogram of the residual component
			pY:        (2D ndarray) Phase Spectrogram of the residual component
		Returns:
			mask:      (2D ndarray) Array that contains time frequency gain values

		"""
		print('Phase Sensitive Masking.')
		# Compute Phase Difference
		Theta = (self._pTarget - self._pY)
		self._mask = 2./ (1. + np.exp(-np.multiply(np.divide(self._sTarget, self._eps + self._nResidual), np.cos(Theta)))) - 1. 
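
The docstring of Example 21 describes the phase-sensitive mask of Erdogan et al. (ICASSP 2015): the ratio of target to residual magnitudes is weighted by the cosine of the phase difference and squashed into the interval (-1, 1). A standalone sketch of that formula on random toy spectrograms (the array shapes, the eps value and the offset on the residual are assumptions for illustration):

import numpy as np

eps = 1e-12
sTarget = np.abs(np.random.randn(257, 100))          # toy target magnitudes
nResidual = np.abs(np.random.randn(257, 100)) + 1.0  # toy residual magnitudes
pTarget = np.random.uniform(-np.pi, np.pi, (257, 100))
pY = np.random.uniform(-np.pi, np.pi, (257, 100))

Theta = pTarget - pY                                 # phase difference
mask = 2. / (1. + np.exp(-np.multiply(np.divide(sTarget, eps + nResidual),
                                      np.cos(Theta)))) - 1.
print(mask.min() >= -1.0 and mask.max() <= 1.0)      # gains lie in (-1, 1)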

Example 22

def __init__(self,
                 images,
                 labels,
                 dtype=dtypes.float32,
                 reshape=True):

        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' %dtype)

        self._num_examples = images.shape[0]

        if dtype == dtypes.float32:
            # Convert from [0, 255] -> [0.0, 1.0].
            images = images.astype(np.float32)
            images = np.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0 

Example 23

def test_wrap_with_iterable(self):
        # test fix for bug #1026:

        class with_wrap(np.ndarray):
            __array_priority__ = 10

            def __new__(cls):
                return np.asarray(1).view(cls).copy()

            def __array_wrap__(self, arr, context):
                return arr.view(type(self))

        a = with_wrap()
        x = ncu.multiply(a, (1, 2, 3))
        self.assertTrue(isinstance(x, with_wrap))
        assert_array_equal(x, np.array((1, 2, 3))) 

Example 24

def test_out_override(self):
        # 2016-01-29: NUMPY_UFUNC_DISABLED
        return

        # regression test for github bug 4753
        class OutClass(np.ndarray):
            def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
                if 'out' in kw:
                    tmp_kw = kw.copy()
                    tmp_kw.pop('out')
                    func = getattr(ufunc, method)
                    kw['out'][...] = func(*inputs, **tmp_kw)

        A = np.array([0]).view(OutClass)
        B = np.array([5])
        C = np.array([6])
        np.multiply(C, B, A)
        assert_equal(A[0], 30)
        assert_(isinstance(A, OutClass))
        A[0] = 0
        np.multiply(C, B, out=A)
        assert_equal(A[0], 30)
        assert_(isinstance(A, OutClass)) 

Example 25

def predictions_for_tiles(test_images, model):
    """Batch predictions on the test image set, to avoid a memory spike."""
    npy_test_images = numpy.array([img_loc_tuple[0] for img_loc_tuple in test_images])
    test_images = npy_test_images.astype(numpy.float32)
    test_images = numpy.multiply(test_images, 1.0 / 255.0)

    all_predictions = []
    for x in range(0, len(test_images) - 100, 100):
        for p in model.predict(test_images[x:x + 100]):
            all_predictions.append(p)

    for p in model.predict(test_images[len(all_predictions):]):
        all_predictions.append(p)
    assert len(all_predictions) == len(test_images)

    return all_predictions 

Example 26

def knn_masked_data(trX,trY,missing_data_dir, input_shape, k):
    
    raw_im_data = np.loadtxt(join(script_dir,missing_data_dir,'index.txt'),delimiter=' ',dtype=str)
    raw_mask_data = np.loadtxt(join(script_dir,missing_data_dir,'index_mask.txt'),delimiter=' ',dtype=str)
    # Using 'brute' method since we only want to do one query per classifier
    # so this will be quicker as it avoids overhead of creating a search tree
    knn_m = KNeighborsClassifier(algorithm='brute',n_neighbors=k)
    prob_Y_hat = np.zeros((raw_im_data.shape[0],int(np.max(trY)+1)))
    total_images = raw_im_data.shape[0]
    pbar = progressbar.ProgressBar(widgets=[progressbar.FormatLabel('\rProcessed %(value)d of %(max)d Images '), progressbar.Bar()], maxval=total_images, term_width=50).start()
    for i in range(total_images):
        mask_im=load_image(join(script_dir,missing_data_dir,raw_mask_data[i][0]), input_shape,1).reshape(np.prod(input_shape))
        mask = np.logical_not(mask_im > eps) # since mask is 1 at missing locations
        v_im=load_image(join(script_dir,missing_data_dir,raw_im_data[i][0]), input_shape, 255).reshape(np.prod(input_shape))
        rep_mask = np.tile(mask,(trX.shape[0],1))
        # Corrupt whole training set according to the current mask
        corr_trX = np.multiply(trX, rep_mask)        
        knn_m.fit(corr_trX, trY)
        prob_Y_hat[i,:] = knn_m.predict_proba(v_im.reshape(1,-1))
        pbar.update(i)
    pbar.finish()
    return prob_Y_hat 

Example 27

def cochleagram_extractor(xx, sr, win_len, shift_len, channel_number, win_type):
    fcoefs, f = make_erb_filters(sr, channel_number, 50)
    fcoefs = np.flipud(fcoefs)
    xf = erb_frilter_bank(xx, fcoefs)

    if win_type == 'hanning':
        window = np.hanning(channel_number)
    elif win_type == 'hamming':
        window = np.hamming(channel_number)
    elif win_type == 'triangle':
        window = (1 - (np.abs(channel_number - 1 - 2 * np.arange(1, channel_number + 1, 1)) / (channel_number + 1)))
    else:
        window = np.ones(channel_number)
    window = window.reshape((channel_number, 1))

    xe = np.power(xf, 2.0)
    frames = 1 + ((np.size(xe, 1)-win_len) // shift_len)
    cochleagram = np.zeros((channel_number, frames))
    for i in range(frames):
        one_frame = np.multiply(xe[:, i*shift_len:i*shift_len+win_len], np.repeat(window, win_len, 1))
        cochleagram[:, i] = np.sqrt(np.mean(one_frame, 1))

    cochleagram = np.where(cochleagram == 0.0, np.finfo(float).eps, cochleagram)
    return cochleagram 

Example 28

def log_power_spectrum_extractor(x, win_len, shift_len, win_type, is_log=False):
    samples = x.shape[0]
    frames = (samples - win_len) // shift_len
    stft = np.zeros((win_len, frames), dtype=np.complex64)
    spect = np.zeros((win_len // 2 + 1, frames), dtype=np.float64)

    if win_type == 'hanning':
        window = np.hanning(win_len)
    elif win_type == 'hamming':
        window = np.hamming(win_len)
    elif win_type == 'rectangle':
        window = np.ones(win_len)

    for i in range(frames):
        one_frame = x[i*shift_len: i*shift_len+win_len]
        windowed_frame = np.multiply(one_frame, window)
        stft[:, i] = np.fft.fft(windowed_frame, win_len)
        if is_log:
            spect[:, i] = np.log(np.power(np.abs(stft[0: win_len//2+1, i]), 2.))
        else:
            spect[:, i] = np.power(np.abs(stft[0: win_len//2+1, i]), 2.)

    return spect 
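
Examples 28 to 30 all follow the same pattern: slice the signal into overlapping frames, taper each frame with a window via np.multiply, and take an FFT. The sketch below strips that loop down to its essentials on a synthetic sine wave (the sample rate and frame sizes are arbitrary choices for illustration):

import numpy as np

sr, win_len, shift_len = 16000, 512, 256
t = np.arange(sr) / sr
x = np.sin(2 * np.pi * 440 * t)                 # one second of a 440 Hz tone

window = np.hanning(win_len)
frames = (x.shape[0] - win_len) // shift_len
spect = np.zeros((win_len // 2 + 1, frames))

for i in range(frames):
    one_frame = x[i * shift_len: i * shift_len + win_len]
    windowed_frame = np.multiply(one_frame, window)    # taper the frame edges
    spect[:, i] = np.abs(np.fft.fft(windowed_frame, win_len)[: win_len // 2 + 1])

print(spect.shape)    # (257, 60): frequency bins x frames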

Example 29

def stft_extractor(x, win_len, shift_len, win_type):
    samples = x.shape[0]
    frames = (samples - win_len) // shift_len
    stft = np.zeros((win_len, frames), dtype=np.complex64)
    spect = np.zeros((win_len // 2 + 1, frames), dtype=np.complex64)

    if win_type == 'hanning':
        window = np.hanning(win_len)
    elif win_type == 'hamming':
        window = np.hamming(win_len)
    elif win_type == 'rectangle':
        window = np.ones(win_len)

    for i in range(frames):
        one_frame = x[i*shift_len: i*shift_len+win_len]
        windowed_frame = np.multiply(one_frame, window)
        stft[:, i] = np.fft.fft(windowed_frame, win_len)
        spect[:, i] = stft[: win_len//2+1, i]

    return spect 

Example 30

def spectrum_extractor(x, win_len, shift_len, win_type, is_log):
    samples = x.shape[0]
    frames = (samples - win_len) // shift_len
    stft = np.zeros((win_len, frames), dtype=np.complex64)
    spectrum = np.zeros((win_len // 2 + 1, frames), dtype=np.float64)

    if win_type == 'hanning':
        window = np.hanning(win_len)
    elif win_type == 'hamming':
        window = np.hamming(win_len)
    elif win_type == 'triangle':
        window = (1 - (np.abs(win_len - 1 - 2 * np.arange(1, win_len + 1, 1)) / (win_len + 1)))
    else:
        window = np.ones(win_len)
    for i in range(frames):
        one_frame = x[i*shift_len: i*shift_len+win_len]
        windowed_frame = np.multiply(one_frame, window)
        stft[:, i] = np.fft.fft(windowed_frame, win_len)
        if is_log:
            spectrum[:, i] = np.log(np.abs(stft[0: win_len//2+1, i]))
        else:
            spectrum[:, i] = np.abs(stft[0: win_len // 2 + 1:, i])

    return spectrum 

Example 31

def get_volume_of_dose(self, dose, **kwargs):
        volumes = np.zeros(self.count)
        for x in range(0, self.count):

            dvh = np.zeros(len(self.dvh))
            for y in range(0, len(self.dvh)):
                dvh[y] = self.dvh[y][x]
            if 'input' in kwargs and kwargs['input'] == 'relative':
                if isinstance(self.rx_dose[x], basestring):
                    volumes[x] = 0
                else:
                    volumes[x] = volume_of_dose(dvh, dose * self.rx_dose[x])
            else:
                volumes[x] = volume_of_dose(dvh, dose)

        if 'output' in kwargs and kwargs['output'] == 'absolute':
            volumes = np.multiply(volumes, self.volume[0:self.count])
        else:
            volumes = np.multiply(volumes, 100.)

        return volumes.tolist() 

Example 32

def phaseSensitive(self):
		"""
			Computation of Phase Sensitive Mask. As appears in :
			H Erdogan, John R. Hershey, Shinji Watanabe, and Jonathan Le Roux,
	   		"Phase-sensitive and recognition-boosted speech separation using deep recurrent neural networks,"
	   		in ICASSP 2015, Brisbane, April, 2015.

		Args:
			mTarget:   (2D ndarray) Magnitude Spectrogram of the target component
			pTarget:   (2D ndarray) Phase Spectrogram of the output component
			mY:        (2D ndarray) Magnitude Spectrogram of the output component
			pY:        (2D ndarray) Phase Spectrogram of the output component
		Returns:
			mask:      (2D ndarray) Array that contains time frequency gain values

		"""
		print('Phase Sensitive Masking.')
		# Compute Phase Difference
		Theta = (self._pTarget - self._pY)
		self._mask = 2./ (1. + np.exp(-np.multiply(np.divide(self._sTarget, self._eps + self._nResidual), np.cos(Theta)))) - 1. 

Example 33

def next_frame(self, pixels, t, collaboration_state, osc_data):

        # render every 2 frames so the ripples are slower
        self.frameCount += 1
        if (self.frameCount % 2 == 0):
            pixels[:, :] = self.get_pixels()
            return

        # only generate a ripple every couple frames
        if (random.random() < 0.12):
            self.start_ripple()

        # calculate pixel values based on their neighbors
        self.ripple_state[1:-1, 1:-1] = (
            self.previous_ripple_state[:-2, 1:-1] + self.previous_ripple_state[2:, 1:-1] +
            self.previous_ripple_state[1:-1, :-2] + self.previous_ripple_state[1:-1, 2:]
        ) * 0.5 - self.ripple_state[1:-1, 1:-1]

        # damping
        # the default casting rule refuses to write the float product back into the int buffer, so use casting='unsafe'
        np.multiply(self.ripple_state, self.damping, out=self.ripple_state, casting='unsafe')

        pixels[:, :] = self.get_pixels()
        self.swap_buffers() 
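
The comment in Example 33 (and the casting="unsafe" argument in Example 1) points at a real ufunc restriction: under the default same_kind casting rule, np.multiply refuses to write a float product into an integer out array. A minimal reproduction of both the failure and the workaround:

import numpy as np

state = np.arange(6, dtype=np.int64).reshape(2, 3)
damping = 0.9

try:
    np.multiply(state, damping, out=state)       # rejected by same_kind casting
except TypeError as exc:
    print('default casting failed:', exc)

# With casting='unsafe' the float result is truncated back into the int buffer.
np.multiply(state, damping, out=state, casting='unsafe')
print(state)                                     # [[0 0 1] [2 3 4]]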

Example 34

def __init__(self, images, labels, fake_data=False):
    if fake_data:
      self._num_examples = 10000
    else:
      assert images.shape[0] == labels.shape[0], (
          "images.shape: %s labels.shape: %s" % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]
      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0 

Example 35

def __init__(self, images, labels, fake_data=False):
    if fake_data:
      self._num_examples = 10000
    else:
      assert images.shape[0] == labels.shape[0], (
          "images.shape: %s labels.shape: %s" % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]
      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0 

Example 36

def __init__(self, images, labels, fake_data=False):
    if fake_data:
      self._num_examples = 10000
    else:
      assert images.shape[0] == labels.shape[0], (
          "images.shape: %s labels.shape: %s" % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]
      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0 

Example 37

def imresizemex(inimg, weights, indices, dim):
    in_shape = inimg.shape
    w_shape = weights.shape
    out_shape = list(in_shape)
    out_shape[dim] = w_shape[0]
    outimg = np.zeros(out_shape)
    if dim == 0:
        for i_img in range(in_shape[1]):
            for i_w in range(w_shape[0]):
                w = weights[i_w, :]
                ind = indices[i_w, :]
                im_slice = inimg[ind, i_img].astype(np.float64)
                outimg[i_w, i_img] = np.sum(np.multiply(np.squeeze(im_slice, axis=0), w.T), axis=0)
    elif dim == 1:
        for i_img in range(in_shape[0]):
            for i_w in range(w_shape[0]):
                w = weights[i_w, :]
                ind = indices[i_w, :]
                im_slice = inimg[i_img, ind].astype(np.float64)
                outimg[i_img, i_w] = np.sum(np.multiply(np.squeeze(im_slice, axis=0), w.T), axis=0)        
    if inimg.dtype == np.uint8:
        outimg = np.clip(outimg, 0, 255)
        return np.around(outimg).astype(np.uint8)
    else:
        return outimg 

Example 38

def test_cputensor_multiply_constant():
    """TODO."""
    M = ng.make_axis(length=1)
    N = ng.make_axis(length=3)

    np_a = np.array([[1, 2, 3]], dtype=np.float32)
    np_c = np.multiply(np_a, 2)

    a = ng.constant(np_a, [M, N])
    b = ng.constant(2)
    c = ng.multiply(a, b)

    with executor(c) as ex:
        result = ex()
    print(result)
    assert np.array_equal(result, np_c) 

Example 39

def test_cputensor_fusion():
    """TODO."""
    M = ng.make_axis(length=1)
    N = ng.make_axis(length=3)

    np_a = np.array([[1, 2, 3]], dtype=np.float32)
    np_b = np.array([[3, 2, 1]], dtype=np.float32)
    np_d = np.multiply(np_b, np.add(np_a, 2))

    a = ng.constant(np_a, [M, N])
    b = ng.constant(np_b, [M, N])
    c = ng.constant(2)
    d = ng.multiply(b, ng.add(a, c))

    with executor(d) as ex:
        result = ex()
    print(result)
    assert np.array_equal(result, np_d) 

Example 40

def discrete_uniform(self, low, high, quantum, axes, dtype=None):
        """
        Returns a tensor initialized with a discrete uniform distribution.

        Arguments:
            low: The lower limit of the values.
            high: The upper limit of the values.
            quantum: Distance between values.
            axes: The axes of the tensor.

        Returns:
            The tensor.

        """
        if dtype is None:
            dtype = self.dtype

        n = math.floor((high - low) / quantum)
        result = np.array(self.rng.random_integers(
            0, n, ng.make_axes(axes).lengths), dtype=dtype)
        np.multiply(result, quantum, result)
        np.add(result, low, result)
        return result 
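
The docstring of Example 40 describes sampling a discrete uniform distribution on [low, high] with spacing quantum; the implementation draws integers and then rescales them in place with np.multiply and np.add. The same trick with a plain NumPy generator (the axes machinery of the original framework is left out here):

import math
import numpy as np

low, high, quantum = -1.0, 1.0, 0.25
rng = np.random.default_rng(0)

n = math.floor((high - low) / quantum)           # number of quantum steps
result = rng.integers(0, n, size=(4, 5), endpoint=True).astype(np.float64)
np.multiply(result, quantum, result)             # scale to the grid spacing
np.add(result, low, result)                      # shift so the grid starts at low

print(np.unique(result))    # values lie on {-1.0, -0.75, ..., 1.0}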

Example 41

def __init__(self, images, labels, fake_data=False):
        if fake_data:
            self._num_examples = 10000
        else:
            assert images.shape[0] == labels.shape[0], (
                "images.shape: %s labels.shape: %s" % (images.shape,
                                                       labels.shape))
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            assert images.shape[3] == 1
            images = images.reshape(images.shape[0],
                                    images.shape[1] * images.shape[2])
            # Convert from [0, 255] -> [0.0, 1.0].
            images = images.astype(numpy.float32)
            images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0 

Example 42

def train(self):
        eps = 1e-10
        for i in range(self.epo):
            if i % 1 == 0:
                self.show_error()

            A = np.asarray(self.A.copy())
            Z = np.asarray(self.Z.copy())
            start = time.time()
            Z1 = np.multiply(Z, np.asarray(self.A.transpose() * self.Y))
            Z = np.divide(Z1, eps + np.asarray(self.A.transpose() * self.A * self.Z)) # + eps to avoid divided by 0
            self.Z = np.asmatrix(Z)
            A1 = np.multiply(A, np.asarray( self.Y * self.Z.transpose()))
            A = np.divide(A1, eps + np.asarray( self.A * self.Z * self.Z.transpose()))
            end = time.time()
            self.A = np.asmatrix(A)
            self.time = self.time + end - start 
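
Example 42 is the iteration loop of the classic multiplicative update rules for non-negative matrix factorisation (Y is approximated by A @ Z): Z is scaled by A.T @ Y over A.T @ A @ Z, and A by Y @ Z.T over A @ Z @ Z.T, with a small eps guarding against division by zero. A compact standalone sketch of the same updates on plain ndarrays (random data and a fixed iteration count, both purely illustrative):

import numpy as np

eps = 1e-10
rng = np.random.default_rng(0)
Y = rng.random((20, 30))        # non-negative data matrix
A = rng.random((20, 5))         # basis,        Y ~= A @ Z
Z = rng.random((5, 30))         # coefficients

for _ in range(100):
    Z = np.multiply(Z, A.T @ Y) / (eps + A.T @ A @ Z)
    A = np.multiply(A, Y @ Z.T) / (eps + A @ Z @ Z.T)

print(np.linalg.norm(Y - A @ Z) / np.linalg.norm(Y))   # relative error shrinks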

Example 43

def __init__(self, images, labels, fake_data=False):
        if fake_data:
            self._num_examples = 10000
        else:
            assert images.shape[0] == labels.shape[0], (
                "images.shape: %s labels.shape: %s" % (images.shape,
                                                       labels.shape))
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            assert images.shape[3] == 1
            images = images.reshape(images.shape[0],
                                    images.shape[1] * images.shape[2])
            # Convert from [0, 255] -> [0.0, 1.0].
            images = images.astype(numpy.float32)
            images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0 

Example 44

def plot_defect_classifications(bmp, list_of_classified_defects, unclassified_defect_region, td_classify, defect_free_region):
  
  plt.rcParams['figure.figsize'] = (10.0, 10.0);
  plt.set_cmap('gray');

  fig = plt.figure();
  ax = fig.add_subplot(111);
  fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None);
  
  # Plot the labeled defect regions on top of the temperature field
  bmp[defect_free_region==1.] = 0.5*bmp[defect_free_region==1.] # Defect-free region
  
  txt_out = []
  for defect in list_of_classified_defects:
      defect_center = centroid(defect['defect_region'])
      outline = defect['defect_region'] ^ morphology.binary_dilation(defect['defect_region'],morphology.disk(2))
      bmp[outline==1] = 255
      txt = ax.annotate(DEFECT_TYPES[defect['defect_type']],(defect_center[0]-5,defect_center[1]), color='white', fontweight='bold', fontsize=10);
      txt.set_path_effects([PathEffects.withStroke(linewidth=2, foreground='k')]);
      txt_out.append(txt)

  unknown_td = np.multiply(unclassified_defect_region, (td_classify != 0).astype(np.int))
  bmp[morphology.binary_dilation(unknown_td,morphology.disk(2))==1] = 0
  bmp[morphology.binary_dilation(unknown_td,morphology.disk(1))==1] = 255

  frame = ax.imshow(bmp);
  
  ax.axis('off');
  
  return fig, ax, frame, txt_out 

Example 45

def mult(self, target, deps, geo_mean_flag, tfo):
        
        #SUPPORT NONE TARGET
        
        target_vec = self.word_vecs.represent(target)
        scores = self.word_vecs.pos_scores(target_vec)
        for dep in deps:
            if dep in self.context_vecs:
                dep_vec = self.context_vecs.represent(dep)
                mult_scores = self.word_vecs.pos_scores(dep_vec)
                if geo_mean_flag:
                    mult_scores = mult_scores**(1.0/len(deps))        
                scores = np.multiply(scores, mult_scores)
            else:
                tfo.write("NOTICE: %s not in context embeddings. Ignoring.\n" % dep)   
                
        result_vec = self.word_vecs.top_scores(scores, -1)                
        return result_vec 

Example 46

def getTopWeightedFeatures(experiment_id, inst_exp_id, instance_id, size):
    instance_id = int(instance_id)
    exp = ExperimentFactory.getFactory().fromJson(experiment_id, session)
    validation_experiment = ExperimentFactory.getFactory().fromJson(inst_exp_id, session)
    #get the features
    features_names, features_values = validation_experiment.getFeatures(instance_id)
    features_values = [float(value) for value in features_values]
    #get the pipeline with scaler and logistic model
    pipeline = exp.getModelPipeline()
    #scale the features
    scaled_values = pipeline.named_steps['scaler'].transform(np.reshape(features_values,(1, -1)))
    weighted_values = np.multiply(scaled_values, pipeline.named_steps['model'].coef_)
    features = map(lambda name, value, w_value: (name, value, w_value),
                          features_names, features_values, weighted_values[0])
    features.sort(key = lambda tup: abs(tup[2]))
    features = features[:-int(size)-1:-1]
    tooltips = [x[1] for x in features]
    barplot = BarPlot([x[0] for x in features])
    dataset = PlotDataset([x[2] for x in features], None)
    dataset.setColor(colors_tools.red)
    barplot.addDataset(dataset)
    return jsonify(barplot.toJson(tooltip_data = tooltips)) 

Example 47

def __init__(self, images, labels, fake_data=False):
    """Construct a DataSet. """
    assert images.shape[0] == labels.shape[0], (
        'images.shape: %s labels.shape: %s' % (images.shape,
                                               labels.shape))
    self._num_examples = images.shape[0]

    # Convert shape from [num examples, rows, columns, depth]
    # to [num examples, rows*columns] (assuming depth == 1)
    assert images.shape[3] == 1
    images = images.reshape(images.shape[0],
                            images.shape[1] * images.shape[2])
    # Convert from [0, 255] -> [0.0, 1.0].
    images = images.astype(np.float32)
    images = np.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels 

Example 48

def prop_backward(self, X, y):
        layers_rev = list(reversed(self.layers))
        Zs_rev = list(reversed(self.Zs))
        As_rev = list(reversed(self.As))
        As_rev.append(X)

        delta0 = np.multiply(-(y - As_rev[0]), self.sigma_prime(Zs_rev[0]))
        djdw0 = np.dot(As_rev[1].T, delta0)

        self.deltas = [delta0]
        self.djdws = [djdw0]

        for i in xrange(0, len(layers_rev) - 1):
            delta_n = np.dot(self.deltas[i], layers_rev[i].W.T) * \
                self.sigma_prime(Zs_rev[i + 1])
            djdw_n = np.dot(As_rev[i + 2].T, delta_n)

            self.deltas.append(delta_n)
            self.djdws.append(djdw_n)

        self.deltas = list(reversed(self.deltas))
        self.djdws = list(reversed(self.djdws)) 

Example 49

def prop_backward(self, X, y):
        layers_rev = list(reversed(self.layers))
        Zs_rev = list(reversed(self.Zs))
        As_rev = list(reversed(self.As))
        As_rev.append(X)

        delta0 = np.multiply(-(y - As_rev[0]), self.sigma_prime(Zs_rev[0]))
        djdw0 = np.dot(As_rev[1].T, delta0)

        self.deltas = [delta0]
        self.djdws = [djdw0]

        for i in xrange(0, len(layers_rev) - 1):
            delta_n = np.dot(self.deltas[i], layers_rev[i].W.T) * \
                self.sigma_prime(Zs_rev[i + 1])
            djdw_n = np.dot(As_rev[i + 2].T, delta_n)

            self.deltas.append(delta_n)
            self.djdws.append(djdw_n)

        self.deltas = list(reversed(self.deltas))
        self.djdws = list(reversed(self.djdws)) 

Example 50

def __init__(self, images, labels, fake_data=False):
    if fake_data:
      self._num_examples = 10000
    else:
      assert images.shape[0] == labels.shape[0], (
          "images.shape: %s labels.shape: %s" % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]
      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0 