Python numpy.float_() Usage Examples

The following are code examples showing how to use numpy.float_(). They are extracted from open source Python projects.
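
As a quick orientation (a minimal sketch, not taken from any of the projects below): in NumPy 1.x, np.float_ is simply an alias for np.float64, usable both as a dtype and as a scalar/array constructor; the alias was removed in NumPy 2.0, where np.float64 should be used directly.

import numpy as np

assert np.float_ is np.float64          # alias for the double-precision float type

a = np.zeros(3, dtype=np.float_)        # as a dtype argument
x = np.float_(3)                        # as a scalar constructor -> 3.0
b = np.float_([1, 2])                   # casts a sequence -> array([1., 2.])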

Example 1

def _standard_normal(shape, randstate=np.random, dtype=np.float_):
    """Generates a standard normal numpy array of given shape and dtype, i.e.
    this function is equivalent to `randstate.randn(*shape)` for real dtype and
    `randstate.randn(*shape) + 1.j * randstate.randn(*shape)` for complex dtype.

    :param tuple shape: Shape of array to be returned
    :param randstate: An instance of :class:`numpy.random.RandomState` (default is
        ``np.random``)
    :param dtype: ``np.float_`` (default) or ``np.complex_``

    Returns
    -------

    A: An array of given shape and dtype with standard normal entries

    """
    if dtype == np.float_:
        return randstate.randn(*shape)
    elif dtype == np.complex_:
        return randstate.randn(*shape) + 1.j * randstate.randn(*shape)
    else:
        raise ValueError('{} is not a valid dtype.'.format(dtype)) 
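
A short usage sketch for the helper above (shape and seed chosen purely for illustration):

rng = np.random.RandomState(0)
real = _standard_normal((2, 3), randstate=rng, dtype=np.float_)
cplx = _standard_normal((2, 3), randstate=rng, dtype=np.complex_)
assert real.dtype == np.float_ and cplx.dtype == np.complex_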

Example 2

def test_operations_typesafety(nr_sites, local_dim, rank, rgen):
    # create a real MPA
    mpo1 = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
                              randstate=rgen, dtype=np.float_)
    mpo2 = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
                              randstate=rgen, dtype=np.complex_)

    assert mpo1.dtype == np.float_
    assert mpo2.dtype == np.complex_

    assert (mpo1 + mpo1).dtype == np.float_
    assert (mpo1 + mpo2).dtype == np.complex_
    assert (mpo2 + mpo1).dtype == np.complex_

    assert mp.sumup((mpo1, mpo1)).dtype == np.float_
    assert mp.sumup((mpo1, mpo2)).dtype == np.complex_
    assert mp.sumup((mpo2, mpo1)).dtype == np.complex_

    assert (mpo1 - mpo1).dtype == np.float_
    assert (mpo1 - mpo2).dtype == np.complex_
    assert (mpo2 - mpo1).dtype == np.complex_

    mpo1 += mpo2
    assert mpo1.dtype == np.complex_ 

Example 3

def __init__(self, data, bucket_size=128):
        if bucket_size < 1:
            raise ValueError("A minimum bucket size of 1 is expected.")

        self._data = data
        self._n, self._k = self._data.shape
        self._nodes = None
        self._buckets = []
        self._bucket_size = bucket_size

        self._node_dtype = numpy.dtype([
            ('size', numpy.intp),
            ('bucket', numpy.intp),
            ('lower_bounds', (numpy.float_, self._k)),
            ('upper_bounds', (numpy.float_, self._k)),
        ])
        self._neighbour_dtype = numpy.dtype([
            ('squared_distance', numpy.float_),
            ('index', numpy.intp),
        ])

        self._build() 

Example 4

def search(self, point, count, radius, sort):
        """Retrieve the neighbours to a point."""
        if count is None:
            count = self._n
        elif count < 1:
            return numpy.empty(0, dtype=self._neighbour_dtype)

        if radius is None:
            radius = numpy.inf
        elif radius < 0.0:
            return numpy.empty(0, dtype=self._neighbour_dtype)

        point = numpy.asarray(point, dtype=numpy.float_)
        if count >= self._n:
            return self._search_all_within_radius(point, radius, sort)
        else:
            return self._search_k_nearests(point, count, radius, sort) 

Example 5

def test_empty_tuple_index(self):
        # Empty tuple index creates a view
        a = np.array([1, 2, 3])
        assert_equal(a[()], a)
        assert_(a[()].base is a)
        a = np.array(0)
        assert_(isinstance(a[()], np.int_))

        # Regression, it needs to fall through integer and fancy indexing
        # cases, so need the with statement to ignore the non-integer error.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', '', DeprecationWarning)
            a = np.array([1.])
            assert_(isinstance(a[0.], np.float_))

            a = np.array([np.array(1)], dtype=object)
            assert_(isinstance(a[0.], np.ndarray)) 

Example 6

def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8):
    """
    Returns true if all components of a and b are equal to given tolerances.

    If fill_value is True, masked values are considered equal. Otherwise,
    masked values are considered unequal.  The relative error rtol should
    be positive and << 1.0. The absolute error atol comes into play for
    those elements of b that are very small or zero; it says how small a
    must be also.

    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    if d1.dtype.char == "O" or d2.dtype.char == "O":
        return np.equal(d1, d2).ravel()
    x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
    d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y))
    return d.ravel() 

Example 7

def _fix_type(value):
    """convert possible types to str, float, and bool"""
    # Because numpy floats can not be pickled to json
    if isinstance(value, string_types):
        return str(value)
    if isinstance(value, float_):
        return float(value)
    if isinstance(value, bool_):
        return bool(value)
    if isinstance(value, set):
        return list(value)
    if isinstance(value, Basic):
        return str(value)
    if hasattr(value, 'id'):
        return str(value.id)
    # if value is None:
    #     return ''
    return value 

Example 8

def default(self, obj):
        # convert dates and numpy objects in a json serializable format
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
        elif isinstance(obj, date):
            return obj.strftime('%Y-%m-%d')
        elif type(obj) in (np.int_, np.intc, np.intp, np.int8, np.int16,
                           np.int32, np.int64, np.uint8, np.uint16,
                           np.uint32, np.uint64):
            return int(obj)
        elif type(obj) in (np.bool_,):
            return bool(obj)
        elif type(obj) in (np.float_, np.float16, np.float32, np.float64,
                           np.complex_, np.complex64, np.complex128):
            return float(obj)

        # Let the base class default method raise the TypeError
        return json.JSONEncoder.default(self, obj) 
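
As a hedged usage sketch, an encoder like the one above is typically passed to json.dumps through its cls argument; the class name NumpyJSONEncoder below is hypothetical, standing in for whichever json.JSONEncoder subclass defines this default method:

import json
import numpy as np

payload = {'count': np.int64(3), 'flag': np.bool_(True), 'score': np.float_(0.5)}
print(json.dumps(payload, cls=NumpyJSONEncoder))
# {"count": 3, "flag": true, "score": 0.5}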

Example 9

def event_bounds_expressions(self, event_bounds_exp):
        if hasattr(self, 'output_equations'):
            assert len(event_bounds_exp)+1 == self.output_equations.shape[0]
        if hasattr(self, 'output_equations_functions'):
            assert len(event_bounds_exp)+1 == \
                self.output_equations_functions.size
        if hasattr(self, 'state_equations'):
            assert len(event_bounds_exp)+1 == self.state_equations.shape[0]
        if hasattr(self, 'state_equations_functions'):
            assert len(event_bounds_exp)+1 == \
                self.state_equations_functions.size
        self._event_bounds_expressions = event_bounds_exp
        self.event_bounds = np.array(
            [sp.N(bound, subs=self.constants_values)
             for bound in event_bounds_exp],
            dtype=np.float_
        ) 

Example 10

def find_a_dominant_color(image):
    # K-mean clustering to find the k most dominant color, from:
    # http://stackoverflow.com/questions/3241929/python-find-dominant-most-common-color-in-an-image
    n_clusters = 5

    # Get image into a workable form
    im = image.copy()
    im = im.resize((150, 150))      # optional, to reduce time
    ar = scipy.misc.fromimage(im)
    im_shape = ar.shape
    ar = ar.reshape(scipy.product(im_shape[:2]), im_shape[2])
    ar = np.float_(ar)

    # Compute clusters
    codes, dist = scipy.cluster.vq.kmeans(ar, n_clusters)
    vecs, dist = scipy.cluster.vq.vq(ar, codes)         # assign codes
    counts, bins = scipy.histogram(vecs, len(codes))    # count occurrences

    # Get the indexes of the most frequent, 2nd most frequent, 3rd, ...
    # (np.argsort sorts ascending, so reverse to put the most frequent first)
    sorted_idxs = np.argsort(counts)[::-1]

    # Get the color
    peak = codes[sorted_idxs[1]] # get second most frequent color

    return [int(i) for i in peak.tolist()] # list comprehension to quickly cast everything to int 

Example 11

def test_empty_fancy(self):
        empty_farr = np.array([], dtype=np.float_)
        empty_iarr = np.array([], dtype=np.int_)
        empty_barr = np.array([], dtype=np.bool_)

        # pd.DatetimeIndex is excluded, because it overrides getitem and should
        # be tested separately.
        for idx in [self.strIndex, self.intIndex, self.floatIndex]:
            empty_idx = idx.__class__([])

            self.assertTrue(idx[[]].identical(empty_idx))
            self.assertTrue(idx[empty_iarr].identical(empty_idx))
            self.assertTrue(idx[empty_barr].identical(empty_idx))

            # np.ndarray only accepts ndarray of int & bool dtypes, so should
            # Index.
            self.assertRaises(IndexError, idx.__getitem__, empty_farr) 

Example 12

def test_fromValue(self):

        nans = Series(np.NaN, index=self.ts.index)
        self.assertEqual(nans.dtype, np.float_)
        self.assertEqual(len(nans), len(self.ts))

        strings = Series('foo', index=self.ts.index)
        self.assertEqual(strings.dtype, np.object_)
        self.assertEqual(len(strings), len(self.ts))

        d = datetime.now()
        dates = Series(d, index=self.ts.index)
        self.assertEqual(dates.dtype, 'M8[ns]')
        self.assertEqual(len(dates), len(self.ts))

        # GH12336
        # Test construction of categorical series from value
        categorical = Series(0, index=self.ts.index, dtype="category")
        expected = Series(0, index=self.ts.index).astype("category")
        self.assertEqual(categorical.dtype, 'category')
        self.assertEqual(len(categorical), len(self.ts))
        tm.assert_series_equal(categorical, expected) 

Example 13

def almost(a, b, decimal=6, fill_value=True):
    """
    Returns True if a and b are equal up to decimal places.

    If fill_value is True, masked values are considered equal. Otherwise,
    masked values are considered unequal.

    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    if d1.dtype.char == "O" or d2.dtype.char == "O":
        return np.equal(d1, d2).ravel()
    x = filled(masked_array(d1, copy=False, mask=m), fill_value).astype(float_)
    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_)
    d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal)
    return d.ravel() 

Example 14

def kl_divergence(p, q):
    """
    Returns KL-divergence of distribution q from distribution p.
    
    The Kullback-Leibler (KL) divergence is defined as
    
    .. math::
    
           \\textrm{KL-divergence}(p, q) :=
           \\sum_{x} p(x) \\log{} \\frac{p(x)}{q(x)}
    
    Warning: this function uses numpy's scalar floating point types to
    perform the evaluation. Therefore, the result may be non-finite.
    For example, if the state x has non-zero probability for distribution p,
    but zero probability for distribution q, then the result will be
    non-finite.
    """
    accum = 0.0
    for x in p:
        p_x = numpy.float_(p[x])
        if p_x != 0.0:
            q_x = numpy.float_(q.get(x, 0.0))
            accum += p_x * numpy.log(p_x / q_x)
    return accum 
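
A small usage sketch, assuming p and q are dicts mapping states to probabilities, as the loop above implies:

p = {'a': 0.5, 'b': 0.5}
q = {'a': 0.9, 'b': 0.1}
print(kl_divergence(p, q))            # ~0.511 nats
print(kl_divergence(p, {'a': 1.0}))   # inf: q assigns zero probability to 'b' (emits a RuntimeWarning)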

Example 15

def default(self, obj):
        # convert dates and numpy objects in a json serializable format
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
        elif isinstance(obj, date):
            return obj.strftime('%Y-%m-%d')
        elif type(obj) in [np.int_, np.intc, np.intp, np.int8, np.int16,
                           np.int32, np.int64, np.uint8, np.uint16,
                           np.uint32, np.uint64]:
            return int(obj)
        elif type(obj) in [np.bool_]:
            return bool(obj)
        elif type(obj) in [np.float_, np.float16, np.float32, np.float64,
                           np.complex_, np.complex64, np.complex128]:
            return float(obj)

        # Let the base class default method raise the TypeError
        return json.JSONEncoder.default(self, obj) 

Example 16

def achisquare(f_obs,f_exp=None):
    """
    Calculates a one-way chi square for array of observed frequencies and returns
    the result.  If no expected frequencies are given, the total N is assumed to
    be equally distributed across all groups (NOT RIGHT??)
    
    Usage:   achisquare(f_obs, f_exp=None)   f_obs = array of observed cell freq.
    Returns: chisquare-statistic, associated p-value
    """

    k = len(f_obs)
    if f_exp is None:
        f_exp = N.array([sum(f_obs)/float(k)] * len(f_obs),N.float_)
    f_exp = f_exp.astype(N.float_)
    chisq = N.add.reduce((f_obs-f_exp)**2 / f_exp)
    return chisq, achisqprob(chisq, k-1) 

Example 17

def asquare_of_sums(inarray, dimension=None, keepdims=0):
    """
    Adds the values in the passed array, squares that sum, and returns the
    result.  Dimension can equal None (ravel array first), an integer (the
    dimension over which to operate), or a sequence (operate over multiple
    dimensions).  If keepdims=1, the returned array will have the same
    NUMBER of dimensions as the original.
    
    Usage:   asquare_of_sums(inarray, dimension=None, keepdims=0)
    Returns: the square of the sum over dim(s) in dimension
    """
    if dimension is None:
        inarray = N.ravel(inarray)
        dimension = 0
    s = asum(inarray,dimension,keepdims)
    if type(s) == N.ndarray:
        return s.astype(N.float_)*s
    else:
        return float(s)*s 

Example 18

def arankdata(inarray):
    """
    Ranks the data in inarray, dealing with ties appropriately.  Assumes
    a 1D inarray.  Adapted from Gary Perlman's |Stat ranksort.
    
    Usage:   arankdata(inarray)
    Returns: array of length equal to inarray, containing rank scores
    """
    n = len(inarray)
    svec, ivec = ashellsort(inarray)
    sumranks = 0
    dupcount = 0
    newarray = N.zeros(n,N.float_)
    for i in range(n):
        sumranks = sumranks + i
        dupcount = dupcount + 1
        if i == n-1 or svec[i] != svec[i+1]:
            averank = sumranks / float(dupcount) + 1
            for j in range(i-dupcount+1,i+1):
                newarray[ivec[j]] = averank
            sumranks = 0
            dupcount = 0
    return newarray 

Example 19

def load_data(path, seq_length):
    with open(path) as file:
        content = file.read().strip()
        key = sorted(list(set(content)))

        dataX = []
        dataY = []

        for i in range(0, len(content) - seq_length, 1):
            seq_in = content[i:i+seq_length]
            seq_out = content[i+seq_length]
            dataX.append(encode_vals(seq_in, key))
            dataY.append(encode(seq_out, key))

        X = np.reshape(dataX, (len(dataX), seq_length, len(key)))
        X = np.float_(X)
        Y = np.asarray(dataY)

        return (X, Y, key) 

Example 20

def npy2py_type(npy_type):
    int_types = [
        np.int_, np.intc, np.intp, np.int8, np.int16, np.int32, np.int64,
        np.uint8, np.uint16, np.uint32, np.uint64
    ]

    float_types = [np.float_, np.float16, np.float32, np.float64]

    bytes_types = [np.str_, np.string_]

    if npy_type in int_types:
        return int
    if npy_type in float_types:
        return float
    if npy_type in bytes_types:
        return bytes

    if hasattr(npy_type, 'char'):
        if npy_type.char in ['S', 'a']:
            return bytes
        raise TypeError

    return npy_type 
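
A brief usage sketch of the mapping above:

assert npy2py_type(np.float_) is float
assert npy2py_type(np.int32) is int
assert npy2py_type(np.dtype('S8')) is bytes   # matched via its 'char' attribute ('S')
assert npy2py_type(str) is str                # unrecognized types fall through unchanged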

Example 21

def apply_rescaling(self, frm_in, frm_out, n_avg, virtual_param):
        """Apply rescaling and averaging operations."""
        frm_out.i = frm_in.i
        # --- perform averaging on histograms
        val = np.float_(self.factor) / np.float_(n_avg)
        # --- rescale distance histograms
        if frm_out.has_key(base.loc_histograms):
            X = frm_out.get_data(base.loc_histograms)
            dict_util.scale_values(X, val)
        # --- multiref: rescale shell XX histogram
        if (virtual_param is not None and self.geometry == 'MultiReferenceStructure'):
            if frm_out.has_key(base.loc_shell_Hxx):
                X = frm_out.get_data(base.loc_shell_Hxx)
                dict_util.scale_values(X, val)
        # --- rescale length histograms
        if frm_out.has_key(base.loc_len_histograms):
            X = frm_out.get_data(base.loc_len_histograms)
            dict_util.scale_values(X, val)
        # ---
        frm_out.put_data('log', frm_in.get_data('log'))
        frm_out.put_meta(self.get_meta(n_avg=n_avg)) 

Example 22

def prepare_2D_x(L, viz_type=None, fs=None):
    # X vector: samples or time
    x = _np.arange(L - 1, dtype=_np.float_)

    if viz_type == 'time':
        x /= fs
    elif viz_type == 'linFFT':
        x = _np.fft.rfftfreq(x.shape[0] * 2 - 1, 1 / fs)
    elif viz_type == 'logFFT':
        x = _np.fft.rfftfreq(x.shape[0] * 2 - 1, 1 / fs)

    return x 

Example 23

def sph_harm(m, n, az, el, type='complex'):
    '''Compute spherical harmonics

    Parameters
    ----------
    m : (int)
        Order of the spherical harmonic. abs(m) <= n

    n : (int)
        Degree of the harmonic, sometimes called l. n >= 0

    az: (float)
        Azimuthal (longitudinal) coordinate [0, 2pi], also called Theta.

    el : (float)
        Elevation (colatitudinal) coordinate [0, pi], also called Phi.

    Returns
    -------
    y_mn : (complex float)
        Complex spherical harmonic of order m and degree n,
        sampled at theta = az, phi = el
    '''
    if type == 'legacy':
        return scy.sph_harm(m, n, az, el)
    elif type == 'real':
        Lnm = scy.lpmv(_np.abs(m), n, _np.cos(el))

        factor_1 = (2 * n + 1) / (4 * _np.pi)
        factor_2 = scy.factorial(n - _np.abs(m)) / scy.factorial(n + abs(m))

        if m != 0:
            factor_1 = 2 * factor_1

        if m < 0:
            return (-1) ** m * _np.sqrt(factor_1 * factor_2) * Lnm * _np.sin(m * az)
        else:
            return (-1) ** m * _np.sqrt(factor_1 * factor_2) * Lnm * _np.cos(m * az)
    else:
        # For the correct Condon–Shortley phase, all m>0 need to be increased by 1
        return (-1) ** _np.float_(m - (m < 0) * (m % 2)) * scy.sph_harm(m, n, az, el) 

Example 24

def random_lowrank(rows, cols, rank, randstate=np.random, dtype=np.float_):
    """Returns a random lowrank matrix of given shape and dtype"""
    if dtype == np.float_:
        A = randstate.randn(rows, rank)
        B = randstate.randn(cols, rank)
    elif dtype == np.complex_:
        A = randstate.randn(rows, rank) + 1.j * randstate.randn(rows, rank)
        B = randstate.randn(cols, rank) + 1.j * randstate.randn(cols, rank)
    else:
        raise ValueError("{} is not a valid dtype".format(dtype))

    C = A.dot(B.conj().T)
    return C / np.linalg.norm(C) 
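
A short usage sketch (seeded here only for reproducibility):

rgen = np.random.RandomState(42)
C = random_lowrank(10, 8, rank=3, randstate=rgen, dtype=np.complex_)
assert C.shape == (10, 8)
assert np.linalg.matrix_rank(C) == 3
assert np.isclose(np.linalg.norm(C), 1.0)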

Example 25

def test_inner_fast(nr_sites, local_dim, rank, benchmark, rgen):
    mpa1 = factory.random_mpa(nr_sites, local_dim, 1, dtype=np.float_,
                              randstate=rgen, normalized=True)
    mpa2 = factory.random_mpa(nr_sites, local_dim, rank, dtype=np.float_,
                              randstate=rgen, normalized=True)

    benchmark(mpsp.inner_prod_mps, mpa1, mpa2) 

Example 26

def pytest_namespace():
    return dict(
        # nr_sites, local_dim, rank
        MP_TEST_PARAMETERS=[(1, 7, np.nan), (2, 3, 3), (3, 2, 4), (6, 2, 4),
                            (4, 3, 5), (5, 2, 1)],
        MP_TEST_DTYPES=[np.float_, np.complex_]
    ) 

Example 27

def test_numpy_float_python_long_addition(self):
        # Check that numpy float and python longs can be added correctly.
        a = np.float_(23.) + 2**135
        assert_equal(a, 23. + 2**135) 

Example 28

def test_scalar_return_type(self):
        # Full scalar indices should return scalars and object
        # arrays should not call PyArray_Return on their items
        class Zero(object):
            # The most basic valid indexing
            def __index__(self):
                return 0

        z = Zero()

        class ArrayLike(object):
            # Simple array, should behave like the array
            def __array__(self):
                return np.array(0)

        a = np.zeros(())
        assert_(isinstance(a[()], np.float_))
        a = np.zeros(1)
        assert_(isinstance(a[z], np.float_))
        a = np.zeros((1, 1))
        assert_(isinstance(a[z, np.array(0)], np.float_))
        assert_(isinstance(a[z, ArrayLike()], np.float_))

        # And object arrays do not call it too often:
        b = np.array(0)
        a = np.array(0, dtype=object)
        a[()] = b
        assert_(isinstance(a[()], np.ndarray))
        a = np.array([b, None])
        assert_(isinstance(a[z], np.ndarray))
        a = np.array([[b, None]])
        assert_(isinstance(a[z, np.array(0)], np.ndarray))
        assert_(isinstance(a[z, ArrayLike()], np.ndarray)) 

Example 29

def test_non_integer_sequence_multiplication(self):
        # Numpy scalar sequence multiply should not work with non-integers
        def mult(a, b):
            return a * b

        self.assert_deprecated(mult, args=([1], np.float_(3)))
        self.assert_not_deprecated(mult, args=([1], np.int_(3))) 

Example 30

def test_ptp(self):
        (x, X, XX, m, mx, mX, mXX,) = self.d
        (n, m) = X.shape
        self.assertEqual(mx.ptp(), mx.compressed().ptp())
        rows = np.zeros(n, np.float_)
        cols = np.zeros(m, np.float_)
        for k in range(m):
            cols[k] = mX[:, k].compressed().ptp()
        for k in range(n):
            rows[k] = mX[k].compressed().ptp()
        self.assertTrue(eq(mX.ptp(0), cols))
        self.assertTrue(eq(mX.ptp(1), rows)) 

Example 31

def test_testAverage2(self):
        # More tests of average.
        w1 = [0, 1, 1, 1, 1, 0]
        w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
        x = arange(6, dtype=np.float_)
        assert_equal(average(x, axis=0), 2.5)
        assert_equal(average(x, axis=0, weights=w1), 2.5)
        y = array([arange(6, dtype=np.float_), 2.0 * arange(6)])
        assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.)
        assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.)
        assert_equal(average(y, axis=1),
                     [average(x, axis=0), average(x, axis=0) * 2.0])
        assert_equal(average(y, None, weights=w2), 20. / 6.)
        assert_equal(average(y, axis=0, weights=w2),
                     [0., 1., 2., 3., 4., 10.])
        assert_equal(average(y, axis=1),
                     [average(x, axis=0), average(x, axis=0) * 2.0])
        m1 = zeros(6)
        m2 = [0, 0, 1, 1, 0, 0]
        m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
        m4 = ones(6)
        m5 = [0, 1, 1, 1, 1, 1]
        assert_equal(average(masked_array(x, m1), axis=0), 2.5)
        assert_equal(average(masked_array(x, m2), axis=0), 2.5)
        assert_equal(average(masked_array(x, m4), axis=0).mask, [True])
        assert_equal(average(masked_array(x, m5), axis=0), 0.0)
        assert_equal(count(average(masked_array(x, m4), axis=0)), 0)
        z = masked_array(y, m3)
        assert_equal(average(z, None), 20. / 6.)
        assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5])
        assert_equal(average(z, axis=1), [2.5, 5.0])
        assert_equal(average(z, axis=0, weights=w2),
                     [0., 1., 99., 99., 4.0, 10.0]) 

Example 32

def __iter__(self):
        lengths = np.array(
            [(-l[0], -l[1], np.random.random()) for l in self.lengths],
            dtype=[('l1', np.int_), ('l2', np.int_), ('rand', np.float_)]
        )
        indices = np.argsort(lengths, order=('l1', 'l2', 'rand'))
        batches = [indices[i:i + self.batch_size]
                   for i in range(0, len(indices), self.batch_size)]
        if self.shuffle:
            np.random.shuffle(batches)
        return iter([i for batch in batches for i in batch]) 

Example 33

def scale(X, n):
    Xn = unfold(X, n)
    m = np.float_(np.sqrt((Xn ** 2).sum(axis=1)))
    m[m == 0] = 1
    for i in range(Xn.shape[0]):
        Xn[i, :] = Xn[i] / m[i]
    return fold(Xn, n, X.shape)


# TODO more efficient cython implementation 

Example 34

def __init__(self, *systems):
        """
        Initialize a BlockDiagram, with an optional list of systems to start
        the diagram.
        """
        if len(systems) == 0:
            self.systems = np.array([], dtype=object)
            self.connections = np.array([], dtype=np.bool_).reshape((0, 0))
            self.dts = np.array([], dtype=np.float_)
            self.events = np.array([], dtype=np.bool_)
            self.cum_inputs = np.array([0], dtype=np.int_)
            self.cum_outputs = np.array([0], dtype=np.int_)
            self.cum_states = np.array([0], dtype=np.int_)
            self.cum_events = np.array([0], dtype=np.int_)
        else:
            self.systems = np.array(systems, dtype=object)

            self.dts = np.zeros_like(self.systems, dtype=np.float_)
            self.events = np.zeros_like(self.systems, dtype=np.bool_)
            self.cum_inputs = np.zeros(self.systems.size+1, dtype=np.int_)
            self.cum_outputs = np.zeros(self.systems.size+1, dtype=np.int_)
            self.cum_states = np.zeros(self.systems.size+1, dtype=np.int_)
            self.cum_events = np.zeros(self.systems.size+1, dtype=np.int_)

            for i, sys in enumerate(self.systems):
                self.dts[i] = sys.dt
                self.events[i] = (
                    getattr(sys, 'event_equation_function', None) and
                    getattr(sys, 'update_equation_function', None)
                )
                self.cum_inputs[i+1] = self.cum_inputs[i] + sys.dim_input
                self.cum_outputs[i+1] = self.cum_outputs[i] + sys.dim_output
                self.cum_states[i+1] = self.cum_states[i] + sys.dim_state
                self.cum_events[i+1] = self.cum_events[i] + self.events[i]

            self.connections = np.zeros(
                    (self.cum_outputs[-1], self.cum_inputs[-1]),
                    dtype=np.bool_) 
