Python numpy.uint() Usage Examples

The following are code examples showing how to use numpy.uint(). They are extracted from open source Python projects.
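
Before the project examples, here is a minimal orientation sketch written for this page (not extracted from any project). np.uint is NumPy's platform-dependent unsigned integer scalar type and dtype; its exact bit width varies across platforms and NumPy versions, so the snippet inspects it rather than assuming one.

import numpy as np

print(np.dtype(np.uint))             # the concrete dtype, e.g. uint64 on most 64-bit systems
print(np.dtype(np.uint).itemsize)    # width in bytes

x = np.uint(42)                      # scalar construction
print(x, type(x))
print(np.uint(3.9))                  # casting a float truncates toward zero -> 3

a = np.arange(5, dtype=np.uint)      # array with an unsigned integer dtype
print(a, a.dtype)

# np.uint scalars sit in NumPy's abstract integer hierarchy
print(isinstance(x, np.unsignedinteger), isinstance(x, np.integer))   # True True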

Example 1

def df_type_to_str(i):
    '''
    Convert into simple datatypes from pandas/numpy types
    '''
    if isinstance(i, np.bool_):
        return bool(i)
    if isinstance(i, np.int_):
        return int(i)
    if isinstance(i, np.float):
        if np.isnan(i):
            return 'NaN'
        elif np.isinf(i):
            return str(i)
        return float(i)
    if isinstance(i, np.uint):
        return int(i)
    if type(i) == bytes:
        return i.decode('UTF-8')
    if isinstance(i, (tuple, list)):
        return str(i)
    if i is pd.NaT:  # not identified as a float null
        return 'NaN'
    return str(i) 
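
A quick call-through of the converter above, assuming a NumPy version that still ships the np.float alias used in the function (the alias was removed in NumPy 1.24); expected results are shown in the comments:

import numpy as np
import pandas as pd

# df_type_to_str as defined above
print(df_type_to_str(np.uint(7)))            # 7        (np.uint branch -> plain int)
print(df_type_to_str(np.float64(np.nan)))    # NaN      (float branch catches np.float64)
print(df_type_to_str(b'abc'))                # abc      (bytes decoded to str)
print(df_type_to_str(pd.NaT))                # NaN      (pandas null timestamp)
print(df_type_to_str((1, 2)))                # (1, 2)   (tuples stringified)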

Example 2

def savetxt(filename, ndarray):
    dir = os.path.dirname(filename)

    if not os.path.exists(dir):
        os.makedirs(dir)

    if not os.path.isfile(filename):
        with open(filename, 'w') as f:
            labels = list(map(' '.join, np.eye(10, dtype=np.uint).astype(str)))
            for row in ndarray:
                row_str = row.astype(str)
                label_str = labels[row[-1]]
                feature_str = ' '.join(row_str[:-1])
                f.write('|labels {} |features {}\n'.format(label_str, feature_str))
    else:
        print("File already exists", filename) 

Example 3

def save_as_txt(filename, ndarray):
    dir = os.path.dirname(filename)

    if not os.path.exists(dir):
        os.makedirs(dir)

    if not os.path.isfile(filename):
        print("Saving to ", filename, end=" ")
        with open(filename, 'w') as f:
            labels = list(map(' '.join, np.eye(10, dtype=np.uint).astype(str)))
            for row in ndarray:
                row_str = row.astype(str)
                label_str = labels[row[-1]]
                feature_str = ' '.join(row_str[:-1])
                f.write('|labels {} |features {}\n'.format(label_str, feature_str))
    else:
        print("File already exists", filename) 

Example 4

def union_elements(elements):
    """elements = [(chr, s, e, id), ...], this is to join elements that have a
    deletion in the 'to' species
    """

    if len(elements) < 2: return elements
    assert set( [e[3] for e in elements] ) == set( [elements[0][3]] ), "more than one id"
    el_id = elements[0][3]

    unioned_elements = []
    for ch, chgrp in groupby(elements, key=itemgetter(0)):
        for (s, e) in elem_u( np.array([itemgetter(1, 2)(_) for _ in chgrp], dtype=np.uint) ):
            if (s < e):
                unioned_elements.append( (ch, s, e, el_id) )
    assert len(unioned_elements) <= len(elements)
    return unioned_elements 

Example 5

def __init__(self, mean, cov, df, seed=None):
        """Defines the mean, co-variance and degrees of freedom a p-dimensional multivariate Student T distribution.

        Parameters
        ----------
        mean: numpy.ndarray
            Vector containing p means, one for every dimension        
        cov: numpy.ndarray
            pxp matrix containing the co-variance matrix        
        df: np.uint
            Degrees of freedom
 
        """

        MultiStudentT._check_parameters(mean, cov, df)
        
        self.mean = mean
        self.cov = cov
        self.df = df
        self.rng = np.random.RandomState(seed) 

Example 6

def test_predict_wrong_X_dimensions(self):
        rs = np.random.RandomState(1)

        model = RandomForestWithInstances(np.zeros((10,), dtype=np.uint), bounds=np.array(
            list(map(lambda x: (0, 10), range(10))), dtype=object))
        X = rs.rand(10)
        self.assertRaisesRegexp(ValueError, "Expected 2d array, got 1d array!",
                                model.predict, X)
        X = rs.rand(10, 10, 10)
        self.assertRaisesRegexp(ValueError, "Expected 2d array, got 3d array!",
                                model.predict, X)

        X = rs.rand(10, 5)
        self.assertRaisesRegexp(ValueError, "Rows in X should have 10 entries "
                                            "but have 5!",
                                model.predict, X) 

Example 7

def test_predict_marginalized_over_instances_mocked(self, rf_mock):
        """Use mock to count the number of calls to predict()"""

        class SideEffect(object):
            def __call__(self, X):
                # Numpy array of number 0 to X.shape[0]
                rval = np.array(list(range(X.shape[0]))).reshape((-1, 1))
                # Return mean and variance
                return rval, rval

        rf_mock.side_effect = SideEffect()

        rs = np.random.RandomState(1)
        F = rs.rand(10, 5)

        model = RandomForestWithInstances(np.zeros((15,), dtype=np.uint),
                                          instance_features=F,
                                          bounds=np.array(list(map(lambda x: (0, 10), range(10))), dtype=object))
        means, vars = model.predict_marginalized_over_instances(rs.rand(11, 10))
        self.assertEqual(rf_mock.call_count, 11)
        self.assertEqual(means.shape, (11, 1))
        self.assertEqual(vars.shape, (11, 1))
        for i in range(11):
            self.assertEqual(means[i], 4.5)
            self.assertEqual(vars[i], 4.5) 

Example 8

def test_train_and_predict_with_rf(self):
        rs = np.random.RandomState(1)
        X = rs.rand(20, 10)
        Y = rs.rand(10, 2)
        model = UncorrelatedMultiObjectiveRandomForestWithInstances(
            ['cost', 'ln(runtime)'],
            types=np.zeros((10, ), dtype=np.uint),
            bounds=np.array([
                (0, np.nan), (0, np.nan), (0, np.nan), (0, np.nan), (0, np.nan),
                (0, np.nan), (0, np.nan), (0, np.nan), (0, np.nan), (0, np.nan)
            ], dtype=object),
            rf_kwargs={'seed': 1},
            pca_components=5
        )
        self.assertEqual(model.estimators[0].seed, 1)
        self.assertEqual(model.estimators[1].seed, 1)
        self.assertEqual(model.pca_components, 5)
        model.train(X[:10], Y)
        m, v = model.predict(X[10:])
        self.assertEqual(m.shape, (10, 2))
        self.assertEqual(v.shape, (10, 2)) 

Example 9

def _L(x):
    # initialize with zeros
    batch_size = x.shape[0]
    a = T.zeros((batch_size, num_actuators, num_actuators))
    # set diagonal elements
    batch_idx = T.extra_ops.repeat(T.arange(batch_size), num_actuators)
    diag_idx = T.tile(T.arange(num_actuators), batch_size)
    b = T.set_subtensor(a[batch_idx, diag_idx, diag_idx], T.flatten(T.exp(x[:, :num_actuators])))
    # set lower triangle
    cols = np.concatenate([np.array(range(i), dtype=np.uint) for i in xrange(num_actuators)])
    rows = np.concatenate([np.array([i]*i, dtype=np.uint) for i in xrange(num_actuators)])
    cols_idx = T.tile(T.as_tensor_variable(cols), batch_size)
    rows_idx = T.tile(T.as_tensor_variable(rows), batch_size)
    batch_idx = T.extra_ops.repeat(T.arange(batch_size), len(cols))
    c = T.set_subtensor(b[batch_idx, rows_idx, cols_idx], T.flatten(x[:, num_actuators:]))
    return c 

Example 10

def __init__(self, max_timesteps, max_episodes, observation_shape, action_shape):
    self.max_timesteps = max_timesteps
    self.max_episodes = max_episodes
    self.observation_shape = observation_shape
    self.action_shape = action_shape

    self.preobs = np.empty((self.max_timesteps, self.max_episodes) + observation_shape)
    self.actions = np.empty((self.max_timesteps, self.max_episodes) + action_shape)
    self.rewards = np.empty((self.max_timesteps, self.max_episodes))
    self.postobs = np.empty((self.max_timesteps, self.max_episodes) + observation_shape)
    self.terminals = np.empty((self.max_timesteps, self.max_episodes), dtype = np.bool)
    self.lengths = np.zeros(self.max_episodes, np.uint)
    
    self.num_episodes = 0
    self.episode = 0
    self.timestep = 0 

Example 11

def test_make_vector(self):
        mv = opt.make_vector(1, 2, 3)
        self.assertRaises(
            tensor.NotScalarConstantError,
            get_scalar_constant_value,
            mv)
        assert get_scalar_constant_value(mv[0]) == 1
        assert get_scalar_constant_value(mv[1]) == 2
        assert get_scalar_constant_value(mv[2]) == 3
        assert get_scalar_constant_value(mv[numpy.int32(0)]) == 1
        assert get_scalar_constant_value(mv[numpy.int64(1)]) == 2
        assert get_scalar_constant_value(mv[numpy.uint(2)]) == 3
        t = theano.scalar.Scalar('int64')
        self.assertRaises(
            tensor.NotScalarConstantError,
            get_scalar_constant_value,
            mv[t()]) 

Example 12

def data_style_func(df):
        '''
        Default value that can be used as callback for data_style_func

        Args:
            df: the dataframe that will be used to build the presentation model

        Returns:
            a function table takes idx, col as arguments and returns a dictionary of html style attributes
        '''
        def _style_func(r, c):
            if isinstance(df.at[r,c], (np.int_, np.float, np.uint)):
                return td_style_to_str(default_numeric_td_style)
            return td_style_to_str(default_td_style)
        return _style_func 

Example 13

def matrix_size(udat, vdat, **kwargs):

    maxuv_factor = kwargs.get('maxuv_factor', 4.8)
    minuv_factor = kwargs.get('minuv_factor', 4.)

    uvdist = np.sqrt(udat**2 + vdat**2)

    maxuv = max(uvdist)*maxuv_factor
    minuv = min(uvdist)/minuv_factor

    minpix = np.uint(maxuv/minuv)

    Nuv = kwargs.get('force_nx', int(2**np.ceil(np.log2(minpix))))

    return Nuv, minuv, maxuv 
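
The np.uint() call above is a plain scalar cast: the float ratio maxuv/minuv is truncated toward zero, and the result is then rounded up to the next power of two for the grid size. A small illustration with made-up numbers:

import numpy as np

maxuv, minuv = 565.0, 7.5
minpix = np.uint(maxuv / minuv)            # 75.33... -> 75 (cast truncates toward zero)
Nuv = int(2 ** np.ceil(np.log2(minpix)))   # next power of two -> 128
print(minpix, Nuv)                         # 75 128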

Example 14

def _computeUnindexedVertexes(self):
        ## Given (Nv, 3, 3) array of vertexes-indexed-by-face, convert backward to unindexed vertexes
        ## This is done by collapsing into a list of 'unique' vertexes (difference < 1e-14) 
        
        ## I think generally this should be discouraged..
        faces = self._vertexesIndexedByFaces
        verts = {}  ## used to remember the index of each vertex position
        self._faces = np.empty(faces.shape[:2], dtype=np.uint)
        self._vertexes = []
        self._vertexFaces = []
        self._faceNormals = None
        self._vertexNormals = None
        for i in xrange(faces.shape[0]):
            face = faces[i]
            inds = []
            for j in range(face.shape[0]):
                pt = face[j]
                pt2 = tuple([round(x*1e14) for x in pt])  ## quantize to be sure that nearly-identical points will be merged
                index = verts.get(pt2, None)
                if index is None:
                    #self._vertexes.append(QtGui.QVector3D(*pt))
                    self._vertexes.append(pt)
                    self._vertexFaces.append([])
                    index = len(self._vertexes)-1
                    verts[pt2] = index
                self._vertexFaces[index].append(i)  # keep track of which vertexes belong to which faces
                self._faces[i,j] = index
        self._vertexes = np.array(self._vertexes, dtype=float)
    
    #def _setUnindexedFaces(self, faces, vertexes, vertexColors=None, faceColors=None):
        #self._vertexes = vertexes #[QtGui.QVector3D(*v) for v in vertexes]
        #self._faces = faces.astype(np.uint)
        #self._edges = None
        #self._vertexFaces = None
        #self._faceNormals = None
        #self._vertexNormals = None
        #self._vertexColors = vertexColors
        #self._faceColors = faceColors 

Example 15

def _computeEdges(self):
        if not self.hasFaceIndexedData:
            ## generate self._edges from self._faces
            nf = len(self._faces)
            edges = np.empty(nf*3, dtype=[('i', np.uint, 2)])
            edges['i'][0:nf] = self._faces[:,:2]
            edges['i'][nf:2*nf] = self._faces[:,1:3]
            edges['i'][-nf:,0] = self._faces[:,2]
            edges['i'][-nf:,1] = self._faces[:,0]
            
            # sort per-edge
            mask = edges['i'][:,0] > edges['i'][:,1]
            edges['i'][mask] = edges['i'][mask][:,::-1]
            
            # remove duplicate entries
            self._edges = np.unique(edges)['i']
            #print self._edges
        elif self._vertexesIndexedByFaces is not None:
            verts = self._vertexesIndexedByFaces
            edges = np.empty((verts.shape[0], 3, 2), dtype=np.uint)
            nf = verts.shape[0]
            edges[:,0,0] = np.arange(nf) * 3
            edges[:,0,1] = edges[:,0,0] + 1
            edges[:,1,0] = edges[:,0,1]
            edges[:,1,1] = edges[:,1,0] + 1
            edges[:,2,0] = edges[:,1,1]
            edges[:,2,1] = edges[:,0,0]
            self._edges = edges
        else:
            raise Exception("MeshData cannot generate edges--no faces in this data.") 
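
The structured dtype in the first branch is what makes the duplicate removal work: packing each edge into a single ('i', np.uint, 2) field lets np.unique compare whole edges rather than individual integers. A stripped-down sketch of that trick on a toy edge list (not part of the original class):

import numpy as np

raw = np.array([[0, 1], [1, 0], [1, 2], [0, 1]])   # undirected edges, some repeated

edges = np.empty(len(raw), dtype=[('i', np.uint, 2)])
edges['i'] = raw

# sort each edge so (1, 0) and (0, 1) compare equal
mask = edges['i'][:, 0] > edges['i'][:, 1]
edges['i'][mask] = edges['i'][mask][:, ::-1]

print(np.unique(edges)['i'])   # [[0 1]
                               #  [1 2]]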

Example 16

def sphere(rows, cols, radius=1.0, offset=True):
        """
        Return a MeshData instance with vertexes and faces computed
        for a spherical surface.
        """
        verts = np.empty((rows+1, cols, 3), dtype=float)
        
        ## compute vertexes
        phi = (np.arange(rows+1) * np.pi / rows).reshape(rows+1, 1)
        s = radius * np.sin(phi)
        verts[...,2] = radius * np.cos(phi)
        th = ((np.arange(cols) * 2 * np.pi / cols).reshape(1, cols)) 
        if offset:
            th = th + ((np.pi / cols) * np.arange(rows+1).reshape(rows+1,1))  ## rotate each row by 1/2 column
        verts[...,0] = s * np.cos(th)
        verts[...,1] = s * np.sin(th)
        verts = verts.reshape((rows+1)*cols, 3)[cols-1:-(cols-1)]  ## remove redundant vertexes from top and bottom
        
        ## compute faces
        faces = np.empty((rows*cols*2, 3), dtype=np.uint)
        rowtemplate1 = ((np.arange(cols).reshape(cols, 1) + np.array([[0, 1, 0]])) % cols) + np.array([[0, 0, cols]])
        rowtemplate2 = ((np.arange(cols).reshape(cols, 1) + np.array([[0, 1, 1]])) % cols) + np.array([[cols, 0, cols]])
        for row in range(rows):
            start = row * cols * 2 
            faces[start:start+cols] = rowtemplate1 + row * cols
            faces[start+cols:start+(cols*2)] = rowtemplate2 + row * cols
        faces = faces[cols:-cols]  ## cut off zero-area triangles at top and bottom
        
        ## adjust for redundant vertexes that were removed from top and bottom
        vmin = cols-1
        faces[faces<vmin] = vmin
        faces -= vmin  
        vmax = verts.shape[0]-1
        faces[faces>vmax] = vmax
        
        return MeshData(vertexes=verts, faces=faces) 

Example 17

def cylinder(rows, cols, radius=[1.0, 1.0], length=1.0, offset=False):
        """
        Return a MeshData instance with vertexes and faces computed
        for a cylindrical surface.
        The cylinder may be tapered with different radii at each end (truncated cone)
        """
        verts = np.empty((rows+1, cols, 3), dtype=float)
        if isinstance(radius, int):
            radius = [radius, radius] # convert to list
        ## compute vertexes
        th = np.linspace(2 * np.pi, 0, cols).reshape(1, cols)
        r = np.linspace(radius[0],radius[1],num=rows+1, endpoint=True).reshape(rows+1, 1) # radius as a function of z
        verts[...,2] = np.linspace(0, length, num=rows+1, endpoint=True).reshape(rows+1, 1) # z
        if offset:
            th = th + ((np.pi / cols) * np.arange(rows+1).reshape(rows+1,1))  ## rotate each row by 1/2 column
        verts[...,0] = r * np.cos(th) # x = r cos(th)
        verts[...,1] = r * np.sin(th) # y = r sin(th)
        verts = verts.reshape((rows+1)*cols, 3) # just reshape: no redundant vertices...
        ## compute faces
        faces = np.empty((rows*cols*2, 3), dtype=np.uint)
        rowtemplate1 = ((np.arange(cols).reshape(cols, 1) + np.array([[0, 1, 0]])) % cols) + np.array([[0, 0, cols]])
        rowtemplate2 = ((np.arange(cols).reshape(cols, 1) + np.array([[0, 1, 1]])) % cols) + np.array([[cols, 0, cols]])
        for row in range(rows):
            start = row * cols * 2 
            faces[start:start+cols] = rowtemplate1 + row * cols
            faces[start+cols:start+(cols*2)] = rowtemplate2 + row * cols
        
        return MeshData(vertexes=verts, faces=faces) 

Example 18

def generateFaces(self):
        cols = self._z.shape[1]-1
        rows = self._z.shape[0]-1
        faces = np.empty((cols*rows*2, 3), dtype=np.uint)
        rowtemplate1 = np.arange(cols).reshape(cols, 1) + np.array([[0, 1, cols+1]])
        rowtemplate2 = np.arange(cols).reshape(cols, 1) + np.array([[cols+1, 1, cols+2]])
        for row in range(rows):
            start = row * cols * 2 
            faces[start:start+cols] = rowtemplate1 + row * (cols+1)
            faces[start+cols:start+(cols*2)] = rowtemplate2 + row * (cols+1)
        self._faces = faces 

Example 19

def test_dtype_keyerrors_(self):
        # Ticket #1106.
        dt = np.dtype([('f1', np.uint)])
        assert_raises(KeyError, dt.__getitem__, "f2")
        assert_raises(IndexError, dt.__getitem__, 1)
        assert_raises(ValueError, dt.__getitem__, 0.0) 

Example 20

def get_val_indices_uniform(m_total, m_val):
    all_idxs = np.arange(m_total)    
    samps_per_class = m_val / NUM_CLASSES
    val_idxs = np.array([])
    for i in range(NUM_CLASSES):
        all_class_idxs = all_idxs[( all_idxs % NUM_CLASSES == i)]
        sel_class_idxs = np.random.choice(all_class_idxs, samps_per_class, replace=False)
        val_idxs = np.concatenate((val_idxs,sel_class_idxs))
    np.random.shuffle(val_idxs)
    return val_idxs.astype(np.uint) 
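
The trailing astype(np.uint) matters here because np.array([]) and np.concatenate produce float64 arrays, and float arrays cannot be used for fancy indexing. A minimal illustration of why the cast is needed:

import numpy as np

data = np.arange(10) * 10
idxs = np.concatenate((np.array([]), np.array([3.0, 7.0])))   # float64, as in the function above
print(idxs.dtype)                    # float64
# data[idxs]                         # would raise IndexError (float arrays are not valid indices)
print(data[idxs.astype(np.uint)])    # [30 70]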

Example 21

def get_val_indices(m_total, m_val, info_mat):
    all_idxs = np.arange(m_total)    
    val_idxs = np.array([])
    for i in range(NUM_CLASSES):
        cat_for_val = np.random.choice(TR_CATS,1)[0]
        all_class_idxs = all_idxs[( all_idxs % NUM_CLASSES == i)]
        class_info = info_mat[all_class_idxs]
        sel_class_idxs = np.where(class_info[:,0] == cat_for_val)[0]
        val_idxs = np.concatenate((val_idxs,all_class_idxs[sel_class_idxs]))
    np.random.shuffle(val_idxs)
    return val_idxs.astype(np.uint) 

Example 22

def __init__(self, img):
		self.img = np.asarray(img, np.float32) # The image to be handled;
		self.img2 = img # The real image;
		self.rows, self.cols = get_size(img)
		self.mask = np.zeros((self.rows, self.cols), dtype = np.uint) # In this class, we use just one mask to contain the Ms and Ml in the paper; In the mask, the places where the value = self._SHADOW belongs to Ms, and other pixels belongs to Ml;
		self.trimap = np.zeros((self.rows, self.cols), dtype = np.uint) # The trimap containing info that whether a pixel is inside the shadow, outside the shadow, or unknown;
		self.mask_shadow = np.zeros((self.rows, self.cols), dtype = np.uint) # The area where shadow removal is required;

		self._SHADOW = 1 # The flag of shadow;
		self._LIT = 0 # The flag of lit;
		self._UNKNOWN = -1 # The flag of unknown;
		self._threshold = 0.1;
		self._drawing = True # The flag of drawing;
		self._drawn = False # The status of whether seed initialise is finished; 

Example 23

def saveTxt(filename, ndarray):
    with open(filename, 'w') as f:
        labels = list(map(' '.join, np.eye(10, dtype=np.uint).astype(str)))
        for row in ndarray:
            row_str = row.astype(str)
            label_str = labels[row[-1]]
            feature_str = ' '.join(row_str[:-1])
            f.write('|labels {} |features {}\n'.format(label_str, feature_str)) 

Example 24

def make_stack(series):
    stack_size = compute_stack_size(series)
    new = np.empty(stack_size, dtype=[('doc_index', np.uint), ('word', "S30"), ('value', np.float)])
    counter = 0
    for row in series.iteritems():
        for word in row[1]:
            new[counter] = (row[0], word, row[1][word])
            counter +=1
    return new 

Example 25

def get_articles_by_distance(article, corpus): #article is the row from the articles df
    article = corpus[article['index'],:]
    iterable = ((x, cosine_distance(article, corpus[x,:])) for x in range(corpus.shape[0]))
    articles_by_distance = np.fromiter(iterable, dtype='uint,float', count=corpus.shape[0])
    articles_by_distance = pd.DataFrame(articles_by_distance).rename(columns={'f1':'cosine_distance', 'f0':'index'}).sort_values(by='cosine_distance')
    return articles_by_distance[0:25] 

Example 26

def backproject_depth(self, depth):
        constant_x = 1.0 / self.focal_x
        constant_y = 1.0 / self.focal_y
        row, col = depth.shape
        coords = np.zeros((row, col, 2), dtype=np.uint)
        coords[..., 0] = np.arange(row)[:, None]
        coords[..., 1] = np.arange(col)
        coords = coords.reshape((-1, 2))
        output = np.zeros((len(coords), 3))
        values = depth[coords[:, 0], coords[:, 1]]
        output[:, 0] = (coords[:, 1] - self.center_x) * values * constant_x
        output[:, 1] = (coords[:, 0] - self.center_y) * values * constant_y
        output[:, 2] = values
        return output 

Example 27

def testIntArray(self):
        arr = np.arange(100, dtype=np.int)
        dtypes = (np.int, np.int8, np.int16, np.int32, np.int64,
                  np.uint, np.uint8, np.uint16, np.uint32, np.uint64)
        for dtype in dtypes:
            inpt = arr.astype(dtype)
            outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=dtype)
            tm.assert_numpy_array_equal(inpt, outp) 

Example 28

def transNK(self, d, N, problem_arg=0):
        # return np.arange(0, N), np.arange(0, N)
        # Each ind has 2*|ind|_0 samples
        indSet = setutil.GenTDSet(d, N, base=0)
        N_per_ind = 2**np.sum(indSet!=0, axis=1)
        if problem_arg == 1:
            N_per_ind[1:] /= 2
        _, k_ind = np.unique(np.sum(indSet, axis=1), return_inverse=True)
        k_of_N = np.repeat(k_ind, N_per_ind.astype(np.int))[:N]
        # N_of_k = [j+np.arange(0, i, dtype=np.uint) for i, j in
        #           zip(N_per_ind, np.hstack((np.array([0],
        #                                              dtype=np.uint),
        #                                     np.cumsum(N_per_ind)[:np.max(k_of_N)])))]
        return k_of_N 

Example 29

def test_json_numpy_encoder_int(self):
        assert (json.dumps(np.uint(10), cls=utils.JSONNumpyEncoder)
                == json.dumps(10)) 

Example 30

def test_json_numpy_encoder_int_array(self):
        array = np.arange(10, dtype=np.uint).reshape(2, 5)
        assert (json.dumps(array, cls=utils.JSONNumpyEncoder)
                == json.dumps(array.tolist())) 

Example 31

def test_serialize_json(self):
        array = np.arange(10, dtype=np.uint).reshape(2, 5)
        assert (utils.serialize_json(array)
                == json.dumps(array.tolist())) 
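
The utils.JSONNumpyEncoder and utils.serialize_json exercised in the three tests above are project code not shown on this page. A hypothetical minimal encoder with the behaviour the tests expect (np.uint scalars serialize like plain ints, np.uint arrays like nested lists) could look like this:

import json
import numpy as np

class JSONNumpyEncoder(json.JSONEncoder):
    """Minimal sketch, not the tested project's implementation."""
    def default(self, obj):
        if isinstance(obj, np.integer):      # np.uint, np.int64, ...
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)

print(json.dumps(np.uint(10), cls=JSONNumpyEncoder))        # 10
array = np.arange(10, dtype=np.uint).reshape(2, 5)
print(json.dumps(array, cls=JSONNumpyEncoder))              # [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]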

Example 32

def is_integer(test_value):
    """ Check all available integer representations.

    @return: bool, True if the passed value is a integer, otherwise false.
    """

    return type(test_value) in [np.int, np.int8, np.int16, np.int32, np.int64,
                                np.uint, np.uint8, np.uint16, np.uint32,
                                np.uint64] 
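
The explicit type list above also depends on the np.int alias (the built-in int), which recent NumPy releases no longer provide. A hedged equivalent that keeps the same intent without enumerating every width is to test against the abstract np.integer base class:

import numpy as np

def is_integer(test_value):
    """Sketch of an equivalent check: built-in int or any NumPy integer scalar, excluding bools."""
    return isinstance(test_value, (int, np.integer)) and not isinstance(test_value, bool)

print(is_integer(3), is_integer(np.uint(3)), is_integer(3.0), is_integer(True))
# True True False False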

Example 33

def mask_od_vessels(skel, od_center):

    # Create optic disk mask
    od_mask = np.zeros_like(skel, dtype=np.uint8)
    cv2.circle(od_mask, od_center, 30, (1, 1, 1), -1)
    od_mask_inv = np.invert(od_mask) / 255.

    skel = skel.astype(np.float)
    masked_skel = skel * od_mask_inv

    return masked_skel.astype(np.uint8)


# def line_diameters(edt, lines):
#
#     diameters = []
#
#     for line in lines:
#
#         p0, p1 = [np.asarray(pt) for pt in line]
#         vec = p1 - p0  # vector between segment end points
#         vec_len = np.linalg.norm(vec)
#
#         pts_along_line = np.uint(np.asarray([p0 + (i * vec) for i in np.arange(0., 1., 1. / vec_len)]))
#
#         for pt in pts_along_line:
#
#             try:
#                 diameters.append(edt[pt[0], pt[1]])
#             except IndexError:
#                 pass
#
#     return diameters 

Example 34

def train(self, X: np.ndarray, Y: np.ndarray, **kwargs):
        """Trains the EPM on X and Y.

        Parameters
        ----------
        X : np.ndarray [n_samples, n_features (config + instance features)]
            Input data points.
        Y : np.ndarray [n_samples, n_objectives]
            The corresponding target values. n_objectives must match the
            number of target names specified in the constructor.

        Returns
        -------
        self : AbstractEPM
        """

        self.n_params = X.shape[1] - self.n_feats

        # reduce dimensionality of features of larger than PCA_DIM
        if self.pca and X.shape[0] > 1:
            X_feats = X[:, -self.n_feats:]
            # scale features
            X_feats = self.scaler.fit_transform(X_feats)
            X_feats = np.nan_to_num(X_feats)  # if features with max == min
            # PCA
            X_feats = self.pca.fit_transform(X_feats)
            X = np.hstack((X[:, :self.n_params], X_feats))
            if hasattr(self, "types"):
                # for RF, adapt types list
                # if X_feats.shape[0] < self.pca, X_feats.shape[1] ==
                # X_feats.shape[0]
                self.types = np.array(np.hstack((self.types[:self.n_params], np.zeros((X_feats.shape[1])))),
                                      dtype=np.uint)
        return self._train(X, Y) 

Example 35

def test_predict(self):
        rs = np.random.RandomState(1)
        X = rs.rand(20, 10)
        Y = rs.rand(10, 1)
        model = RandomForestWithInstances(np.zeros((10,), dtype=np.uint), bounds=np.array(
                list(map(lambda x: (0, 10), range(10))), dtype=object))
        model.train(X[:10], Y[:10])
        m_hat, v_hat = model.predict(X[10:])
        self.assertEqual(m_hat.shape, (10, 1))
        self.assertEqual(v_hat.shape, (10, 1)) 

Example 36

def test_train_with_pca(self):
        rs = np.random.RandomState(1)
        X = rs.rand(20, 20)
        F = rs.rand(10, 10)
        Y = rs.rand(20, 1)
        model = RandomForestWithInstances(np.zeros((20,), dtype=np.uint),
                                          np.array(list(map(lambda x: (0, 10), range(10))), dtype=object),
                                          pca_components=2,
                                          instance_features=F)
        model.train(X, Y)
        
        self.assertEqual(model.n_params, 10)
        self.assertEqual(model.n_feats, 10)
        self.assertIsNotNone(model.pca)
        self.assertIsNotNone(model.scaler)
点赞