Python numpy.atleast_3d() Usage Examples

The following are code examples showing how to use numpy.atleast_3d(). They are extracted from open source Python projects.
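
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what numpy.atleast_3d() does to inputs of different dimensionality:

import numpy as np

# Scalars and 1-D/2-D arrays are viewed with exactly three dimensions;
# arrays that already have three or more dimensions are returned unchanged.
print(np.atleast_3d(1.0).shape)                  # (1, 1, 1)
print(np.atleast_3d(np.zeros(4)).shape)          # (1, 4, 1)
print(np.atleast_3d(np.zeros((2, 3))).shape)     # (2, 3, 1)
print(np.atleast_3d(np.zeros((2, 3, 4))).shape)  # (2, 3, 4)

This is why many of the examples below call np.atleast_3d() on images: a 2-D greyscale image gains a trailing channel axis, while 3-D colour images pass through unchanged.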

Example 1

def to_rgb(img):
    """
    Converts the given array into a RGB image. If the number of channels is not
    3 the array is tiled such that it has 3 channels. Finally, the values are
    rescaled to [0,255) 
    
    :param img: the array to convert [nx, ny, channels]
    
    :returns img: the rgb image [nx, ny, 3]
    """
    img = np.atleast_3d(img)
    channels = img.shape[2]
    if channels < 3:
        img = np.tile(img, 3)
    
    img[np.isnan(img)] = 0
    img -= np.amin(img)
    img /= np.amax(img)
    img *= 255
    return img 
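
A short usage sketch (not from the source project) showing how to_rgb promotes a single-channel array; it assumes numpy is imported as np and the function is defined as above:

gray = np.random.rand(4, 5)      # 2-D float input with no channel axis
rgb = to_rgb(gray)
print(rgb.shape)                 # (4, 5, 3): atleast_3d adds a channel axis, tile repeats it
print(rgb.min(), rgb.max())      # values rescaled into [0, 255]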

Example 2

def new_mesh_sampler(camera, render_source, engine):
    params = ensure_code_unit_params(camera._get_sampler_params(render_source))
    args = (
        np.atleast_3d(params['vp_pos']),
        np.atleast_3d(params['vp_dir']),
        params['center'],
        params['bounds'],
        np.atleast_3d(params['image']).astype('float64'),
        params['x_vec'],
        params['y_vec'],
        params['width'],
    )
    kwargs = {'lens_type': params['lens_type']}
    if engine == 'embree':
        sampler = mesh_traversal.EmbreeMeshSampler(*args, **kwargs)
    elif engine == 'yt':
        sampler = bounding_volume_hierarchy.BVHMeshSampler(*args, **kwargs)
    return sampler 

Example 3

def new_interpolated_projection_sampler(camera, render_source):
    params = ensure_code_unit_params(camera._get_sampler_params(render_source))
    params.update(transfer_function=render_source.transfer_function)
    params.update(num_samples=render_source.num_samples)
    args = (
        np.atleast_3d(params['vp_pos']),
        np.atleast_3d(params['vp_dir']),
        params['center'],
        params['bounds'],
        params['image'],
        params['x_vec'],
        params['y_vec'],
        params['width'],
        params['num_samples'],
    )
    kwargs = {'lens_type': params['lens_type']}
    if render_source.zbuffer is not None:
        kwargs['zbuffer'] = render_source.zbuffer.z
    else:
        kwargs['zbuffer'] = np.ones(params['image'].shape[:2], "float64")
    sampler = InterpolatedProjectionSampler(*args, **kwargs)
    return sampler 

Example 4

def to_rgb(img):
    """
    Converts the given array into a RGB image. If the number of channels is not
    3 the array is tiled such that it has 3 channels. Finally, the values are
    rescaled to [0,255) 
    
    :param img: the array to convert [nx, ny, channels]
    
    :returns img: the rgb image [nx, ny, 3]
    """
    img = np.atleast_3d(img)
    channels = img.shape[2]
    if channels < 3:
        img = np.tile(img, 3)
    
    img[np.isnan(img)] = 0
    img -= np.amin(img)
    img /= np.amax(img)
    img *= 255
    return img 

Example 5

def getData(X, F, window=(30, 30), N=10):
    """
    Slide a window over X and its label array F to build training samples.

    :param X: input array (ndarray)
    :param F: label array (ndarray)
    :param window: window size (tuple[int, int])
    :param N: column step between consecutive windows (int)
    """
    Y = []
    windowGen = []
    ysets = np.unique(F)

    def toY(i, j):
        windowGen.append([i, j, window[0], window[1]])
        # count how often each label occurs inside the window of F
        a = list(np.hstack(np.atleast_2d(F[i:i + window[0], j:j + window[1]])))
        Y.append(np.array([a.count(label) for label in ysets]))
        # keep the majority label for this window
        Y[-1] = ysets[Y[-1] == max(Y[-1])][0]
        return np.atleast_3d([X[i:i + window[0], j:j + window[1]]])

    Xs = sum([[toY(i, j) for j in range(0, X.shape[1] - window[1] - 1, N)]
              for i in range(0, X.shape[0] - window[0] - 1, 10)], [])
    return np.array(Xs), np.array(Y), windowGen

Example 6

def __init__(self, directory, filepath, image_size):
        """
        Constructor for an JAFFEInstance object.

        Args:
            directory (str): Base directory where the example lives.
            filename (str): The name of the file of the example.
            image_size (tuple<int>): Size to resize the image to.
        """

        filename = filepath.split('/')[-1]

        self.image = misc.imread( os.path.join(directory, filepath) )
        # some of the jaffe images are 3-channel greyscale, some are 1-channel!
        self.image = np.atleast_3d(self.image)[...,0] # make image 2d for sure
        # Resize and scale values to [0 1]
        self.image = misc.imresize( self.image, image_size )
        self.image = self.image / 255.0
        ident, _, N, _ = filename.split('.')
        # Note: the emotion encoded in the filename is the dominant
        # scoring emotion, but we ignore this and use precise emotion scores
        # from the semantic ratings table
        self.identity, self.N = ident, int(N) - 1 # 0-based instance numbering 

Example 7

def rotate_points(image, obj_list, angle):
        '''
        Rotates the points of the given objects by the given angle. The points will be translated
        into absolute coordinates. Therefore the image (resp. its shape) is needed.
        '''
        
        rotated_obj_list = []
        cosOfAngle = np.cos(2 * np.pi / 360 * -angle)
        sinOfAngle = np.sin(2 * np.pi / 360 * -angle)
        image_shape = np.array(np.atleast_3d(image).shape[0:2][::-1])
        rot_mat = np.array([[cosOfAngle, -sinOfAngle], [sinOfAngle, cosOfAngle]])
        for obj in obj_list:
            obj_name = obj[0]
            point = obj[1] * image_shape
            rotated_point = AugmentationCreator._rotate_vector_around_point(image_shape/2, point, rot_mat) / image_shape
            rotated_obj_list.append((obj_name, (rotated_point[0], rotated_point[1])))
            
        return rotated_obj_list 

Example 8

def rotate_bboxes(image, obj_list, angle):
        '''
        Rotates the bounding boxes of the given objects by the given angle. The bounding box will be
        translated into absolute coordinates. Therefore the image (resp. its shape) is needed.
        '''
        
        rotated_obj_list = []
        cosOfAngle = np.cos(2 * np.pi / 360 * -angle)
        sinOfAngle = np.sin(2 * np.pi / 360 * -angle)
        image_shape = np.array(np.atleast_3d(image).shape[0:2][::-1])
        rot_mat = np.array([[cosOfAngle, -sinOfAngle], [sinOfAngle, cosOfAngle]])
        for obj in obj_list:
            obj_name = obj[0]
            upper_left = obj[1]['upper_left'] * image_shape
            lower_right = obj[1]['lower_right'] * image_shape
            upper_left = AugmentationCreator._rotate_vector_around_point(image_shape/2, upper_left, rot_mat) / image_shape
            lower_right = AugmentationCreator._rotate_vector_around_point(image_shape/2, lower_right, rot_mat) / image_shape
            rotated_obj_list.append((obj_name, {'upper_left' : upper_left, 'lower_right' : lower_right}))
            
        return rotated_obj_list 

Example 9

def apply_SL2C_elt_to_image(M_SL2C, src_image, out_size=None):

    s_im = np.atleast_3d(src_image)
    in_size = s_im.shape[:-1]
    if out_size is None:
        out_size = in_size
    #We are going to find the location in the source image that each pixel in the output image comes from

    #least squares matrix inversion (find X such that M @ X = I ==> X = inv(M) @ I = inv(M))
    Minv = np.linalg.lstsq(M_SL2C, np.eye(2))[0]
    #all of the x,y pairs in o_im:
    pts_out = np.indices(out_size).reshape((2,-1)) #results in a 2 x (num pixels) array of indices
    pts_out_a = angles_from_pixel_coords(pts_out, out_size)
    pts_out_s = sphere_from_angles(pts_out_a)
    pts_out_c = CP1_from_sphere(pts_out_s)
    pts_in_c = np.dot(Minv, pts_out_c) # (2x2) @ (2xn) => (2xn)
    pts_in_s = sphere_from_CP1(pts_in_c)
    pts_in_a = angles_from_sphere(pts_in_s)
    pts_in = pixel_coords_from_angles(pts_in_a, in_size)
    #reshape pts into 2 x image_shape for the interpolation
    o_im = get_interpolated_pixel_color(pts_in.reshape((2,)+out_size), s_im, in_size)

    return o_im 

Example 10

def _addto_netcdf(nf,var,data,units,long_name,notime=False):

    dimensions = nf.dimensions.items()

    if notime:
        dims = []
    else:
        dims = ['time',]
        
    for data_len in data.shape:
        dims.extend([dim for dim,dim_len in dimensions \
                     if dim_len.size==data_len])

    # WARNING: This only works for 2D lon/lat, this needs to change
    if len(dims)==3:
        input_data = np.atleast_3d(data.T).T
    else:
        input_data = data
        
    _create_variable(nf,var,tuple(dims))
    _insert_data(nf,var,input_data,units,long_name) 

Example 11

def spacegroup_from_data(no=None, symbol=None, setting=1, 
                         centrosymmetric=None, scaled_primitive_cell=None, 
                         reciprocal_cell=None, subtrans=None, sitesym=None, 
                         rotations=None, translations=None, datafile=None):
    """Manually create a new space group instance.  This might be
    usefull when reading crystal data with its own spacegroup
    definitions."""
    if no is not None:
        spg = Spacegroup(no, setting, datafile)
    elif symbol is not None:
        spg = Spacegroup(symbol, setting, datafile)
    else:
        raise SpacegroupValueError('either *no* or *symbol* must be given')

    have_sym = False
    if centrosymmetric is not None:
        spg._centrosymmetric = bool(centrosymmetric)
    if scaled_primitive_cell is not None:
        spg._scaled_primitive_cell = np.array(scaled_primitive_cell)
    if reciprocal_cell is not None:
        spg._reciprocal_cell = np.array(reciprocal_cell)
    if subtrans is not None:
        spg._subtrans = np.atleast_2d(subtrans)
        spg._nsubtrans = spg._subtrans.shape[0]
    if sitesym is not None:
        spg._rotations, spg._translations = parse_sitesym(sitesym)
        have_sym = True
    if rotations is not None:
        spg._rotations = np.atleast_3d(rotations)
        have_sym = True
    if translations is not None:
        spg._translations = np.atleast_2d(translations)
        have_sym = True
    if have_sym:
        if spg._rotations.shape[0] != spg._translations.shape[0]:
            raise SpacegroupValueError('inconsistent number of rotations and '
                                       'translations')
        spg._nsymop = spg._rotations.shape[0]
    return spg 

Example 12

def __new__(self, time, y, x, clf='lda', cvtype=None, clfArg={},
                cvArg={}):

        self.y = np.ravel(y)
        self.time = time

        # Define clf if it's not defined :
        if isinstance(clf, (int, str)):
            clf = defClf(y, clf=clf, **clfArg)
        self.clf = clf

        # Define cv if it's not defined :
        if isinstance(cvtype, str) and (cvtype is not None):
            cvtype = defCv(y, cvtype=cvtype, rep=1, **cvArg)
        self.cv = cvtype
        if isinstance(cvtype, list):
            cvtype = cvtype[0]
        # Check the size of x:
        x = np.atleast_3d(x)
        npts, ntrials = len(time), len(y)
        if x.shape[0] != npts:
            raise ValueError('First dimension of x must be '+str(npts))
        if x.shape[1] != ntrials:
            raise ValueError('Second dimension of x must be '+str(ntrials))

        da = np.zeros([npts, npts])
        # Training dimension
        for k in range(npts):
            xx = x[k, ...]
            # Testing dimension
            for i in range(npts):
                xy = x[i, ...]
                # If cv is defined, do a cv on the diagonal
                if (k == i) and (cvtype is not None):
                    da[i, k] = _cvscore(xx, y, clf, self.cv.cvr[0])[0]/100
                # If cv is not defined, let the diagonal at zero
                elif (k == i) and (cvtype is None):
                    pass
                else:
                    da[i, k] = accuracy_score(y, clf.fit(xx, y).predict(xy))
        return 100*da 

Example 13

def get_img_layout(
        file_records, frames_layout, tags,
        z_slice = 15,
        spacing_v = 5,
        spacing_h = 5,
        color=255,
):
    img_layout = None
    n_rows = len(frames_layout)
    n_cols = len(tags)
    for r, idx_frame in enumerate(frames_layout):
        print('frame:', idx_frame)
        tag_path_dict = file_records.get(idx_frame)
        if tag_path_dict is None:
            print('frame {:d} did not exist. skipping....'.format(idx_frame))
            continue
        offset_r = None
        for c, tag in enumerate(tags):
            path_this_tag = tag_path_dict.get(tag)
            print('using path:', path_this_tag)
            if path_this_tag is None:
                continue
            ar = tifffile.imread(path_this_tag)
            if img_layout is None:
                shape_layout = (
                    ar.shape[1]*n_rows + (n_rows - 1)*spacing_v,
                    ar.shape[2]*n_cols + (n_cols - 1)*spacing_h,
                    3,  # assume color image
                )
                img_layout = np.ones(shape_layout, dtype=np.uint8)*color
            if offset_r is None:
                offset_r = r*(ar.shape[1] + spacing_v)
            offset_c = c*(ar.shape[2] + spacing_h)
            img = np.atleast_3d(ar[z_slice, :, :, ])
            if (r, c) == (0, 0):
                functions.add_scale_bar(img, 20, 0.3)
            img_layout[offset_r:offset_r + ar.shape[1], offset_c:offset_c + ar.shape[2], ] = img
    return img_layout 

Example 14

def get_sampler_args(self, image):
        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
        args = (np.atleast_3d(rotp), np.atleast_3d(self.box_vectors[2]),
                self.back_center,
                (-self.width[0]/2.0, self.width[0]/2.0,
                 -self.width[1]/2.0, self.width[1]/2.0),
                image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
                np.array(self.width, dtype='float64'), self.transfer_function, self.sub_samples)
        return args, {'lens_type': 'plane-parallel'} 

Example 15

def get_sampler_args(self, image):
        rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
        args = (np.atleast_3d(rotp), np.atleast_3d(self.box_vectors[2]),
                self.back_center,
            (-self.width[0]/2., self.width[0]/2.,
             -self.width[1]/2., self.width[1]/2.),
            image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
                np.array(self.width, dtype='float64'), self.sub_samples)
        return args, {'lens_type': 'plane-parallel'} 

Example 16

def new_volume_render_sampler(camera, render_source):
    params = ensure_code_unit_params(camera._get_sampler_params(render_source))
    params.update(transfer_function=render_source.transfer_function)
    params.update(num_samples=render_source.num_samples)
    args = (
        np.atleast_3d(params['vp_pos']),
        np.atleast_3d(params['vp_dir']),
        params['center'],
        params['bounds'],
        params['image'],
        params['x_vec'],
        params['y_vec'],
        params['width'],
        params['transfer_function'],
        params['num_samples'],
    )
    kwargs = {'lens_type': params['lens_type']}
    if "camera_data" in params:
        kwargs['camera_data'] = params['camera_data']
    if render_source.zbuffer is not None:
        kwargs['zbuffer'] = render_source.zbuffer.z
        args[4][:] = np.reshape(render_source.zbuffer.rgba[:], \
            (camera.resolution[0], camera.resolution[1], 4))
    else:
        kwargs['zbuffer'] = np.ones(params['image'].shape[:2], "float64")

    sampler = VolumeRenderSampler(*args, **kwargs)
    return sampler 

Example 17

def select_blocks(self, selector):
        mask = self.oct_handler.mask(selector, domain_id = self.domain_id)
        slicer = OctreeSubsetBlockSlice(self)
        for i, sl in slicer:
            yield sl, np.atleast_3d(mask[i,...]) 

Example 18

def getData(X, F, window=(30, 30), N=10):
    Y = []
    windowGen = []
#    ysets = np.unique(F)

    def toY(i, j):
        windowGen.append([i, j, window[0], window[1]])
        # fraction of positive labels inside the window
        Y.append(np.sum(F[i:i + window[0], j:j + window[1]]) / (window[0] * window[1]))
#        Y[-1] = ysets[Y[-1] == max(Y[-1])][0]
        Y[-1] = 1 if Y[-1] > 0.6 else 0
        return np.atleast_3d([X[i:i + window[0], j:j + window[1]]])

    Xs = sum([[toY(i, j) for j in range(0, X.shape[1] - window[1] - 1, N)]
              for i in range(0, X.shape[0] - window[0] - 1, 10)], [])
    return np.array(Xs), np.array(Y), windowGen

Example 19

def fit(self, epochs_data, y):
        """Standardizes data across channels

        Parameters
        ----------
        epochs_data : array, shape (n_epochs, n_channels, n_times)
            The data to concatenate channels.
        y : array, shape (n_epochs,)
            The label for each epoch.

        Returns
        -------
        self : instance of Scaler
            Returns the modified instance.
        """
        if not isinstance(epochs_data, np.ndarray):
            raise ValueError("epochs_data should be of type ndarray (got %s)."
                             % type(epochs_data))

        X = np.atleast_3d(epochs_data)

        picks_list = dict()
        picks_list['mag'] = pick_types(self.info, meg='mag', ref_meg=False,
                                       exclude='bads')
        picks_list['grad'] = pick_types(self.info, meg='grad', ref_meg=False,
                                        exclude='bads')
        picks_list['eeg'] = pick_types(self.info, eeg=True, ref_meg=False,
                                       meg=False, exclude='bads')

        self.picks_list_ = picks_list

        for key, this_pick in picks_list.items():
            if self.with_mean:
                ch_mean = X[:, this_pick, :].mean(axis=1)[:, None, :]
                self.ch_mean_[key] = ch_mean  # TODO rename attribute
            if self.with_std:
                ch_std = X[:, this_pick, :].std(axis=1)[:, None, :]
                self.std_[key] = ch_std  # TODO rename attribute

        return self 

Example 20

def transform(self, epochs_data, y=None):
        """Standardizes data across channels

        Parameters
        ----------
        epochs_data : array, shape (n_epochs, n_channels, n_times)
            The data.
        y : None | array, shape (n_epochs,)
            The label for each epoch.
            If None not used. Defaults to None.

        Returns
        -------
        X : array, shape (n_epochs, n_channels, n_times)
            The data concatenated over channels.
        """
        if not isinstance(epochs_data, np.ndarray):
            raise ValueError("epochs_data should be of type ndarray (got %s)."
                             % type(epochs_data))

        X = np.atleast_3d(epochs_data)

        for key, this_pick in six.iteritems(self.picks_list_):
            if self.with_mean:
                X[:, this_pick, :] -= self.ch_mean_[key]
            if self.with_std:
                X[:, this_pick, :] /= self.std_[key]

        return X 

Example 21

def inverse_transform(self, epochs_data, y=None):
        """ Inverse standardization of data across channels

        Parameters
        ----------
        epochs_data : array, shape (n_epochs, n_channels, n_times)
            The data.
        y : None | array, shape (n_epochs,)
            The label for each epoch.
            If None not used. Defaults to None.

        Returns
        -------
        X : array, shape (n_epochs, n_channels, n_times)
            The data concatenated over channels.
        """
        if not isinstance(epochs_data, np.ndarray):
            raise ValueError("epochs_data should be of type ndarray (got %s)."
                             % type(epochs_data))

        X = np.atleast_3d(epochs_data)

        for key, this_pick in six.iteritems(self.picks_list_):
            if self.with_mean:
                X[:, this_pick, :] += self.ch_mean_[key]
            if self.with_std:
                X[:, this_pick, :] *= self.std_[key]

        return X 

Example 22

def transform(self, epochs_data, y=None):
        """For each epoch, concatenate data from different channels into a single
        feature vector.

        Parameters
        ----------
        epochs_data : array, shape (n_epochs, n_channels, n_times)
            The data.
        y : None | array, shape (n_epochs,)
            The label for each epoch.
            If None not used. Defaults to None.

        Returns
        -------
        X : array, shape (n_epochs, n_channels * n_times)
            The data concatenated over channels
        """
        if not isinstance(epochs_data, np.ndarray):
            raise ValueError("epochs_data should be of type ndarray (got %s)."
                             % type(epochs_data))

        epochs_data = np.atleast_3d(epochs_data)

        n_epochs, n_channels, n_times = epochs_data.shape
        X = epochs_data.reshape(n_epochs, n_channels * n_times)
        # save attributes for inverse_transform
        self.n_epochs = n_epochs
        self.n_channels = n_channels
        self.n_times = n_times

        return X 

Example 23

def rotate_image(image, angle):
        '''
        Rotates the given image by the given angle.
        '''
        
        rows, cols, _ = np.atleast_3d(image).shape
        rot_mat = cv2.getRotationMatrix2D((cols/2, rows/2), angle, 1)
        
        return cv2.warpAffine(image, rot_mat, (cols, rows)) 

Example 24

def translate_image(image, translation):
        '''
        Translates the given image with the given translation vector.
        '''
        
        rows, cols, _ = np.atleast_3d(image).shape
        trans_mat = np.array([[1, 0, translation[0]*cols], [0, 1, translation[1]*rows]])
        
        return cv2.warpAffine(image, trans_mat, (cols, rows)) 

Example 25

def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
    """Graph of the pixel-to-pixel gradient connections

    Edges are weighted with the gradient values.

    Read more in the :ref:`User Guide <image_feature_extraction>`.

    Parameters
    ----------
    img : ndarray, 2D or 3D
        2D or 3D image
    mask : ndarray of booleans, optional
        An optional mask of the image, to consider only part of the
        pixels.
    return_as : np.ndarray or a sparse matrix class, optional
        The class to use to build the returned adjacency matrix.
    dtype : None or dtype, optional
        The dtype of the returned sparse matrix. By default it is the
        dtype of img

    Notes
    -----
    For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
    by returning a dense np.matrix instance.  Going forward, np.ndarray
    returns an np.ndarray, as expected.

    For compatibility, user code relying on this method should wrap its
    calls in ``np.asarray`` to avoid type issues.
    """
    img = np.atleast_3d(img)
    n_x, n_y, n_z = img.shape
    return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype) 
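
For context, the function above appears to be scikit-learn's public sklearn.feature_extraction.image.img_to_graph; a minimal usage sketch (assuming scikit-learn is installed) looks like this:

import numpy as np
from sklearn.feature_extraction.image import img_to_graph

img = np.arange(9, dtype=float).reshape(3, 3)
graph = img_to_graph(img)        # sparse adjacency matrix, one node per pixel
print(graph.shape)               # (9, 9)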

Example 26

def get_interpolated_pixel_color(pts, s_im, size):
    """given pts in floats, linear interpolate pixel values nearby to get a good colour"""
    pts = clamp(pts, size)

    s_im = np.atleast_3d(s_im)
    ys,xs = size
    ycoords, xcoords = np.arange(ys), np.arange(xs)
    out = np.empty(pts.shape[1:] + (s_im.shape[-1],),dtype=s_im.dtype)
    for i in range(s_im.shape[-1]): #loop over color channels
        map_coordinates(s_im[...,i],pts,out[...,i],mode='nearest')
    return out 

Example 27

def get_interpolated_pixel_color_rbspline(pts, s_im, size):
    """given pts in floats, linear interpolate pixel values nearby to get a good colour"""
    pts = clamp(pts, size)

    s_im = np.atleast_3d(s_im)
    ys,xs = size
    ycoords, xcoords = np.arange(ys), np.arange(xs)
    out = np.empty(pts.shape[1:] + (s_im.shape[-1],),dtype=s_im.dtype)
    
    pts_vec = pts.reshape((2,-1))
    out_vec = out.reshape((-1,s_im.shape[-1])) #flatten for easier vectorization
    for i in range(s_im.shape[-1]): #loop over color channels
        rbspline = RectBivariateSpline(ycoords, xcoords, s_im[...,i])
        out_vec[:,i] = rbspline.ev(pts_vec[0],pts_vec[1])
    return out


### Functions generating SL(2,C) matrices ###
# Do not need to be vectorized # 

Example 28

def _read_fluid_selection(self, chunks, selector, fields, size):
        rv = {}
        # Now we have to do something unpleasant
        chunks = list(chunks)
        if isinstance(selector, GridSelector):
            if not (len(chunks) == len(chunks[0].objs) == 1):
                raise RuntimeError
            g = chunks[0].objs[0]
            f = h5py.File(g.filename, 'r')
            gds = f.get("/Grid%08i" % g.id)
            for ftype, fname in fields:
                rv[(ftype, fname)] = np.atleast_3d(
                    gds.get(fname).value.transpose())
            f.close()
            return rv
        if size is None:
            size = sum((g.count(selector) for chunk in chunks
                        for g in chunk.objs))
        for field in fields:
            ftype, fname = field
            fsize = size
            rv[field] = np.empty(fsize, dtype="float64")
        ng = sum(len(c.objs) for c in chunks)
        mylog.debug("Reading %s cells of %s fields in %s grids",
                   size, [f2 for f1, f2 in fields], ng)
        ind = 0
        for chunk in chunks:
            f = None
            for g in chunk.objs:
                if f is None:
                    #print "Opening (count) %s" % g.filename
                    f = h5py.File(g.filename, "r")
                gds = f.get("/Grid%08i" % g.id)
                if gds is None:
                    gds = f
                for field in fields:
                    ftype, fname = field
                    ds = np.atleast_3d(gds.get(fname).value.transpose())
                    nd = g.select(selector, ds, rv[field], ind) # caches
                ind += nd
            f.close()
        return rv 

Example 29

def _to_graph(n_x, n_y, n_z, mask=None, img=None,
              return_as=sparse.coo_matrix, dtype=None):
    """Auxiliary function for img_to_graph and grid_to_graph
    """
    edges = _make_edges_3d(n_x, n_y, n_z)

    if dtype is None:
        if img is None:
            dtype = int
        else:
            dtype = img.dtype

    if img is not None:
        img = np.atleast_3d(img)
        weights = _compute_gradient_3d(edges, img)
        if mask is not None:
            edges, weights = _mask_edges_weights(mask, edges, weights)
            diag = img.squeeze()[mask]
        else:
            diag = img.ravel()
        n_voxels = diag.size
    else:
        if mask is not None:
            mask = astype(mask, dtype=bool, copy=False)
            mask = np.asarray(mask, dtype=bool)
            edges = _mask_edges_weights(mask, edges)
            n_voxels = np.sum(mask)
        else:
            n_voxels = n_x * n_y * n_z
        weights = np.ones(edges.shape[1], dtype=dtype)
        diag = np.ones(n_voxels, dtype=dtype)

    diag_idx = np.arange(n_voxels)
    i_idx = np.hstack((edges[0], edges[1]))
    j_idx = np.hstack((edges[1], edges[0]))
    graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
                              (np.hstack((i_idx, diag_idx)),
                               np.hstack((j_idx, diag_idx)))),
                              (n_voxels, n_voxels),
                              dtype=dtype)
    if return_as is np.ndarray:
        return graph.toarray()
    return return_as(graph) 