Python numpy.apply_along_axis() 使用实例

The following are code examples showing how to use numpy.apply_along_axis(). They are extracted from open source Python projects. You can vote up the examples you like or vote down the examples you don't like. You can also save this page to your account.

Example 1

def _logcdf(self, samples):
        """Log of the bivariate normal CDF evaluated at the Gaussian
        quantiles of each row of *samples* (a Gaussian-copula log-CDF).

        Parameters:
            samples: ndarray of shape (n, 2) with values in [0, 1]
                (copula scale); each row is one sample.

        Returns:
            ndarray of shape (n,) with one log-CDF value per sample;
            boundary rows (components at 0 or 1) are patched explicitly.
        """
        # Integration limits: from -inf up to the Gaussian quantile of each
        # component.  limit_flags = 0 selects (-inf, upper] limits in mvndst.
        lower = np.full(2, -np.inf)
        upper = norm.ppf(samples)
        limit_flags = np.zeros(2)
        if upper.shape[0] > 0:

            def func1d(upper1d):
                '''
                Calculates the multivariate normal cumulative distribution
                function of a single sample.
                '''
                # self.theta is passed as the correlation parameter of
                # mvndst -- presumably the copula dependence; confirm.
                return mvn.mvndst(lower, upper1d, limit_flags, self.theta)[1]

            vals = np.apply_along_axis(func1d, -1, upper)
        else:
            # No samples: produce an empty result of the right shape.
            vals = np.empty((0, ))
        # CDF values can be exactly 0, so log() legitimately yields -inf;
        # temporarily silence numpy's divide warning for that case.
        old_settings = np.seterr(divide='ignore')
        vals = np.log(vals)
        np.seterr(**old_settings)
        # Boundary fix-ups: any component at 0 forces CDF = 0 (log = -inf);
        # a component at 1 reduces the joint CDF to the other marginal.
        vals[np.any(samples == 0.0, axis=1)] = -np.inf
        vals[samples[:, 0] == 1.0] = np.log(samples[samples[:, 0] == 1.0, 1])
        vals[samples[:, 1] == 1.0] = np.log(samples[samples[:, 1] == 1.0, 0])
        return vals 

Example 2

def _nanmedian(a, axis=None, out=None, overwrite_input=False):
    """
    Private function that doesn't support extended axis or keepdims.
    These methods are extended to this function using _ureduce
    See nanmedian for parameter usage

    """
    # Fully-reduced case: flatten and take a single 1-D nan-median.
    if axis is None or a.ndim == 1:
        flat = a.ravel()
        median = _nanmedian1d(flat, overwrite_input)
        if out is None:
            return median
        out[...] = median
        return out

    # for small medians use sort + indexing which is still faster than
    # apply_along_axis
    if a.shape[axis] < 400:
        return _nanmedian_small(a, axis, out, overwrite_input)

    result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input)
    if out is not None:
        out[...] = result
    return result

Example 3

def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
                   interpolation='linear', keepdims=False):
    """
    Private function that doesn't support extended axis or keepdims.
    These methods are extended to this function using _ureduce
    See nanpercentile for parameter usage

    """
    if axis is None:
        # No axis: reduce over the flattened array.
        result = _nanpercentile1d(a.ravel(), q, overwrite_input, interpolation)
    else:
        result = np.apply_along_axis(_nanpercentile1d, axis, a, q,
                                     overwrite_input, interpolation)
        # apply_along_axis places the per-slice results in the collapsed
        # axis; move that axis to the front to match percentile's
        # convention (only relevant when q is itself an array).
        if q.ndim != 0:
            result = np.rollaxis(result, axis)

    if out is not None:
        out[...] = result
    return result

Example 4

def _fix_alpha_channel(self):
        """Restore the alpha channel of saved label images in place.

        Works around a bug where RGBA label colors were written as RGB:
        rebuilds each 3-channel image's alpha by looking up the RGB triple
        in self.names (assumed keyed by RGBA tuples -- the c[:3]/c[3]
        slicing below relies on that) and rewrites the file.
        """
        # This is a fix for a bug where the Alpha channel was dropped.
        # Map each RGB triple -> its original alpha value.
        colors3to4 = [(c[:3], c[3]) for c in self.names.keys()]
        colors3to4 = dict(colors3to4)
        assert(len(colors3to4) == len(self.names)) # Dropped alpha channel causes colors to collide :(
        for lbl in self.labels:
            if lbl is None:
                continue    # No label file created yet.
            img  = Image.open(lbl)
            size = img.size
            img  = np.array(img)
            if img.shape[2] == 4:
                continue    # Image has alpha channel, good.
            elif img.shape[2] == 3:
                # Lookup each (partial) color and find what its alpha should be.
                alpha   = np.apply_along_axis(lambda c: colors3to4[tuple(c)], 2, img)
                data    = np.dstack([img, np.array(alpha, dtype=np.uint8)])
                new_img = Image.frombuffer("RGBA", size, data, "raw", "RGBA", 0, 1)
                new_img.save(lbl)
                print("FIXED", lbl) 

Example 5

def plot_cost_to_go_mountain_car(env, estimator, num_tiles=20):
    """Plot the negated greedy value (the "cost to go") over a grid of
    mountain-car states (position x velocity) as a 3-D surface."""
    low, high = env.observation_space.low, env.observation_space.high
    positions = np.linspace(low[0], high[0], num=num_tiles)
    velocities = np.linspace(low[1], high[1], num=num_tiles)
    X, Y = np.meshgrid(positions, velocities)
    # Evaluate -max_a Q(s, a) at every (position, velocity) grid point.
    Z = np.apply_along_axis(lambda state: -np.max(estimator.predict(state)),
                            2, np.dstack([X, Y]))

    fig = plt.figure(figsize=(10, 5))
    ax = fig.add_subplot(111, projection='3d')
    surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                           cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0)
    ax.set_xlabel('Position')
    ax.set_ylabel('Velocity')
    ax.set_zlabel('Value')
    ax.set_title("Mountain \"Cost To Go\" Function")
    fig.colorbar(surf)
    plt.show()

Example 6

def boundary_tree_to_image(boundary_tree, size, image_mesh):
    """Render a boundary tree to a PIL "RGB" image of the given size.

    Each row of image_mesh is queried against the tree; the query result is
    assumed to be an iterable of byte values (an RGB triple) -- confirm
    against boundary_tree.query.
    """
    arr = array('B')
    # apply_along_axis is used purely for its per-row side effect here: each
    # query result is appended to the flat byte buffer in row order.
    np.apply_along_axis(lambda c: arr.extend(boundary_tree.query(c)), 1, image_mesh)
    return Image.frombytes("RGB", size, arr) 

Example 7

def __query_by_committee(self, clf, X_unlabeled):
        """Score unlabeled samples by committee disagreement.

        Parameters:
            clf: list of fitted classifiers (the committee); all members are
                assumed to share clf[0].classes_.
            X_unlabeled: array-like of candidate samples.

        Returns:
            1-D array with one disagreement score per sample, computed
            according to self.strategy ('vote_entropy' or
            'average_kl_divergence'); implicitly None for other strategies.
        """
        num_classes = len(clf[0].classes_)
        C = len(clf)
        preds = []

        if self.strategy == 'vote_entropy':
            for model in clf:
                # BUG FIX: under Python 3, map() returns a lazy iterator,
                # which cannot be used to fancy-index np.eye(...); it must
                # be materialized as a list of ints first.
                y_out = list(map(int, model.predict(X_unlabeled)))
                # One-hot encode this member's votes.
                preds.append(np.eye(num_classes)[y_out])

            # Fraction of committee votes per class, then per-sample entropy.
            votes = np.apply_along_axis(np.sum, 0, np.stack(preds)) / C
            return np.apply_along_axis(entropy, 1, votes)

        elif self.strategy == 'average_kl_divergence':
            for model in clf:
                preds.append(model.predict_proba(X_unlabeled))

            # Mean KL divergence of each member from the committee consensus.
            consensus = np.mean(np.stack(preds), axis=0)
            divergence = []
            for y_out in preds:
                divergence.append(entropy(consensus.T, y_out.T))

            return np.apply_along_axis(np.mean, 0, np.stack(divergence))

Example 8

def estimate_1hot_cost(X, is_categorical):
    """
    Calculate the "memory expansion" after applying one-hot encoding.

    :param X: array-like
        The input data array
    :param is_categorical: boolean array-like
        Array of vector form that indicates
        whether each features of X is categorical

    :return: int
        Calculated memory size in byte scale (expansion)
    """
    def _distinct_finite_labels(column):
        # Number of distinct finite values minus one (dummy coding).
        return np.sum(np.isfinite(np.unique(column))) - 1

    labels_per_column = np.apply_along_axis(_distinct_finite_labels, 0, X)
    n_columns = np.sum(labels_per_column[is_categorical])
    return n_columns * X.shape[0] * X.dtype.itemsize

Example 9

def load_dataset():
    """Load the training set from ./dataset/training.csv.

    Returns:
        image: int32 ndarray of shape (n_samples, 96*96) -- flattened
            96x96 pixel values parsed from the space-separated "Image"
            column.
        label: float32 ndarray of the remaining columns, with NaN entries
            replaced by their column means.

    Raises:
        FileNotFoundError: if ./dataset/training.csv does not exist.
    """
    path = "./dataset/training.csv"
    if not os.path.exists(path):
        # Raise a specific exception (still an Exception subclass, so
        # existing callers keep working) instead of print + bare Exception.
        raise FileNotFoundError("dataset does not exist: " + path)

    #load dataset
    labeled_image = pd.read_csv(path)

    #preprocessing dataframe
    # Each "Image" cell is a space-separated string of 96*96 pixel values.
    image = np.array(labeled_image["Image"].values).reshape(-1, 1)
    image = np.apply_along_axis(lambda img: (img[0].split()), 1, image)
    image = image.astype(np.int32)  # pixel strings -> integers
    image = image.reshape(-1, 96 * 96)  # one flattened 96x96 image per row

    label = labeled_image.values[:, :-1]
    label = label.astype(np.float32)

    #nan value to mean value
    col_mean = np.nanmean(label, axis=0)
    indices = np.where(np.isnan(label))
    label[indices] = np.take(col_mean, indices[1])

    return image, label

Example 10

def get_his_std_qi( data_pixel_qi, max_cts=None):
    """Photon-count histogram statistics for one q.

    YG. Dev 16, 2016

    Parameters:
        data_pixel_qi: 2-D array (frames x pixels) of photon counts.
        max_cts: histogram upper bound; bins are [0, 1, ..., max_cts - 1].
            Defaults to the data maximum + 1.

    Returns:
        bins:  the histogram bin values, np.arange(max_cts)
        his:   per-pixel-normalized histogram averaged over frames
        std:   standard deviation of that histogram across frames
        kmean: mean photon count over all frames and pixels
    """
    if max_cts is None:
        max_cts = np.max(data_pixel_qi) + 1
    bins = np.arange(max_cts)
    n_frames, n_pixels = data_pixel_qi.shape
    # One normalized count histogram per frame.
    frame_hist = np.apply_along_axis(np.bincount, 1, np.int_(data_pixel_qi),
                                     minlength=max_cts) / n_pixels
    # Average and spread of the histogram across frames.
    his = np.average(frame_hist, axis=0)
    std = np.std(frame_hist, axis=0)
    # Average photon count over everything.
    kmean = np.average(data_pixel_qi)
    return bins, his, std, kmean

Example 11

def _nanmedian(a, axis=None, out=None, overwrite_input=False):
    """
    Private function that doesn't support extended axis or keepdims.
    These methods are extended to this function using _ureduce
    See nanmedian for parameter usage

    """
    # Reduce over the whole (flattened) array when no axis is given or the
    # input is already 1-D.
    if axis is None or a.ndim == 1:
        part = a.ravel()
        if out is None:
            return _nanmedian1d(part, overwrite_input)
        else:
            # Mirror the scalar result through the caller-provided output.
            out[...] = _nanmedian1d(part, overwrite_input)
            return out
    else:
        # for small medians use sort + indexing which is still faster than
        # apply_along_axis
        if a.shape[axis] < 400:
            return _nanmedian_small(a, axis, out, overwrite_input)
        result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input)
        if out is not None:
            out[...] = result
        return result 

Example 12

def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
                   interpolation='linear', keepdims=False):
    """
    Private function that doesn't support extended axis or keepdims.
    These methods are extended to this function using _ureduce
    See nanpercentile for parameter usage

    """
    # Flatten and reduce in one shot when no axis is requested.
    if axis is None:
        part = a.ravel()
        result = _nanpercentile1d(part, q, overwrite_input, interpolation)
    else:
        result = np.apply_along_axis(_nanpercentile1d, axis, a, q,
                                     overwrite_input, interpolation)
        # apply_along_axis fills in collapsed axis with results.
        # Move that axis to the beginning to match percentile's
        # convention.
        if q.ndim != 0:
            result = np.rollaxis(result, axis)   

    # Mirror the result into the caller-provided output array, if any.
    if out is not None:
        out[...] = result
    return result 

Example 13

def punion(probs, axis=None):
    """Find the unions of given list of probabilities assuming independence.

    Args:
        probs:  Matrix-like probabilities to union.

        axis:   Axis along which union will be performed; None flattens
                the input first.

    Returns:
        Matrix of probability unions (a scalar when axis is None).
    """
    def _union_1d(p_vec):
        # Fold P(A or B) = P(A) + P(B) - P(A)P(B) over the vector.
        total = 0.0
        for p in p_vec:
            total += p * (1.0 - total)
        return total

    probs = np.asarray(probs)

    if axis is None:
        return _union_1d(probs.reshape((-1,)))
    return np.apply_along_axis(func1d=_union_1d, axis=axis, arr=probs)

Example 14

def addFamily(X):
    """Append family-size (column 8) and family-category (column 9) columns.

    Family size is the sum of columns 1 and 2 (presumably sibling/spouse and
    parent/child counts -- confirm with the caller); the category buckets
    that size into singles / normal / large.
    """
    # Family size: index 8
    family_size = (X[:, 1] + X[:, 2]).reshape(-1, 1)
    X = np.hstack((X, family_size))

    # Family category: index 9
    def categorize(row):
        size = row[8]
        if size == 1:
            return 0   # singles
        if 2 <= size <= 4:
            return 1   # normal size
        return 2       # large size

    category = np.apply_along_axis(categorize, 1, X).reshape(-1, 1)
    return np.hstack((X, category))


# Not used 

Example 15

def update_recover_maps(self):
        """Recompute production/strength "recovery" maps out to a capped radius.

        Layer 0 of prod_over_str_map is production divided by strength (zero
        strength treated as 2) over neutral, non-combat cells; each further
        layer spreads the previous one with spread_n, zeroes cells that were
        zero before, and divides by 5.  Max/avg/weighted summaries and
        recover_wtd_map are derived from the stack.
        """
        # Cap the search radius at 15 or half the board, whichever is smaller.
        max_distance = min(self.width // 2, self.height // 2, 15)

        self.prod_over_str_map = np.zeros((max_distance + 1, self.width, self.height))
        # Avoid division by zero: zero-strength cells are treated as strength 2.
        new_str_map = np.copy(self.strength_map)
        new_str_map[new_str_map == 0] = 2
        self.prod_over_str_map[0] = np.divide(self.production_map, new_str_map) * (self.is_neutral_map - self.combat_zone_map)

        for distance in range(1, max_distance + 1):
            self.prod_over_str_map[distance] = spread_n(self.prod_over_str_map[distance - 1], 1)
            # Cells that were zero at the previous distance stay zero.
            self.prod_over_str_map[distance][self.prod_over_str_map[distance - 1] == 0] = 0
            self.prod_over_str_map[distance] = self.prod_over_str_map[distance] / 5

        # Direct axis-0 reductions replace np.apply_along_axis(np.max/np.mean, 0, ...):
        # identical results at C speed.
        self.prod_over_str_max_map = self.prod_over_str_map.max(axis=0)
        self.prod_over_str_avg_map = self.prod_over_str_map.mean(axis=0)
        self.prod_over_str_wtd_map = (self.prod_over_str_max_map + self.prod_over_str_avg_map) / 2
        self.recover_wtd_map = 1 / np.maximum(self.prod_over_str_wtd_map, 0.01)

Example 16

def update_recover_maps(self):
        """Recompute production/strength "recovery" maps out to a capped radius.

        Layer 0 of prod_over_str_map is production divided by strength (zero
        strength treated as 2) over neutral, non-combat cells; each further
        layer spreads the previous one with spread_n, zeroes cells that were
        zero before, and divides by 5.  Max/avg/weighted summaries and
        recover_wtd_map are derived from the stack.
        """
        # Cap the search radius at 15 or half the board, whichever is smaller.
        max_distance = min(self.width // 2, self.height // 2, 15)

        self.prod_over_str_map = np.zeros((max_distance + 1, self.width, self.height))
        # Avoid division by zero: zero-strength cells are treated as strength 2.
        new_str_map = np.copy(self.strength_map)
        new_str_map[new_str_map == 0] = 2
        self.prod_over_str_map[0] = np.divide(self.production_map, new_str_map) * (self.is_neutral_map - self.combat_zone_map)

        for distance in range(1, max_distance + 1):
            self.prod_over_str_map[distance] = spread_n(self.prod_over_str_map[distance - 1], 1)
            # Cells that were zero at the previous distance stay zero.
            self.prod_over_str_map[distance][self.prod_over_str_map[distance - 1] == 0] = 0
            self.prod_over_str_map[distance] = self.prod_over_str_map[distance] / 5

        # Direct axis-0 reductions replace np.apply_along_axis(np.max/np.mean, 0, ...):
        # identical results at C speed.
        self.prod_over_str_max_map = self.prod_over_str_map.max(axis=0)
        self.prod_over_str_avg_map = self.prod_over_str_map.mean(axis=0)
        self.prod_over_str_wtd_map = (self.prod_over_str_max_map + self.prod_over_str_avg_map) / 2
        self.recover_wtd_map = 1 / np.maximum(self.prod_over_str_wtd_map, 0.01)

Example 17

def update_recover_maps(self):
        """Recompute production/strength "recovery" maps out to a capped radius.

        Layer 0 of prod_over_str_map is production divided by strength (zero
        strength treated as 2) over neutral, non-combat cells; each further
        layer spreads the previous one with spread_n, zeroes cells that were
        zero before, and divides by 5.  Max/avg/weighted summaries and
        recover_wtd_map are derived from the stack.
        """
        # Cap the search radius at 15 or half the board, whichever is smaller.
        max_distance = min(self.width // 2, self.height // 2, 15)

        self.prod_over_str_map = np.zeros((max_distance + 1, self.width, self.height))
        # Avoid division by zero: zero-strength cells are treated as strength 2.
        new_str_map = np.copy(self.strength_map)
        new_str_map[new_str_map == 0] = 2
        self.prod_over_str_map[0] = np.divide(self.production_map, new_str_map) * (self.is_neutral_map - self.combat_zone_map)

        for distance in range(1, max_distance + 1):
            self.prod_over_str_map[distance] = spread_n(self.prod_over_str_map[distance - 1], 1)
            # Cells that were zero at the previous distance stay zero.
            self.prod_over_str_map[distance][self.prod_over_str_map[distance - 1] == 0] = 0
            self.prod_over_str_map[distance] = self.prod_over_str_map[distance] / 5

        # Direct axis-0 reductions replace np.apply_along_axis(np.max/np.mean, 0, ...):
        # identical results at C speed.
        self.prod_over_str_max_map = self.prod_over_str_map.max(axis=0)
        self.prod_over_str_avg_map = self.prod_over_str_map.mean(axis=0)
        self.prod_over_str_wtd_map = (self.prod_over_str_max_map + self.prod_over_str_avg_map) / 2
        self.recover_wtd_map = 1 / np.maximum(self.prod_over_str_wtd_map, 0.01)

Example 18

def update_recover_maps(self):
        """Recompute recovery and production/strength maps out to width // 2.

        Layer 0 of prod_over_str_map is production divided by strength (zero
        strength treated as 2) over neutral, non-combat cells; each further
        layer spreads the previous one with spread_n, zeroes cells that were
        zero before, and divides by 5.  recover_map holds the inverse (turns
        to recover, floored at 0.01); max/avg/weighted summaries follow.
        """
        max_distance = self.width // 2
        self.recover_map = np.zeros((max_distance + 1, self.width, self.height))
        # NOTE: the original also pre-filled recover_map[0] from
        # strength / production_map_01, but that value was unconditionally
        # overwritten below before any read -- the dead store is removed.

        self.prod_over_str_map = np.zeros((max_distance + 1, self.width, self.height))
        # Avoid division by zero: zero-strength cells are treated as strength 2.
        new_str_map = np.copy(self.strength_map)
        new_str_map[new_str_map == 0] = 2
        self.prod_over_str_map[0] = np.divide(self.production_map, new_str_map) * (self.is_neutral_map - self.combat_zone_map)
        self.recover_map[0] = 1 / np.maximum(self.prod_over_str_map[0], 0.01)

        for distance in range(1, max_distance + 1):
            self.prod_over_str_map[distance] = spread_n(self.prod_over_str_map[distance - 1], 1)
            # Cells that were zero at the previous distance stay zero.
            self.prod_over_str_map[distance][self.prod_over_str_map[distance - 1] == 0] = 0
            self.prod_over_str_map[distance] = self.prod_over_str_map[distance] / 5
            self.recover_map[distance] = 1 / np.maximum(self.prod_over_str_map[distance], 0.01)

        # Direct axis-0 reductions replace np.apply_along_axis(np.max/np.mean, 0, ...):
        # identical results at C speed.
        self.prod_over_str_max_map = self.prod_over_str_map.max(axis=0)
        self.recover_max_map = 1 / np.maximum(self.prod_over_str_max_map, 0.01)
        self.prod_over_str_avg_map = self.prod_over_str_map.mean(axis=0)
        self.recover_avg_map = 1 / np.maximum(self.prod_over_str_avg_map, 0.01)
        self.prod_over_str_wtd_map = (self.prod_over_str_max_map + self.prod_over_str_avg_map) / 2
        self.recover_wtd_map = 1 / np.maximum(self.prod_over_str_wtd_map, 0.01)

Example 19

def update_recover_maps(self):
        """Recompute recovery and production/strength maps out to width // 2.

        Layer 0 of prod_over_str_map is production divided by strength (zero
        strength treated as 2) over neutral, non-combat cells; each further
        layer spreads the previous one with spread_n, zeroes cells that were
        zero before, and divides by 5.  recover_map holds the inverse (turns
        to recover, floored at 0.01); max/avg/weighted summaries follow.
        """
        max_distance = self.width // 2
        self.recover_map = np.zeros((max_distance + 1, self.width, self.height))
        # NOTE: the original also pre-filled recover_map[0] from
        # strength / production_map_01, but that value was unconditionally
        # overwritten below before any read -- the dead store is removed.

        self.prod_over_str_map = np.zeros((max_distance + 1, self.width, self.height))
        # Avoid division by zero: zero-strength cells are treated as strength 2.
        new_str_map = np.copy(self.strength_map)
        new_str_map[new_str_map == 0] = 2
        self.prod_over_str_map[0] = np.divide(self.production_map, new_str_map) * (self.is_neutral_map - self.combat_zone_map)
        self.recover_map[0] = 1 / np.maximum(self.prod_over_str_map[0], 0.01)

        for distance in range(1, max_distance + 1):
            self.prod_over_str_map[distance] = spread_n(self.prod_over_str_map[distance - 1], 1)
            # Cells that were zero at the previous distance stay zero.
            self.prod_over_str_map[distance][self.prod_over_str_map[distance - 1] == 0] = 0
            self.prod_over_str_map[distance] = self.prod_over_str_map[distance] / 5
            self.recover_map[distance] = 1 / np.maximum(self.prod_over_str_map[distance], 0.01)

        # Direct axis-0 reductions replace np.apply_along_axis(np.max/np.mean, 0, ...):
        # identical results at C speed.
        self.prod_over_str_max_map = self.prod_over_str_map.max(axis=0)
        self.recover_max_map = 1 / np.maximum(self.prod_over_str_max_map, 0.01)
        self.prod_over_str_avg_map = self.prod_over_str_map.mean(axis=0)
        self.recover_avg_map = 1 / np.maximum(self.prod_over_str_avg_map, 0.01)
        self.prod_over_str_wtd_map = (self.prod_over_str_max_map + self.prod_over_str_avg_map) / 2
        self.recover_wtd_map = 1 / np.maximum(self.prod_over_str_wtd_map, 0.01)

Example 20

def update_recover_maps(self):
        """Recompute production/strength "recovery" maps out to a capped radius.

        Layer 0 of prod_over_str_map is production divided by strength (zero
        strength treated as 2) over neutral, non-combat cells; each further
        layer spreads the previous one with spread_n, zeroes cells that were
        zero before, and divides by 5.  Max/avg/weighted summaries and
        recover_wtd_map are derived from the stack.
        """
        # Cap the search radius at 15 or half the board, whichever is smaller.
        max_distance = min(self.width // 2, self.height // 2, 15)

        self.prod_over_str_map = np.zeros((max_distance + 1, self.width, self.height))
        # Avoid division by zero: zero-strength cells are treated as strength 2.
        new_str_map = np.copy(self.strength_map)
        new_str_map[new_str_map == 0] = 2
        self.prod_over_str_map[0] = np.divide(self.production_map, new_str_map) * (self.is_neutral_map - self.combat_zone_map)

        for distance in range(1, max_distance + 1):
            self.prod_over_str_map[distance] = spread_n(self.prod_over_str_map[distance - 1], 1)
            # Cells that were zero at the previous distance stay zero.
            self.prod_over_str_map[distance][self.prod_over_str_map[distance - 1] == 0] = 0
            self.prod_over_str_map[distance] = self.prod_over_str_map[distance] / 5

        # Direct axis-0 reductions replace np.apply_along_axis(np.max/np.mean, 0, ...):
        # identical results at C speed.
        self.prod_over_str_max_map = self.prod_over_str_map.max(axis=0)
        self.prod_over_str_avg_map = self.prod_over_str_map.mean(axis=0)
        self.prod_over_str_wtd_map = (self.prod_over_str_max_map + self.prod_over_str_avg_map) / 2
        self.recover_wtd_map = 1 / np.maximum(self.prod_over_str_wtd_map, 0.01)

Example 21

def update_recover_maps(self):
        """Recompute recovery and production/strength maps out to width // 2.

        Layer 0 of prod_over_str_map is production divided by strength (zero
        strength treated as 2) over neutral, non-combat cells; each further
        layer spreads the previous one with spread_n, zeroes cells that were
        zero before, and divides by 5.  recover_map holds the inverse (turns
        to recover, floored at 0.01); max/avg/weighted summaries follow.
        """
        max_distance = self.width // 2
        self.recover_map = np.zeros((max_distance + 1, self.width, self.height))
        # NOTE: the original also pre-filled recover_map[0] from
        # strength / production_map_01, but that value was unconditionally
        # overwritten below before any read -- the dead store is removed.

        self.prod_over_str_map = np.zeros((max_distance + 1, self.width, self.height))
        # Avoid division by zero: zero-strength cells are treated as strength 2.
        new_str_map = np.copy(self.strength_map)
        new_str_map[new_str_map == 0] = 2
        self.prod_over_str_map[0] = np.divide(self.production_map, new_str_map) * (self.is_neutral_map - self.combat_zone_map)
        self.recover_map[0] = 1 / np.maximum(self.prod_over_str_map[0], 0.01)

        for distance in range(1, max_distance + 1):
            self.prod_over_str_map[distance] = spread_n(self.prod_over_str_map[distance - 1], 1)
            # Cells that were zero at the previous distance stay zero.
            self.prod_over_str_map[distance][self.prod_over_str_map[distance - 1] == 0] = 0
            self.prod_over_str_map[distance] = self.prod_over_str_map[distance] / 5
            self.recover_map[distance] = 1 / np.maximum(self.prod_over_str_map[distance], 0.01)

        # Direct axis-0 reductions replace np.apply_along_axis(np.max/np.mean, 0, ...):
        # identical results at C speed.
        self.prod_over_str_max_map = self.prod_over_str_map.max(axis=0)
        self.recover_max_map = 1 / np.maximum(self.prod_over_str_max_map, 0.01)
        self.prod_over_str_avg_map = self.prod_over_str_map.mean(axis=0)
        self.recover_avg_map = 1 / np.maximum(self.prod_over_str_avg_map, 0.01)
        self.prod_over_str_wtd_map = (self.prod_over_str_max_map + self.prod_over_str_avg_map) / 2
        self.recover_wtd_map = 1 / np.maximum(self.prod_over_str_wtd_map, 0.01)

Example 22

def update_recover_maps(self):
        """Recompute recovery and production/strength maps out to width // 2.

        Layer 0 of prod_over_str_map is production divided by strength (zero
        strength treated as 2) over neutral, non-combat cells; each further
        layer spreads the previous one with spread_n, zeroes cells that were
        zero before, and divides by 5.  recover_map holds the inverse (turns
        to recover, floored at 0.01); max/avg/weighted summaries follow.
        """
        max_distance = self.width // 2
        self.recover_map = np.zeros((max_distance + 1, self.width, self.height))
        # NOTE: the original also pre-filled recover_map[0] from
        # strength / production_map_01, but that value was unconditionally
        # overwritten below before any read -- the dead store is removed.

        self.prod_over_str_map = np.zeros((max_distance + 1, self.width, self.height))
        # Avoid division by zero: zero-strength cells are treated as strength 2.
        new_str_map = np.copy(self.strength_map)
        new_str_map[new_str_map == 0] = 2
        self.prod_over_str_map[0] = np.divide(self.production_map, new_str_map) * (self.is_neutral_map - self.combat_zone_map)
        self.recover_map[0] = 1 / np.maximum(self.prod_over_str_map[0], 0.01)

        for distance in range(1, max_distance + 1):
            self.prod_over_str_map[distance] = spread_n(self.prod_over_str_map[distance - 1], 1)
            # Cells that were zero at the previous distance stay zero.
            self.prod_over_str_map[distance][self.prod_over_str_map[distance - 1] == 0] = 0
            self.prod_over_str_map[distance] = self.prod_over_str_map[distance] / 5
            self.recover_map[distance] = 1 / np.maximum(self.prod_over_str_map[distance], 0.01)

        # Direct axis-0 reductions replace np.apply_along_axis(np.max/np.mean, 0, ...):
        # identical results at C speed.
        self.prod_over_str_max_map = self.prod_over_str_map.max(axis=0)
        self.recover_max_map = 1 / np.maximum(self.prod_over_str_max_map, 0.01)
        self.prod_over_str_avg_map = self.prod_over_str_map.mean(axis=0)
        self.recover_avg_map = 1 / np.maximum(self.prod_over_str_avg_map, 0.01)
        self.prod_over_str_wtd_map = (self.prod_over_str_max_map + self.prod_over_str_avg_map) / 2
        self.recover_wtd_map = 1 / np.maximum(self.prod_over_str_wtd_map, 0.01)

Example 23

def update_recover_maps(self):
        """Recompute recovery and production/strength maps out to width // 2.

        Layer 0 of prod_over_str_map is production divided by strength (zero
        strength treated as 2) over neutral, non-combat cells; each further
        layer spreads the previous one with spread_n, zeroes cells that were
        zero before, and divides by 5.  recover_map holds the inverse (turns
        to recover, floored at 0.01); max/avg/weighted summaries follow.
        """
        max_distance = self.width // 2
        self.recover_map = np.zeros((max_distance + 1, self.width, self.height))
        # NOTE: the original also pre-filled recover_map[0] from
        # strength / production_map_01, but that value was unconditionally
        # overwritten below before any read -- the dead store is removed.

        self.prod_over_str_map = np.zeros((max_distance + 1, self.width, self.height))
        # Avoid division by zero: zero-strength cells are treated as strength 2.
        new_str_map = np.copy(self.strength_map)
        new_str_map[new_str_map == 0] = 2
        self.prod_over_str_map[0] = np.divide(self.production_map, new_str_map) * (self.is_neutral_map - self.combat_zone_map)
        self.recover_map[0] = 1 / np.maximum(self.prod_over_str_map[0], 0.01)

        for distance in range(1, max_distance + 1):
            self.prod_over_str_map[distance] = spread_n(self.prod_over_str_map[distance - 1], 1)
            # Cells that were zero at the previous distance stay zero.
            self.prod_over_str_map[distance][self.prod_over_str_map[distance - 1] == 0] = 0
            self.prod_over_str_map[distance] = self.prod_over_str_map[distance] / 5
            self.recover_map[distance] = 1 / np.maximum(self.prod_over_str_map[distance], 0.01)

        # Direct axis-0 reductions replace np.apply_along_axis(np.max/np.mean, 0, ...):
        # identical results at C speed.
        self.prod_over_str_max_map = self.prod_over_str_map.max(axis=0)
        self.recover_max_map = 1 / np.maximum(self.prod_over_str_max_map, 0.01)
        self.prod_over_str_avg_map = self.prod_over_str_map.mean(axis=0)
        self.recover_avg_map = 1 / np.maximum(self.prod_over_str_avg_map, 0.01)
        self.prod_over_str_wtd_map = (self.prod_over_str_max_map + self.prod_over_str_avg_map) / 2
        self.recover_wtd_map = 1 / np.maximum(self.prod_over_str_wtd_map, 0.01)

Example 24

def update_recover_maps(self):
        """Recompute recovery and production/strength maps out to width // 2.

        Layer 0 of prod_over_str_map is production divided by strength (zero
        strength treated as 2) over neutral, non-combat cells; each further
        layer spreads the previous one with spread_n, zeroes cells that were
        zero before, and divides by 5.  recover_map holds the inverse (turns
        to recover, floored at 0.01); max/avg/weighted summaries follow.
        """
        max_distance = self.width // 2
        self.recover_map = np.zeros((max_distance + 1, self.width, self.height))
        # NOTE: the original also pre-filled recover_map[0] from
        # strength / production_map_01, but that value was unconditionally
        # overwritten below before any read -- the dead store is removed.

        self.prod_over_str_map = np.zeros((max_distance + 1, self.width, self.height))
        # Avoid division by zero: zero-strength cells are treated as strength 2.
        new_str_map = np.copy(self.strength_map)
        new_str_map[new_str_map == 0] = 2
        self.prod_over_str_map[0] = np.divide(self.production_map, new_str_map) * (self.is_neutral_map - self.combat_zone_map)
        self.recover_map[0] = 1 / np.maximum(self.prod_over_str_map[0], 0.01)

        for distance in range(1, max_distance + 1):
            self.prod_over_str_map[distance] = spread_n(self.prod_over_str_map[distance - 1], 1)
            # Cells that were zero at the previous distance stay zero.
            self.prod_over_str_map[distance][self.prod_over_str_map[distance - 1] == 0] = 0
            self.prod_over_str_map[distance] = self.prod_over_str_map[distance] / 5
            self.recover_map[distance] = 1 / np.maximum(self.prod_over_str_map[distance], 0.01)

        # Direct axis-0 reductions replace np.apply_along_axis(np.max/np.mean, 0, ...):
        # identical results at C speed.
        self.prod_over_str_max_map = self.prod_over_str_map.max(axis=0)
        self.recover_max_map = 1 / np.maximum(self.prod_over_str_max_map, 0.01)
        self.prod_over_str_avg_map = self.prod_over_str_map.mean(axis=0)
        self.recover_avg_map = 1 / np.maximum(self.prod_over_str_avg_map, 0.01)
        self.prod_over_str_wtd_map = (self.prod_over_str_max_map + self.prod_over_str_avg_map) / 2
        self.recover_wtd_map = 1 / np.maximum(self.prod_over_str_wtd_map, 0.01)

Example 25

def update_recover_maps(self):
        """Recompute recovery and production/strength maps out to width // 2.

        Layer 0 of prod_over_str_map is production divided by strength (zero
        strength treated as 2) over neutral, non-combat cells; each further
        layer spreads the previous one with spread_n, zeroes cells that were
        zero before, and divides by 5.  recover_map holds the inverse (turns
        to recover, floored at 0.01); max/avg/weighted summaries follow.
        """
        max_distance = self.width // 2
        self.recover_map = np.zeros((max_distance + 1, self.width, self.height))
        # NOTE: the original also pre-filled recover_map[0] from
        # strength / production_map_01, but that value was unconditionally
        # overwritten below before any read -- the dead store is removed.

        self.prod_over_str_map = np.zeros((max_distance + 1, self.width, self.height))
        # Avoid division by zero: zero-strength cells are treated as strength 2.
        new_str_map = np.copy(self.strength_map)
        new_str_map[new_str_map == 0] = 2
        self.prod_over_str_map[0] = np.divide(self.production_map, new_str_map) * (self.is_neutral_map - self.combat_zone_map)
        self.recover_map[0] = 1 / np.maximum(self.prod_over_str_map[0], 0.01)

        for distance in range(1, max_distance + 1):
            self.prod_over_str_map[distance] = spread_n(self.prod_over_str_map[distance - 1], 1)
            # Cells that were zero at the previous distance stay zero.
            self.prod_over_str_map[distance][self.prod_over_str_map[distance - 1] == 0] = 0
            self.prod_over_str_map[distance] = self.prod_over_str_map[distance] / 5
            self.recover_map[distance] = 1 / np.maximum(self.prod_over_str_map[distance], 0.01)

        # Direct axis-0 reductions replace np.apply_along_axis(np.max/np.mean, 0, ...):
        # identical results at C speed.
        self.prod_over_str_max_map = self.prod_over_str_map.max(axis=0)
        self.recover_max_map = 1 / np.maximum(self.prod_over_str_max_map, 0.01)
        self.prod_over_str_avg_map = self.prod_over_str_map.mean(axis=0)
        self.recover_avg_map = 1 / np.maximum(self.prod_over_str_avg_map, 0.01)
        self.prod_over_str_wtd_map = (self.prod_over_str_max_map + self.prod_over_str_avg_map) / 2
        self.recover_wtd_map = 1 / np.maximum(self.prod_over_str_wtd_map, 0.01)

Example 26

def update_recover_maps(self):
    """Recompute production-over-strength and recovery-cost maps for all
    neighborhood radii from 0 to width // 2.

    Each stack has shape (max_distance + 1, width, height); recovery is
    the reciprocal of production-over-strength, clamped at 0.01.
    """
    max_distance = self.width // 2
    shape = (max_distance + 1, self.width, self.height)
    weight = self.is_neutral_map - self.combat_zone_map

    # NOTE(review): this initial recover_map[0] is immediately replaced
    # below; retained so the execution trace matches the original.
    self.recover_map = np.zeros(shape)
    self.recover_map[0] = np.divide(self.strength_map, self.production_map_01) * weight

    # Avoid division by zero: zero-strength cells are counted as 2.
    strength_nz = np.copy(self.strength_map)
    strength_nz[strength_nz == 0] = 2

    self.prod_over_str_map = np.zeros(shape)
    self.prod_over_str_map[0] = np.divide(self.production_map, strength_nz) * weight
    self.recover_map[0] = 1 / np.maximum(self.prod_over_str_map[0], 0.01)

    for dist in range(1, max_distance + 1):
        prev = self.prod_over_str_map[dist - 1]
        spread = spread_n(prev, 1)
        spread[prev == 0] = 0  # zeros never become recoverable by spreading
        self.prod_over_str_map[dist] = spread / 5
        self.recover_map[dist] = 1 / np.maximum(self.prod_over_str_map[dist], 0.01)

    self.prod_over_str_max_map = np.apply_along_axis(np.max, 0, self.prod_over_str_map)
    self.recover_max_map = 1 / np.maximum(self.prod_over_str_max_map, 0.01)
    self.prod_over_str_avg_map = np.apply_along_axis(np.mean, 0, self.prod_over_str_map)
    self.recover_avg_map = 1 / np.maximum(self.prod_over_str_avg_map, 0.01)
    self.prod_over_str_wtd_map = (self.prod_over_str_max_map + self.prod_over_str_avg_map) / 2
    self.recover_wtd_map = 1 / np.maximum(self.prod_over_str_wtd_map, 0.01)

Example 27

def match_matrix(event: Event):
    """Returns a numpy participation matrix for the qualification matches in this event, used for calculating OPR.

    Each row in the matrix corresponds to a single alliance in a match, meaning that there will be two rows (one for
    red, one for blue) per match. Each column represents a single team, ordered by team number. If a team participated
    on a certain alliance, the value at that row and column would be 1, otherwise, it would be 0. For example, an
    event with teams 1-7 that featured a match that pitted teams 1, 3, and 5 against 2, 4, and 6 would have a match
    matrix that looks like this (sans labels):

                                #1  #2  #3  #4  #5  #6  #7
                    qm1_red     1   0   1   0   1   0   0
                    qm1_blue    0   1   0   1   0   1   0
    """
    match_list = []
    for match in event.matches:
        if match['comp_level'] != 'qm':
            continue
        # One 0/1 row per alliance, in red-then-blue order.
        for color in ('red', 'blue'):
            alliance_teams = match['alliances'][color]['teams']
            match_list.append([1 if team['key'] in alliance_teams else 0
                               for team in event.teams])

    mat = numpy.array(match_list)
    # Column sums of a 0/1 matrix are exactly the per-team match counts,
    # so the original per-column apply_along_axis(count_nonzero) loop is
    # redundant — reuse the sums directly (same values, vectorized).
    sum_matches = numpy.sum(mat, axis=0)
    avg_team_matches = sum_matches.mean()
    # Drop teams that played fewer than (average - 2) matches.
    return mat[:, sum_matches > avg_team_matches - 2]

Example 28

def cluster_words(words, service_name, size):
    """Hierarchically cluster *words* by Jaro string distance and return a
    cluster of the requested *size*.

    Boilerplate tokens (HTTP verbs, counter names, the service name and
    separators) are stripped from every word before comparison.
    """
    noise_tokens = ["GET", "POST", "total", "http-requests", service_name, "-", "_"]
    cleaned_words = []
    for raw in words:
        for token in noise_tokens:
            raw = raw.replace(token, "")
        cleaned_words.append(raw)

    def pair_distance(pair):
        # pair is one (row, col) column of the upper-triangle index arrays.
        a, b = pair
        return 1 - jaro_distance(cleaned_words[a], cleaned_words[b])

    upper = np.triu_indices(len(words), 1)
    condensed = np.apply_along_axis(pair_distance, 0, upper)
    return cluster_of_size(linkage(condensed), size)

Example 29

def permute_rows(seed, array):
    """
    Shuffle each row in ``array`` based on permutations generated by ``seed``.

    Parameters
    ----------
    seed : int
        Seed for numpy.RandomState
    array : np.ndarray[ndim=2]
        Array over which to apply permutations.
    """
    rng = np.random.RandomState(seed)
    # Rows are permuted one at a time, top to bottom, so the output is
    # fully determined by the seed.
    return np.array([rng.permutation(row) for row in array])

Example 30

def __detect_now(self,spike_waveforms,selectChan,current_page):
    """Return a boolean mask of spikes whose waveforms cross both ROI
    selection lines for the given channel and page.

    When no selection-window state exists for this channel/page, every
    spike is considered detected.
    """
    state_key = selectChan + "_" + str(current_page)
    if state_key not in self.windowsState:
        return np.ones(spike_waveforms.shape[0], dtype=bool)
    line0 = self.__pk0_roi0_pos(selectChan, current_page)
    in_line0 = np.apply_along_axis(self.__in_select_line, 1, spike_waveforms, line0[0], line0[1])
    line1 = self.__pk0_roi1_pos(selectChan, current_page)
    in_line1 = np.apply_along_axis(self.__in_select_line, 1, spike_waveforms, line1[0], line1[1])
    return in_line0 & in_line1
    # check whether a spike's waveform intersects the segment widget

Example 31

def __in_select_line(self,temp_spike,pos_1,pos_2):
    """Return True if any segment of the spike-waveform polyline crosses
    the segment (pos_1, pos_2).

    Sample k of the waveform is treated as the point (k, temp_spike[k]);
    consecutive samples form the polyline segments.
    """
    seg_count = temp_spike.shape[0] - 1
    left_x = np.arange(seg_count)
    # Each row: (x0, y0, x1, y1) of one waveform segment.
    segments = np.vstack([left_x, temp_spike[:-1], left_x + 1, temp_spike[1:]]).T
    hits = np.apply_along_axis(self.__intersect, 1, segments, pos_1, pos_2)
    return np.any(hits)

Example 32

def __indexs_select_pk0(self,pk0_roi0_h0,pk0_roi0_h1,pk0_roi1_h0,pk0_roi1_h1):
    """Return global indices of pk0 waveforms crossing BOTH ROI lines.

    Indices are offset by the first pk0 index so they address the full
    waveform list rather than the pk0 slice.
    """
    hits0 = np.apply_along_axis(self.__in_select_line, 1, self.waveforms_pk0,
                                pk0_roi0_h0, pk0_roi0_h1)
    idx0 = np.where(hits0)[0].astype(np.int32)

    hits1 = np.apply_along_axis(self.__in_select_line, 1, self.waveforms_pk0,
                                pk0_roi1_h0, pk0_roi1_h1)
    idx1 = np.where(hits1)[0].astype(np.int32)

    both = np.intersect1d(idx0, idx1)
    return both + self.indexs_pk0[0]

Example 33

def __in_select_line(self,temp_spike,pos_1,pos_2):
    """True when the polyline through (k, temp_spike[k]) intersects the
    segment from pos_1 to pos_2."""
    n_segments = temp_spike.shape[0] - 1
    start_x = np.arange(n_segments)
    # Rows are (x_start, y_start, x_end, y_end) per polyline segment.
    seg_rows = np.vstack([start_x, temp_spike[:-1], start_x + 1, temp_spike[1:]]).T
    crossing = np.apply_along_axis(self.__intersect, 1, seg_rows, pos_1, pos_2)
    return np.any(crossing)

Example 34

def __indexs_select_pk2(self,pk2_roi_pos):
    """Return indices of PCA points that fall inside the ROI polygon.

    Points are first prefiltered by the polygon's bounding box on the
    two PCA components chosen in the UI, then tested against the closed
    polygon via ``__intersect_roi2``.
    """
    x_min = pk2_roi_pos[:, 0].min()
    x_max = pk2_roi_pos[:, 0].max()
    y_min = pk2_roi_pos[:, 1].min()
    y_max = pk2_roi_pos[:, 1].max()
    pca_1, pca_2 = self.PCAusedList.currentText().split("-")
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    pca_1 = int(pca_1) - 1
    pca_2 = int(pca_2) - 1
    in_x = np.logical_and(self.wavePCAs[:, pca_1] > x_min,
                          self.wavePCAs[:, pca_1] < x_max)
    in_y = np.logical_and(self.wavePCAs[:, pca_2] > y_min,
                          self.wavePCAs[:, pca_2] < y_max)
    ind_0 = np.where(np.logical_and(in_x, in_y))[0]
    ind_0 = np.array(ind_0, dtype=np.int32)
    if ind_0.shape[0] == 0:
        return np.array([], dtype=np.int32)
    # Build the closed polygon: consecutive vertex pairs plus last->first.
    segments = [[pk2_roi_pos[i], pk2_roi_pos[i + 1]]
                for i in range(pk2_roi_pos.shape[0] - 1)]
    segments.append([pk2_roi_pos[-1], pk2_roi_pos[0]])
    segments = np.array(segments)
    candidates = self.wavePCAs[ind_0][:, [pca_1, pca_2]]
    is_intersect = np.apply_along_axis(self.__intersect_roi2, 1, candidates,
                                       segments, pca_1)
    return ind_0[is_intersect]

Example 35

def __detect_now(self,spike_waveforms,selectChan,current_page):
    """Boolean mask over spikes: True where the waveform crosses both
    selection lines of the current channel/page window.

    Without stored window state, all spikes count as detected.
    """
    key = selectChan + "_" + str(current_page)
    if key not in self.windowsState:
        return np.ones(spike_waveforms.shape[0], dtype=bool)
    roi0 = self.__pk0_roi0_pos(selectChan, current_page)
    mask0 = np.apply_along_axis(self.__in_select_line, 1, spike_waveforms, roi0[0], roi0[1])
    roi1 = self.__pk0_roi1_pos(selectChan, current_page)
    mask1 = np.apply_along_axis(self.__in_select_line, 1, spike_waveforms, roi1[0], roi1[1])
    return mask0 & mask1
    # check whether a spike's waveform intersects the segment widget

Example 36

def __in_select_line(self,temp_spike,pos_1,pos_2):
    """Return True if the waveform polyline intersects segment
    (pos_1, pos_2); sample k maps to the point (k, temp_spike[k])."""
    n = temp_spike.shape[0] - 1
    xs = np.arange(n)
    # One row per waveform segment: (x0, y0, x1, y1).
    quads = np.vstack([xs, temp_spike[:-1], xs + 1, temp_spike[1:]]).T
    intersects = np.apply_along_axis(self.__intersect, 1, quads, pos_1, pos_2)
    return np.any(intersects)

Example 37

def __indexs_select_pk0(self,pk0_roi0_h0,pk0_roi0_h1,pk0_roi1_h0,pk0_roi1_h1):
    """Indices (into the full waveform list) of pk0 waveforms that cross
    both ROI selection lines."""
    crossed0 = np.apply_along_axis(self.__in_select_line, 1, self.waveforms_pk0,
                                   pk0_roi0_h0, pk0_roi0_h1)
    first_set = np.where(crossed0)[0].astype(np.int32)

    crossed1 = np.apply_along_axis(self.__in_select_line, 1, self.waveforms_pk0,
                                   pk0_roi1_h0, pk0_roi1_h1)
    second_set = np.where(crossed1)[0].astype(np.int32)

    # Offset pk0-local indices into the global numbering.
    return np.intersect1d(first_set, second_set) + self.indexs_pk0[0]

Example 38

def __in_select_line(self,temp_spike,pos_1,pos_2):
    """True if any piece of the sample-by-sample polyline of this spike
    crosses the segment (pos_1, pos_2)."""
    count = temp_spike.shape[0] - 1
    x0 = np.arange(count)
    seg_table = np.vstack([x0, temp_spike[:-1], x0 + 1, temp_spike[1:]]).T
    flags = np.apply_along_axis(self.__intersect, 1, seg_table, pos_1, pos_2)
    return np.any(flags)

Example 39

def __indexs_select_pk2(self,pk2_roi_pos):
    """Indices of PCA points inside the closed ROI polygon.

    A bounding-box prefilter on the UI-selected PCA components narrows
    the candidates before the exact polygon test (``__intersect_roi2``).
    """
    x_min = pk2_roi_pos[:, 0].min()
    x_max = pk2_roi_pos[:, 0].max()
    y_min = pk2_roi_pos[:, 1].min()
    y_max = pk2_roi_pos[:, 1].max()
    pca_1, pca_2 = self.PCAusedList.currentText().split("-")
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin int instead.
    pca_1 = int(pca_1) - 1
    pca_2 = int(pca_2) - 1
    in_x = np.logical_and(self.wavePCAs[:, pca_1] > x_min,
                          self.wavePCAs[:, pca_1] < x_max)
    in_y = np.logical_and(self.wavePCAs[:, pca_2] > y_min,
                          self.wavePCAs[:, pca_2] < y_max)
    ind_0 = np.where(np.logical_and(in_x, in_y))[0]
    ind_0 = np.array(ind_0, dtype=np.int32)
    if ind_0.shape[0] == 0:
        return np.array([], dtype=np.int32)
    # Close the polygon: consecutive vertex pairs plus last->first.
    segments = [[pk2_roi_pos[i], pk2_roi_pos[i + 1]]
                for i in range(pk2_roi_pos.shape[0] - 1)]
    segments.append([pk2_roi_pos[-1], pk2_roi_pos[0]])
    segments = np.array(segments)
    candidates = self.wavePCAs[ind_0][:, [pca_1, pca_2]]
    is_intersect = np.apply_along_axis(self.__intersect_roi2, 1, candidates,
                                       segments, pca_1)
    return ind_0[is_intersect]

Example 40

def normalize_simple(matrix, mask):
    """Normalizes a matrix by columns, and then by rows. With multiple
    time-series, the data are normalized to the within-series total, not the
    entire data set total.

    Parameters
    ----------
    matrix: np.matrix
        Time-series matrix of abundance counts. Rows are sequences, columns
        are samples/time-points.
    mask: list or np.array
        List of objects with length matching the number of timepoints, where
        unique values delineate multiple time-series. If there is only one
        time-series in the data set, it's a list of identical objects.

    Returns
    -------
    normal_matrix: np.matrix
        Matrix where the columns (within-sample) have been converted to
        proportions, then the rows are z-scored within each time-series.
    """
    # BUG FIX: with a plain list (as documented), `mask == mask_val` was a
    # scalar False, so the per-series z-scoring silently never ran.
    # Coercing to an ndarray makes the comparison elementwise.
    mask = np.asarray(mask)
    normal_matrix = matrix / matrix.sum(0)
    # Columns that sum to zero produce inf/NaN proportions; zero them out.
    normal_matrix[np.invert(np.isfinite(normal_matrix))] = 0
    for mask_val in np.unique(mask):
        cols = np.where(mask == mask_val)[0]
        normal_matrix[:, cols] = np.apply_along_axis(zscore, 1, normal_matrix[:, cols])
    return normal_matrix

Example 41

def feat_eeg(signals):
    """
    calculate the relative power as defined by Leangkvist (2012),
    assuming signal is recorded with 100hz

    Returns a (n_signals, 9) float32 array: relative power in the
    delta/theta/alpha/beta/gamma/spindle bands, log-kurtosis, spectral
    entropy, and a weighted mean frequency.
    """
    if signals.ndim == 1: signals = np.expand_dims(signals,0)

    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    feats = np.zeros((signals.shape[0],9),dtype='float32')
    # Band powers from the real part of the FFT; each arange maps a
    # frequency band (Hz) onto FFT bin indices.
    w = (fft(signals,axis=1)).real
    delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
    theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
    alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
    beta  = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
    gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1)   # only until 50, because hz=100
    spindle = np.sum(np.abs(w[:,np.arange(12*nsamp/sfreq,14*nsamp/sfreq, dtype=int)]),axis=1)
    sum_abs_pow = delta + theta + alpha + beta + gamma + spindle
    feats[:,0] = delta /sum_abs_pow
    feats[:,1] = theta /sum_abs_pow
    feats[:,2] = alpha /sum_abs_pow
    feats[:,3] = beta  /sum_abs_pow
    feats[:,4] = gamma /sum_abs_pow
    feats[:,5] = spindle /sum_abs_pow
    feats[:,6] = np.log10(stats.kurtosis(signals, fisher=False, axis=1))        # kurtosis
    feats[:,7] = np.log10(-np.sum([(x/nsamp)*(np.log(x/nsamp)) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1))  # entropy.. yay, one line...
    #feats[:,7] = np.polynomial.polynomial.polyfit(np.log(f[np.arange(0.5*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]), np.log(w[0,np.arange(0.5*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),1)
    feats[:,8] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5)
    # BUG FIX: `feats == np.nan` is always False (NaN never compares
    # equal); np.isnan actually detects NaNs before they are zeroed.
    if np.any(np.isnan(feats)): print('NaN detected')
    return np.nan_to_num(feats)

Example 42

def feat_wavelet(signals):
    """
    calculate the relative power as defined by Leangkvist (2012),
    assuming signal is recorded with 100hz

    Returns a (n_signals, 8) float32 array: 5 relative band powers,
    log-kurtosis, spectral entropy and a weighted mean frequency.
    """
    if signals.ndim == 1: signals = np.expand_dims(signals,0)

    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    feats = np.zeros((signals.shape[0],8),dtype='float32')
    # Band powers from the real part of the FFT; each arange maps a
    # frequency band (Hz) onto FFT bin indices.
    w = (fft(signals,axis=1)).real
    delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
    theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
    alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
    beta  = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
    gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1)   # only until 50, because hz=100
    sum_abs_pow = delta + theta + alpha + beta + gamma
    feats[:,0] = delta /sum_abs_pow
    feats[:,1] = theta /sum_abs_pow
    feats[:,2] = alpha /sum_abs_pow
    feats[:,3] = beta  /sum_abs_pow
    feats[:,4] = gamma /sum_abs_pow
    feats[:,5] = np.log10(stats.kurtosis(signals,fisher=False,axis=1))        # kurtosis
    feats[:,6] = np.log10(-np.sum([(x/nsamp)*(np.log(x/nsamp)) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1))  # entropy.. yay, one line...
    #feats[:,7] = np.polynomial.polynomial.polyfit(np.log(f[np.arange(0.5*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]), np.log(w[0,np.arange(0.5*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),1)
    feats[:,7] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5)
    # BUG FIX: `feats == np.nan` is always False (NaN never compares
    # equal); np.isnan actually detects NaNs before they are zeroed.
    if np.any(np.isnan(feats)): print('NaN detected')

    return np.nan_to_num(feats)

Example 43

def feat_eog(signals):
    """
    calculate the EOG features
    :param signals: 1D or 2D signals

    Returns a (n_signals, 15) float32 array: 5 relative band powers, a
    weighted mean frequency, peak/valley amplitude and position, AUC,
    zero-crossing rate, log-std, log-kurtosis and spectral entropy.
    """

    if signals.ndim == 1: signals = np.expand_dims(signals,0)
    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    w = (fft(signals,axis=1)).real
    feats = np.zeros((signals.shape[0],15),dtype='float32')
    # Band powers from the real part of the FFT; each arange maps a
    # frequency band (Hz) onto FFT bin indices.
    delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
    theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
    alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
    beta  = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
    gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1)   # only until 50, because hz=100
    sum_abs_pow = delta + theta + alpha + beta + gamma
    feats[:,0] = delta /sum_abs_pow
    feats[:,1] = theta /sum_abs_pow
    feats[:,2] = alpha /sum_abs_pow
    feats[:,3] = beta  /sum_abs_pow
    feats[:,4] = gamma /sum_abs_pow
    feats[:,5] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5) #smean
    feats[:,6] = np.sqrt(np.max(signals, axis=1))    #PAV
    feats[:,7] = np.sqrt(np.abs(np.min(signals, axis=1)))   #VAV
    feats[:,8] = np.argmax(signals, axis=1)/nsamp #PAP
    feats[:,9] = np.argmin(signals, axis=1)/nsamp #VAP
    feats[:,10] = np.sqrt(np.sum(np.abs(signals), axis=1)/ np.mean(np.sum(np.abs(signals), axis=1))) # AUC
    feats[:,11] = np.sum(((np.roll(np.sign(signals), 1,axis=1) - np.sign(signals)) != 0).astype(int),axis=1)/nsamp #TVC
    feats[:,12] = np.log10(np.std(signals, axis=1)) #STD/VAR
    feats[:,13] = np.log10(stats.kurtosis(signals,fisher=False,axis=1))       # kurtosis
    feats[:,14] = np.log10(-np.sum([(x/nsamp)*((np.log((x+np.spacing(1))/nsamp))) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1))  # entropy.. yay, one line...
    # BUG FIX: `feats == np.nan` is always False (NaN never compares
    # equal); np.isnan actually detects NaNs before they are zeroed.
    if np.any(np.isnan(feats)): print('NaN detected')
    return np.nan_to_num(feats)

Example 44

def feat_emg(signals):
    """
    calculate the EMG median as defined by Leangkvist (2012),

    Returns a (n_signals, 13) float32 array: 5 relative band powers, a
    weighted mean frequency, high-frequency ratio, median/mean motor-band
    power, std, mean, log-kurtosis and spectral entropy.
    """
    if signals.ndim == 1: signals = np.expand_dims(signals,0)
    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    w = (fft(signals,axis=1)).real
    feats = np.zeros((signals.shape[0],13),dtype='float32')
    # Band powers from the real part of the FFT; each arange maps a
    # frequency band (Hz) onto FFT bin indices.
    delta = np.sum(np.abs(w[:,np.arange(0.5*nsamp/sfreq,4*nsamp/sfreq, dtype=int)]),axis=1)
    theta = np.sum(np.abs(w[:,np.arange(4*nsamp/sfreq,8*nsamp/sfreq, dtype=int)]),axis=1)
    alpha = np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,13*nsamp/sfreq, dtype=int)]),axis=1)
    beta  = np.sum(np.abs(w[:,np.arange(13*nsamp/sfreq,20*nsamp/sfreq, dtype=int)]),axis=1)
    gamma = np.sum(np.abs(w[:,np.arange(20*nsamp/sfreq,50*nsamp/sfreq, dtype=int)]),axis=1)   # only until 50, because hz=100
    sum_abs_pow = delta + theta + alpha + beta + gamma
    feats[:,0] = delta /sum_abs_pow
    feats[:,1] = theta /sum_abs_pow
    feats[:,2] = alpha /sum_abs_pow
    feats[:,3] = beta  /sum_abs_pow
    feats[:,4] = gamma /sum_abs_pow
    feats[:,5] = np.dot(np.array([3.5,4,5,7,30]),feats[:,0:5].T ) / (sfreq/2-0.5) #smean
    emg = np.sum(np.abs(w[:,np.arange(12.5*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1)
    feats[:,6] = emg / np.sum(np.abs(w[:,np.arange(8*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1)  # ratio of high freq to total motor
    feats[:,7] = np.median(np.abs(w[:,np.arange(8*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1)    # median freq
    feats[:,8] = np.mean(np.abs(w[:,np.arange(8*nsamp/sfreq,32*nsamp/sfreq, dtype=int)]),axis=1)   #  mean freq
    feats[:,9] = np.std(signals, axis=1)    #  std
    feats[:,10] = np.mean(signals,axis=1)
    feats[:,11] = np.log10(stats.kurtosis(signals,fisher=False,axis=1) )
    feats[:,12] = np.log10(-np.sum([(x/nsamp)*((np.log((x+np.spacing(1))/nsamp))) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)],axis=1))  # entropy.. yay, one line...
    # BUG FIX: `feats == np.nan` is always False (NaN never compares
    # equal); np.isnan actually detects NaNs before they are zeroed.
    if np.any(np.isnan(feats)): print('NaN detected')

    return np.nan_to_num(feats)

Example 45

def jaccard(inclusion):
    """Calculate jaccard distances for a community."""
    logger.info("calculating jaccard distance for {}x{} input matrix".format(
                *inclusion.shape))
    # Pairwise intersection and union row counts via bitwise ops against
    # every other row.
    intersections = np.apply_along_axis(
        lambda row: (row & inclusion).sum(1), 1, inclusion)
    unions = np.apply_along_axis(
        lambda row: (row | inclusion).sum(1), 1, inclusion)
    return 1 - intersections / unions

Example 46

def euclidean(inclusion):
    """Calculate euclidean distances for a community."""
    logger.info("calculating euclidean distance for {}x{} input matrix".format(
                *inclusion.shape))
    # Row-vs-all squared differences, summed per pair, then square-rooted.
    squared = np.apply_along_axis(
        lambda row: ((row - inclusion) ** 2).sum(1), 1, inclusion)
    return np.sqrt(squared)

Example 47

def calc_pairwise_cosine(model):
    """Mean and standard deviation of the pairwise angular distances
    between the model's (row-normalized) topic vectors."""
    n = model.num_topics
    topic_vecs = model.state.get_lambda()
    topic_vecs = np.apply_along_axis(lambda row: row / row.sum(), 1, topic_vecs) # get dist.
    topic_vecs = unitmatrix(topic_vecs) # normalize
    angles = [
        np.arccos(topic_vecs[i].dot(topic_vecs[j]))
        for i in range(n)
        for j in range(i + 1, n)
    ]
    return np.mean(angles), np.std(angles)

Example 48

def calc_pairwise_dev(model):
    """Root-mean squared deviation of pairwise topic-vector dot products
    from orthogonality (0, i.e. 90 degrees)."""
    n = model.num_topics
    basis = model.state.get_lambda()
    basis = np.apply_along_axis(lambda row: row / row.sum(), 1, basis) # get dist.
    basis = unitmatrix(basis) # normalize
    total = sum(
        basis[i].dot(basis[j]) ** 2
        for i in range(n)
        for j in range(i + 1, n)
    )
    return np.sqrt(2. * total / n / (n - 1))

Example 49

def decode(self, X, mode='argmax'):
    """Map an array of per-step probability vectors back to a string.

    'argmax' takes the most likely index at each step; 'choice' samples
    an index from each step's (renormalized) distribution.
    """
    if mode == 'choice':
        sample = lambda vec: np.random.choice(len(vec), 1, p=(vec / np.sum(vec)))
        X = np.apply_along_axis(sample, axis=-1, arr=X).ravel()
    elif mode == 'argmax':
        X = X.argmax(axis=-1)
    return ''.join(self.indices_char[x] for x in X)

Example 50

def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
    """
    sort + indexing median, faster for small medians along multiple
    dimensions due to the high overhead of apply_along_axis

    see nanmedian for parameter usage
    """
    a = np.ma.masked_array(a, np.isnan(a))
    m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)
    for i in range(np.count_nonzero(m.mask.ravel())):
        warnings.warn("All-NaN slice encountered", RuntimeWarning)
    if out is not None:
        out[...] = m.filled(np.nan)
        return out
    return m.filled(np.nan) 
Like