# Python numpy.divide() Usage Examples

The following are code examples showing how to use numpy.divide(). They are extracted from open source Python projects.
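
A minimal, self-contained sketch of the basics first: numpy.divide() divides two arrays element-wise with broadcasting, and, like other ufuncs, it accepts optional out and where arguments (the array values below are made up for illustration).

import numpy as np

a = np.array([[2.0, 4.0], [6.0, 8.0]])
b = np.array([2.0, 4.0])              # broadcast against each row of `a`

print(np.divide(a, b))                # [[1. 1.] [3. 2.]]
print(a / b)                          # same result for these float inputs

# Entries where the divisor is zero are skipped and keep the value
# already present in `out`.
num = np.array([2.0, 3.0, 8.0])
den = np.array([1.0, 0.0, 4.0])
out = np.zeros(3)
np.divide(num, den, out=out, where=den != 0)
print(out)                            # [2. 0. 2.]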

Example 1

def EStep(self):
    P = np.zeros((self.M, self.N))

    for i in range(0, self.M):
        diff    = self.X - np.tile(self.TY[i, :], (self.N, 1))
        diff    = np.multiply(diff, diff)
        P[i, :] = P[i, :] + np.sum(diff, axis=1)

    c = (2 * np.pi * self.sigma2) ** (self.D / 2)
    c = c * self.w / (1 - self.w)
    c = c * self.M / self.N

    P = np.exp(-P / (2 * self.sigma2))
    den = np.sum(P, axis=0)
    den = np.tile(den, (self.M, 1))
    den[den == 0] = np.finfo(float).eps

    self.P   = np.divide(P, den)
    self.Pt1 = np.sum(self.P, axis=0)
    self.P1  = np.sum(self.P, axis=1)
    self.Np  = np.sum(self.P1)

Example 2

def reproject_one_shape(self, shape, bbox, window, nfids):
    '''Re-project a shape to the original plane.

    '''
    shape_re = shape
    std_w, std_h = window
    x = bbox[0]
    y = bbox[1]
    w = bbox[2]
    h = bbox[3]
    center_x = x + np.divide(w, 2)
    center_y = y + np.divide(h, 2)

    X = shape[0:nfids]
    Y = shape[nfids:]
    # reprojecting ...
    X = X * (std_w / 2.) + center_x
    Y = Y * (std_h / 2.) + center_y
    shape_re = np.concatenate((X, Y))

    return shape_re

Example 3

def normalise_images(X):
    '''
    Helper for making the images zero mean and unit standard deviation i.e. `white`
    '''

    X_white = np.zeros(X.shape, dtype=np.float32)

    for ii in range(X.shape[0]):

        Xc = X[ii, :, :, :]
        mc = Xc.mean()
        sc = Xc.std()

        Xc_white = np.divide((Xc - mc), sc)

        X_white[ii, :, :, :] = Xc_white

    return X_white.astype(np.float32)
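
The per-image loop above can also be written without an explicit Python loop. A minimal sketch, assuming X has shape (num_images, height, width, channels) as implied by the indexing in the example:

import numpy as np

def normalise_images_vectorised(X):
    # Per-image mean and std over all but the first axis, kept as
    # broadcastable (N, 1, 1, 1) arrays via keepdims.
    mc = X.mean(axis=(1, 2, 3), keepdims=True)
    sc = X.std(axis=(1, 2, 3), keepdims=True)
    return np.divide(X - mc, sc).astype(np.float32)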

Example 4

def IAM(self):
    """
    Computation of Ideal Amplitude Mask. As appears in :
    H. Erdogan, J. R. Hershey, S. Watanabe, and J. Le Roux,
    "Phase-sensitive and recognition-boosted speech separation using deep recurrent neural networks,"
    in ICASSP 2015, Brisbane, April, 2015.
    Args:
        sTarget:   (2D ndarray) Magnitude Spectrogram of the target component
        nResidual: (2D ndarray) Magnitude Spectrogram of the residual component
                   (In this case the observed mixture should be placed)
    Returns:
        mask:      (2D ndarray) Array that contains time frequency gain values

    """
    self._mask = np.divide(self._sTarget, (self._eps + self._nResidual))
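
Several of the masking examples on this page guard against division by zero by adding a small self._eps to the denominator. An alternative, shown here as a minimal sketch with made-up arrays, is to let np.divide skip the zero entries via its where argument:

import numpy as np

sTarget   = np.array([[0.2, 0.0], [0.5, 1.0]])
nResidual = np.array([[0.4, 0.0], [0.0, 2.0]])

# Entries where nResidual == 0 keep the initialised value (0 here)
# instead of producing inf/nan and a RuntimeWarning.
mask = np.divide(sTarget, nResidual,
                 out=np.zeros_like(sTarget),
                 where=nResidual != 0)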

Example 5

def ExpM(self):
    """
    Approximate a signal via element-wise exponentiation. As appears in :
    S.I. Mimilakis, K. Drossos, T. Virtanen, and G. Schuller,
    "Deep Neural Networks for Dynamic Range Compression in Mastering Applications,"
    in proc. of the 140th Audio Engineering Society Convention, Paris, 2016.
    Args:
        sTarget:   (2D ndarray) Magnitude Spectrogram of the target component
        nResidual: (2D ndarray) Magnitude Spectrogram of the residual component
    Returns:
        mask:      (2D ndarray) Array that contains time frequency gain values

    """
    # The start of this statement was cut off in the extracted snippet; the
    # numerator is assumed to mirror the denominator with sTarget.
    self._mask = np.divide(np.log(self._sTarget.clip(self._eps, np.inf)**self._alpha),
                           np.log(self._nResidual.clip(self._eps, np.inf)**self._alpha))

Example 6

def IBM(self):
    """
    Args:
        sTarget:   (2D ndarray) Magnitude Spectrogram of the target component
        nResidual: (2D ndarray) Magnitude Spectrogram of the residual component
    Returns:
        mask:      (2D ndarray) Array that contains time frequency gain values

    """
    theta = 0.5
    mask = np.divide(self._sTarget ** self._alpha, (self._eps + self._nResidual ** self._alpha))

Example 7

def UBBM(self):
    """
    Computation of Upper Bound Binary Mask. As appears in :
    - J.J. Burred, "From Sparse Models to Timbre Learning: New Methods for Musical Source Separation", PhD Thesis,
    TU Berlin, 2009.

    Args:
        sTarget:   (2D ndarray) Magnitude Spectrogram of the target component
        nResidual: (2D ndarray) Magnitude Spectrogram of the residual component (Should not contain target source!)
    Returns:
        mask:      (2D ndarray) Array that contains time frequency gain values
    """
    mask = 20. * np.log(self._eps + np.divide((self._eps + (self._sTarget ** self._alpha)),
                                              ((self._eps + (self._nResidual ** self._alpha)))))

Example 8

def alphaHarmonizableProcess(self):
    """
    Computation of Wiener like mask using fractional power spectrograms. As appears in :
    A. Liutkus, R. Badeau, "Generalized Wiener filtering with fractional power spectrograms",
    40th International Conference on Acoustics, Speech and Signal Processing (ICASSP),
    Apr 2015, Brisbane, Australia.
    Args:
        sTarget:   (2D ndarray) Magnitude Spectrogram of the target component
        nResidual: (2D ndarray) Magnitude Spectrogram of the residual component or a list
                   of 2D ndarrays which will be added together
    Returns:
        mask:      (2D ndarray) Array that contains time frequency gain values

    """
    print('Harmonizable Process with alpha:', str(self._alpha))
    localsTarget = self._sTarget ** self._alpha
    numElements = len(self._nResidual)
    if numElements > 1:
        localnResidual = self._nResidual[0] ** self._alpha + localsTarget
        for indx in range(1, numElements):
            localnResidual += self._nResidual[indx] ** self._alpha
    else:
        localnResidual = self._nResidual[0] ** self._alpha + localsTarget

    self._mask = np.divide((localsTarget + self._eps), (self._eps + localnResidual))

Example 9

def phaseSensitive(self):
    """
    Computation of Phase Sensitive Mask. As appears in :
    H Erdogan, John R. Hershey, Shinji Watanabe, and Jonathan Le Roux,
    "Phase-sensitive and recognition-boosted speech separation using deep recurrent neural networks,"
    in ICASSP 2015, Brisbane, April, 2015.

    Args:
        mTarget:   (2D ndarray) Magnitude Spectrogram of the target component
        pTarget:   (2D ndarray) Phase Spectrogram of the output component
        mY:        (2D ndarray) Magnitude Spectrogram of the residual component
        pY:        (2D ndarray) Phase Spectrogram of the residual component
    Returns:
        mask:      (2D ndarray) Array that contains time frequency gain values

    """
    # Compute Phase Difference
    Theta = (self._pTarget - self._pY)
    self._mask = 2. / (1. + np.exp(-np.multiply(np.divide(self._sTarget, self._eps + self._nResidual), np.cos(Theta)))) - 1.

Example 10

transform=None, sigma=0.0, color_vec=None):
    """Load augmented image with output shape (w, h).

    Default arguments return non augmented image of shape (w, h).
    To apply a fixed transform (color augmentation) specify transform
    (color_vec).
    To generate a random augmentation specify aug_params and sigma.
    """
    img = perturb(img, augmentation_params=aug_params, target_shape=(w, h))
    #if transform is None:
    #    img = perturb(img, augmentation_params=aug_params, target_shape=(w, h))
    #else:
    #    img = perturb_fixed(img, tform_augment=transform, target_shape=(w, h))

    #randString = str(np.random.normal(0,1,1))
    #im = Image.fromarray(img.transpose(1,2,0).astype('uint8'))
    #figName = fname.split("/")[-1]
    #im.save("imgs/"+figName+randString+".jpg")

    np.subtract(img, MEAN[:, np.newaxis, np.newaxis], out=img)
    #np.divide(img, STD[:, np.newaxis, np.newaxis], out=img)
    #img = augment_color(img, sigma=sigma, color_vec=color_vec)
    return img

Example 11

def setUp(self):
    # Base data definition.
    x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
    y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
    a10 = 10.
    m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
    m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
    z = np.array([-.5, 0., .5, .8])
    xf = np.where(m1, 1e+20, x)
    xm.set_fill_value(1e+20)
    self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
    self.err_status = np.geterr()
    np.seterr(divide='ignore', invalid='ignore')
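
np.seterr, as used in this test fixture, changes the floating-point error handling globally, which is why the code saves np.geterr() first. For a scoped change, np.errstate works as a context manager; a minimal sketch:

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([2.0, 0.0, 1.0])

# Divide-by-zero and invalid-value warnings are suppressed only inside
# this block; the global error settings are left untouched.
with np.errstate(divide='ignore', invalid='ignore'):
    ratio = np.divide(a, b)   # -> [0.5, inf, 3.0] without a RuntimeWarning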

Example 12

def __ifloordiv__(self, other):
    """
    Floor divide self by other in-place.

    """
    other_data = getdata(other)
    # (domain-mask lines missing from the snippet; assumed from numpy.ma.core)
    dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
    other_mask = getmask(other)
    new_mask = mask_or(other_mask, dom_mask)
    # The following 3 lines control the domain filling
    if dom_mask.any():
        (_, fval) = ufunc_fills[np.floor_divide]
        other_data = np.where(dom_mask, fval, other_data)
    self._mask |= new_mask
    self._data.__ifloordiv__(np.where(self._mask, self.dtype.type(1),
                                      other_data))
    return self

Example 13

def __itruediv__(self, other):
    """
    True divide self by other in-place.

    """
    other_data = getdata(other)
    # (domain-mask lines missing from the snippet; assumed from numpy.ma.core)
    dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
    other_mask = getmask(other)
    new_mask = mask_or(other_mask, dom_mask)
    # The following 3 lines control the domain filling
    if dom_mask.any():
        (_, fval) = ufunc_fills[np.true_divide]
        other_data = np.where(dom_mask, fval, other_data)
    self._mask |= new_mask
    self._data.__itruediv__(np.where(self._mask, self.dtype.type(1),
                                     other_data))
    return self

Example 14

def __ipow__(self, other):
    """
    Raise self to the power other, in place.

    """
    other_data = getdata(other)
    with np.errstate(divide='ignore', invalid='ignore'):
        # (missing lines assumed from numpy.ma.core)
        self._data.__ipow__(np.where(self._mask, self.dtype.type(1),
                                     other_data))
    invalid = np.logical_not(np.isfinite(self._data))
    if invalid.any():
        if self._mask is not nomask:
            self._mask |= invalid
        else:
            self._mask = invalid
        np.copyto(self._data, self.fill_value, where=invalid)
    return self

Example 15

def update_kl_loss(p, lambdas, T, Cs):
    """
    Updates C according to the KL Loss kernel with the S Ts couplings calculated at each iteration

    Parameters
    ----------
    p : ndarray, shape (N,)
        weights in the targeted barycenter
    lambdas : list of the S spaces' weights
    T : list of S np.ndarray(ns,N)
        the S Ts couplings calculated at each iteration
    Cs : list of S ndarray, shape(ns,ns)
        Metric cost matrices

    Returns
    -------
    C : ndarray, shape (ns,ns)
        updated C matrix
    """
    tmpsum = sum([lambdas[s] * np.dot(T[s].T, Cs[s]).dot(T[s])
                  for s in range(len(T))])
    ppt = np.outer(p, p)

    return np.exp(np.divide(tmpsum, ppt))
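
For orientation, the function computes C = exp( sum_s lambdas[s] * T[s].T @ Cs[s] @ T[s] / (p p^T) ), with the division and exponential applied element-wise. A tiny usage sketch with made-up inputs (S = 2 spaces, ns = 2, N = 3):

import numpy as np

p = np.array([0.4, 0.3, 0.3])                      # barycenter weights, shape (N,)
lambdas = [0.5, 0.5]                               # weight of each space
Cs = [np.eye(2), 2 * np.eye(2)]                    # metric cost matrices, shape (ns, ns)
T = [np.full((2, 3), 1.0 / 6) for _ in range(2)]   # couplings, shape (ns, N)

C = update_kl_loss(p, lambdas, T, Cs)
print(C.shape)                                     # (3, 3)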

Example 16

def IBM(self):
    """
    Args:
        sTarget:   (2D ndarray) Magnitude Spectrogram of the target component
        nResidual: (2D ndarray) Magnitude Spectrogram of the residual component
    Returns:
        mask:      (2D ndarray) Array that contains time frequency gain values

    """
    theta = 0.5
    mask = np.divide(self._sTarget ** self._alpha, (self._eps + self._nResidual ** self._alpha))

Example 17

def UBBM(self):
    """
    Computation of Upper Bound Binary Mask. As appears in :
    - J.J. Burred, "From Sparse Models to Timbre Learning: New Methods for Musical Source Separation", PhD Thesis,
    TU Berlin, 2009.

    Args:
        sTarget:   (2D ndarray) Magnitude Spectrogram of the target component
        nResidual: (2D ndarray) Magnitude Spectrogram of the residual component (Should not contain target source!)
    Returns:
        mask:      (2D ndarray) Array that contains time frequency gain values
    """
    mask = 20. * np.log(self._eps + np.divide((self._eps + (self._sTarget ** self._alpha)),
                                              ((self._eps + (self._nResidual ** self._alpha)))))

Example 18

def Wiener(self):
    """
    Computation of Wiener-like Mask. As appears in :
    H Erdogan, John R. Hershey, Shinji Watanabe, and Jonathan Le Roux,
    "Phase-sensitive and recognition-boosted speech separation using deep recurrent neural networks,"
    in ICASSP 2015, Brisbane, April, 2015.
    Args:
        sTarget:   (2D ndarray) Magnitude Spectrogram of the target component
        nResidual: (2D ndarray) Magnitude Spectrogram of the residual component
    Returns:
        mask:      (2D ndarray) Array that contains time frequency gain values
    """
    localsTarget = self._sTarget ** 2.
    numElements = len(self._nResidual)
    if numElements > 1:
        localnResidual = self._nResidual[0] ** 2. + localsTarget
        for indx in range(1, numElements):
            localnResidual += self._nResidual[indx] ** 2.
    else:
        localnResidual = self._nResidual[0] ** 2. + localsTarget

    self._mask = np.divide((localsTarget + self._eps), (self._eps + localnResidual))

Example 19

def alphaHarmonizableProcess(self):
    """
    Computation of alpha harmonizable Wiener like mask, as appears in :
    A. Liutkus, R. Badeau, "Generalized Wiener filtering with fractional power spectrograms",
    40th International Conference on Acoustics, Speech and Signal Processing (ICASSP),
    Apr 2015, Brisbane, Australia.
    Args:
        sTarget:   (2D ndarray) Magnitude Spectrogram of the target component
        nResidual: (2D ndarray) Magnitude Spectrogram of the residual component or a list
                   of 2D ndarrays which will be summed
    Returns:
        mask:      (2D ndarray) Array that contains time frequency gain values

    """
    print('Harmonizable Process with alpha:', str(self._alpha))
    localsTarget = self._sTarget ** self._alpha
    numElements = len(self._nResidual)
    if numElements > 1:
        localnResidual = self._nResidual[0] ** self._alpha + localsTarget
        for indx in range(1, numElements):
            localnResidual += self._nResidual[indx] ** self._alpha
    else:
        localnResidual = self._nResidual[0] ** self._alpha + localsTarget

    self._mask = np.divide((localsTarget + self._eps), (self._eps + localnResidual))

Example 20

def contributions(in_length, out_length, scale, kernel, k_width):
    if scale < 1:
        h = lambda x: scale * kernel(scale * x)
        kernel_width = 1.0 * k_width / scale
    else:
        h = kernel
        kernel_width = k_width
    x = np.arange(1, out_length+1).astype(np.float64)
    u = x / scale + 0.5 * (1 - 1 / scale)
    left = np.floor(u - kernel_width / 2)
    P = int(ceil(kernel_width)) + 2
    ind = np.expand_dims(left, axis=1) + np.arange(P) - 1  # -1 because indexing from 0
    indices = ind.astype(np.int32)
    weights = h(np.expand_dims(u, axis=1) - indices - 1)  # -1 because indexing from 0
    weights = np.divide(weights, np.expand_dims(np.sum(weights, axis=1), axis=1))
    aux = np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1))).astype(np.int32)
    indices = aux[np.mod(indices, aux.size)]
    ind2store = np.nonzero(np.any(weights, axis=0))
    weights = weights[:, ind2store]
    indices = indices[:, ind2store]
    return weights, indices
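
The row-wise normalisation above, np.divide(weights, np.expand_dims(np.sum(weights, axis=1), axis=1)), can also be written with keepdims=True, which keeps the summed axis so the result broadcasts directly; a small illustrative sketch:

import numpy as np

weights = np.array([[1.0, 3.0],
                    [2.0, 2.0]])

# Each row is divided by its own sum; keepdims keeps the (2, 1) shape.
row_sums = np.sum(weights, axis=1, keepdims=True)
weights = np.divide(weights, row_sums)   # rows now sum to 1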

Example 21

def generate_signals(self):
    for i in range(self.Trials):
        x = self.True_position[i, 0]
        y = self.True_position[i, 1]
        z = self.True_position[i, 2]

        mic_data = [numpy.vstack((numpy.zeros((int(round(self.Padding[i, j])), 1)), self.wave)) for j in range(self.N)]
        lenvec = numpy.array([len(mic) for mic in mic_data])
        m = max(lenvec)
        c = numpy.array([m - mic_len for mic_len in lenvec])
        mic_data = [numpy.vstack((current_mic, numpy.zeros((c[idx], 1)))) for idx, current_mic in enumerate(mic_data)]
        mic_data = [numpy.divide(current_mic, self.Distances[i, idx]) for idx, current_mic in enumerate(mic_data)]
        multitrack = numpy.array(mic_data)

        print 'prepared all data.'

        x, y, z = self.locate(self.Sen_position, multitrack)

        self.Est_position[i, 0] = x
        self.Est_position[i, 1] = y
        self.Est_position[i, 2] = z

Example 22

def _kl_hr(pha, amp, nbins):
    nPha, npts, nAmp = *pha.shape, amp.shape[0]
    step = 2*np.pi/nbins
    vecbin = binarize(-np.pi, np.pi+step, step, step)
    if len(vecbin) > nbins:
        vecbin = vecbin[0:-1]

    abin = np.zeros((nAmp, nPha, nbins))
    for k, i in enumerate(vecbin):
        # Find where phase take vecbin values :
        pL, pC = np.where((pha >= i[0]) & (pha < i[1]))

        # Matrix to do amp x binMat :
        binMat = np.zeros((npts, nPha))
        binMat[pC, pL] = 1
        meanMat = np.matlib.repmat(binMat.sum(axis=0), nAmp, 1)
        meanMat[meanMat == 0] = 1

        # Multiply matrix :
        abin[:, :, k] = np.divide(np.dot(amp, binMat), meanMat)
    abinsum = np.array([abin.sum(axis=2) for k in range(nbins)])

    return abin, abinsum

Example 23

"""
:param predictions_list:
:return:
"""
# print(pred)
price_hit = price_trend_hit(pred, answer)

margin_rate = np.divide(margin1, margin2)
score = np.sum(margin_rate)
return score, margin_rate, price_hit, missing

Example 24

def train(self):
    eps = 1e-10
    for i in range(self.epo):
        if i % 1 == 0:
            self.show_error()

        A = np.asarray(self.A.copy())
        Z = np.asarray(self.Z.copy())
        start = time.time()
        Z1 = np.multiply(Z, np.asarray(self.A.transpose() * self.Y))
        Z = np.divide(Z1, eps + np.asarray(self.A.transpose() * self.A * self.Z))  # + eps to avoid dividing by 0
        self.Z = np.asmatrix(Z)
        A1 = np.multiply(A, np.asarray(self.Y * self.Z.transpose()))
        A = np.divide(A1, eps + np.asarray(self.A * self.Z * self.Z.transpose()))
        end = time.time()
        self.A = np.asmatrix(A)
        self.time = self.time + end - start

Example 25

def make_xy_data(csv, drop_nan_columns=None):
    n = len(data)

    if drop_nan_columns:
        data = data.dropna(subset=drop_nan_columns)

    print "[Warning] dropped %s samples because of NaN values" % (n - len(data))

    y = np.divide(data[['prix']].astype(float).values.T,
                  data[['surface_m2']].astype(float).values.T
                  )[0]

    x = data.drop(['prix'], axis=1)

    return x, y

Example 26

def get_centroid_idf(text, emb, idf, stopwords, D):
    # Computing Terms' Frequency
    tf = defaultdict(int)
    tokens = bioclean(text)
    for word in tokens:
        if word in emb and word not in stopwords:
            tf[word] += 1

    # Computing the centroid
    centroid = np.zeros((1, D))
    div = 0

    for word in tf:
        if word in idf:
            p = tf[word] * idf[word]
            div += p
    if div != 0:
        centroid = np.divide(centroid, div)
    return centroid

Example 27

transform=None, sigma=0.0, color_vec=None):
    """Load augmented image with output shape (w, h).

    Default arguments return non augmented image of shape (w, h).
    To apply a fixed transform (color augmentation) specify transform
    (color_vec).
    To generate a random augmentation specify aug_params and sigma.
    """
    if transform is None:
        img = perturb(img, augmentation_params=aug_params, target_shape=(w, h))
    else:
        img = perturb_fixed(img, tform_augment=transform, target_shape=(w, h))

    np.subtract(img, MEAN[:, np.newaxis, np.newaxis], out=img)
    np.divide(img, STD[:, np.newaxis, np.newaxis], out=img)
    img = augment_color(img, sigma=sigma, color_vec=color_vec)
    return img

Example 28

def __init__(self, bounds, orig_resolution, tile_width, tile_height, tile_format_url,
             zoom_level=0, missing_z=None, image_leaf_shape=None):
    self.orig_bounds = bounds
    self.orig_resolution = orig_resolution
    self.tile_width = tile_width
    self.tile_height = tile_height
    self.tile_format_url = tile_format_url

    self.zoom_level = int(zoom_level)
    if missing_z is None:
        missing_z = []
    self.missing_z = frozenset(missing_z)
    if image_leaf_shape is None:
        image_leaf_shape = [10, tile_height, tile_width]

    scale = np.exp2(np.array([0, self.zoom_level, self.zoom_level])).astype(np.int64)

    data_shape = (np.zeros(3), np.divide(bounds, scale).astype(np.int64))
    self.image_data = OctreeVolume(image_leaf_shape,
                                   data_shape,
                                   'float32',
                                   populator=self.image_populator)

    self.label_data = None

Example 29

def apply_cmvn(utt, mean, variance, reverse=False):
    """Apply mean and variance normalisation based on previously computed statistics.

    Args:
        utt: The utterance feature numpy matrix.
        stats: A numpy array containing the mean and variance statistics.
            The first row contains the sum of all the features and as a last
            element the total number of features. The second row contains the
            squared sum of the features and a zero at the end

    Returns:
        A numpy array containing the mean and variance normalized features
    """
    if not reverse:
        # return mean and variance normalised utterance
        return np.divide(np.subtract(utt, mean), np.sqrt(variance))
    else:
        # reversed normalisation (assumed inverse of the branch above; the
        # original return statement was cut off in the snippet)
        return np.add(np.multiply(utt, np.sqrt(variance)), mean)

Example 30

def updateQProbs(lastStateID, lastAction):
    # print 'np.sum(QCounts[lastStateID,]) = ', np.sum(QCounts[lastStateID,])
    # print 'np.sum(QCounts[lastStateID,]) = ', np.sum(QCounts[lastStateID,])
    # print 'np.sum(QValues[lastStateID,]) = ', np.sum(QValues[lastStateID,])
    if np.sum(QCounts[lastStateID,]) == 0 or np.sum(QValues[lastStateID,]) == 0:
        tau = 1
    else:
        # print '(-(np.mean(QValues[lastStateID,]))) = ', (-(np.mean(QValues[lastStateID,])))
        # print '(np.mean(QCounts[lastStateID,])) = ', (np.mean(QCounts[lastStateID,]))
        tau = (-(np.mean(QValues[lastStateID,]))) / (np.mean(QCounts[lastStateID,]))
        # print 'tau = ', tau
    numerator = np.exp(QValues[lastStateID, ] / tau)
    tempSum = np.sum(numerator)
    denominator = np.array([tempSum, tempSum, tempSum, tempSum, tempSum, tempSum, tempSum, tempSum])
    QProbs[lastStateID, ] = np.divide(numerator, denominator)

# initial dataframes which will be able to store performance data over different days

Example 31

def multistate_distribution(data, parameters, limit,
                            normalize_likelihood_level_cell_counts=True):

    data_grandpa, data_parent, data_children = data
    sigma, b, a_grandpa, a_parent, a_children = parameters

    normalization_factor = normalize(sigma, a_grandpa, b, limit)
    grandpa_dist = [steady_state_distribution(x, sigma, a_grandpa, b, normalization_factor) for x in data_grandpa]

    normalization_factor = normalize(sigma, a_parent, b, limit)
    parent_dist = [steady_state_distribution(x, sigma, a_parent, b, normalization_factor) for x in data_parent]

    normalization_factor = normalize(sigma, a_children, b, limit)
    children_dist = [steady_state_distribution(x, sigma, a_children, b, normalization_factor) for x in data_children]

    grandpa_dist = np.array(grandpa_dist, dtype=float)
    parent_dist = np.array(parent_dist, dtype=float)
    children_dist = np.array(children_dist, dtype=float)

    if normalize_likelihood_level_cell_counts:
        grandpa_dist = np.divide(grandpa_dist, float(data_grandpa.size))
        parent_dist = np.divide(parent_dist, float(data_parent.size))
        children_dist = np.divide(children_dist, float(data_children.size))

    return grandpa_dist, parent_dist, children_dist

Example 32

def process_study(study_id, out_dir):
    mean = np.mean(isometric_volume).astype(np.float32)
    std = np.std(isometric_volume).astype(np.float32)
    volume_resized = scipy.ndimage.interpolation.zoom(isometric_volume,
                                                      np.divide(64, isometric_volume.shape),
                                                      mode='nearest')
    volume_resized = (volume_resized.astype(np.float32) - mean) / (std + 1e-7)
    for i in range(7):
        z_shift = random.randint(0, 5)
        z0 = (volume_resized.shape[0]//2) - z_shift
        z1 = volume_resized.shape[0] - z_shift
        y_shift = random.randint(0, 5)
        y0 = y_shift
        y1 = (volume_resized.shape[1]//2) + y_shift
        volume_resized_sample = volume_resized[z0:z1, y0:y1, :]
        volume_resized_sample = np.expand_dims(volume_resized_sample, axis=3)
        out_filepath = os.path.join(out_dir, '{}.npy'.format(uuid4()))
        np.save(out_filepath, volume_resized_sample)
    return

Example 33

def setUp(self):
    # Base data definition.
    x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
    y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
    a10 = 10.
    m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
    m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
    z = np.array([-.5, 0., .5, .8])
    xf = np.where(m1, 1e+20, x)
    xm.set_fill_value(1e+20)
    self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
    self.err_status = np.geterr()
    np.seterr(divide='ignore', invalid='ignore')

Example 34

def __ifloordiv__(self, other):
    """
    Floor divide self by other in-place.

    """
    other_data = getdata(other)
    # (domain-mask lines missing from the snippet; assumed from numpy.ma.core)
    dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
    other_mask = getmask(other)
    new_mask = mask_or(other_mask, dom_mask)
    # The following 3 lines control the domain filling
    if dom_mask.any():
        (_, fval) = ufunc_fills[np.floor_divide]
        other_data = np.where(dom_mask, fval, other_data)
    self._mask |= new_mask
    self._data.__ifloordiv__(np.where(self._mask, self.dtype.type(1),
                                      other_data))
    return self

Example 35

def __itruediv__(self, other):
    """
    True divide self by other in-place.

    """
    other_data = getdata(other)
    # (domain-mask lines missing from the snippet; assumed from numpy.ma.core)
    dom_mask = _DomainSafeDivide().__call__(self._data, other_data)
    other_mask = getmask(other)
    new_mask = mask_or(other_mask, dom_mask)
    # The following 3 lines control the domain filling
    if dom_mask.any():
        (_, fval) = ufunc_fills[np.true_divide]
        other_data = np.where(dom_mask, fval, other_data)
    self._mask |= new_mask
    self._data.__itruediv__(np.where(self._mask, self.dtype.type(1),
                                     other_data))
    return self

Example 36

def __ipow__(self, other):
    """
    Raise self to the power other, in place.

    """
    other_data = getdata(other)
    with np.errstate(divide='ignore', invalid='ignore'):
        # (missing lines assumed from numpy.ma.core)
        self._data.__ipow__(np.where(self._mask, self.dtype.type(1),
                                     other_data))
    invalid = np.logical_not(np.isfinite(self._data))
    if invalid.any():
        if self._mask is not nomask:
            self._mask |= invalid
        else:
            self._mask = invalid
        np.copyto(self._data, self.fill_value, where=invalid)
    return self

Example 37

def get_ensemble_weights(ensemble_dirs):
    '''
    return ensembling weights by reading ./output/<ensemble_dir>/models_ensembled.txt
    '''
    total_models = 0
    weights = np.zeros(len(ensemble_dirs))

    for i, ensemble_dir in enumerate(ensemble_dirs):
        ensembled_model_names, _ = get_models_ensembled(ensemble_dir)
        num_models_used = len(ensembled_model_names)

        total_models += num_models_used
        weights[i] = num_models_used

    weights = np.divide(weights, total_models)
    return weights

Example 38

def htmt(self):

    htmt_ = pd.DataFrame(pd.DataFrame.corr(self.data_),
                         index=self.manifests, columns=self.manifests)

    mean = []
    allBlocks = []
    for i in range(self.lenlatent):
        block_ = self.Variables['measurement'][
            self.Variables['latent'] == self.latent[i]]
        allBlocks.append(list(block_.values))
        block = htmt_.ix[block_, block_]
        mean_ = (block - np.diag(np.diag(block))).values
        mean_[mean_ == 0] = np.nan
        mean.append(np.nanmean(mean_))

    comb = [[k, j] for k in range(self.lenlatent)
            for j in range(self.lenlatent)]

    comb_ = [(np.sqrt(mean[comb[i][1]] * mean[comb[i][0]]))
             for i in range(self.lenlatent ** 2)]

    comb__ = []
    for i in range(self.lenlatent ** 2):
        block = (htmt_.ix[allBlocks[comb[i][1]],
                          allBlocks[comb[i][0]]]).values
        # block[block == 1] = np.nan
        comb__.append(np.nanmean(block))

    htmt__ = np.divide(comb__, comb_)
    where_are_NaNs = np.isnan(htmt__)
    htmt__[where_are_NaNs] = 0

    htmt = pd.DataFrame(np.tril(htmt__.reshape(
        (self.lenlatent, self.lenlatent)), k=-1), index=self.latent, columns=self.latent)

    return htmt

Example 39

def updateTransform(self):
    muX = np.divide(np.sum(np.dot(self.P, self.X), axis=0), self.Np)
    muY = np.divide(np.sum(np.dot(np.transpose(self.P), self.Y), axis=0), self.Np)

    self.XX = self.X - np.tile(muX, (self.N, 1))
    YY      = self.Y - np.tile(muY, (self.M, 1))

    self.A = np.dot(np.transpose(self.XX), np.transpose(self.P))
    self.A = np.dot(self.A, YY)

    U, _, V = np.linalg.svd(self.A, full_matrices=True)
    C = np.ones((self.D, ))
    C[self.D-1] = np.linalg.det(np.dot(U, V))

    self.R = np.dot(np.dot(U, np.diag(C)), V)

    self.YPY = np.dot(np.transpose(self.P1), np.sum(np.multiply(YY, YY), axis=1))

    self.s = np.trace(np.dot(np.transpose(self.A), self.R)) / self.YPY

    self.t = np.transpose(muX) - self.s * np.dot(self.R, np.transpose(muY))

Example 40

def EStep(self):
    P = np.zeros((self.M, self.N))

    for i in range(0, self.M):
        diff    = self.X - np.tile(self.TY[i, :], (self.N, 1))
        diff    = np.multiply(diff, diff)
        P[i, :] = P[i, :] + np.sum(diff, axis=1)

    c = (2 * np.pi * self.sigma2) ** (self.D / 2)
    c = c * self.w / (1 - self.w)
    c = c * self.M / self.N

    P = np.exp(-P / (2 * self.sigma2))
    den = np.sum(P, axis=0)
    den = np.tile(den, (self.M, 1))
    den[den == 0] = np.finfo(float).eps

    self.P   = np.divide(P, den)
    self.Pt1 = np.sum(self.P, axis=0)
    self.P1  = np.sum(self.P, axis=1)
    self.Np  = np.sum(self.P1)

Example 41

def updateTransform(self):
    muX = np.divide(np.sum(np.dot(self.P, self.X), axis=0), self.Np)
    muY = np.divide(np.sum(np.dot(np.transpose(self.P), self.Y), axis=0), self.Np)

    self.XX = self.X - np.tile(muX, (self.N, 1))
    YY      = self.Y - np.tile(muY, (self.M, 1))

    self.A = np.dot(np.transpose(self.XX), np.transpose(self.P))
    self.A = np.dot(self.A, YY)

    self.YPY = np.dot(np.transpose(YY), np.diag(self.P1))
    self.YPY = np.dot(self.YPY, YY)

    Bt = np.linalg.solve(np.transpose(self.YPY), np.transpose(self.A))
    self.B = np.transpose(Bt)
    self.t = np.transpose(muX) - np.dot(self.B, np.transpose(muY))

Example 42

def eStep(self):
    P = np.zeros((self.M, self.N))

    for i in range(0, self.M):
        diff    = self.X - np.tile(self.TY[i, :], (self.N, 1))
        diff    = np.multiply(diff, diff)
        P[i, :] = P[i, :] + np.sum(diff, axis=1)

    c = (2 * np.pi * self.sigma2) ** (self.D / 2)
    c = c * self.w / (1 - self.w)
    c = c * self.M / self.N

    P = np.exp(-P / (2 * self.sigma2))
    den = np.sum(P, axis=0)
    den = np.tile(den, (self.M, 1))
    den[den == 0] = np.finfo(float).eps

    self.P   = np.divide(P, den)
    self.Pt1 = np.sum(self.P, axis=0)
    self.P1  = np.sum(self.P, axis=1)
    self.Np  = np.sum(self.P1)

Example 43

def sentence2vec(sentence, model=WORD2VEC, stopwords=STOPWORDS, metadata=None, section=None, wordvecs_only=True):
    """
    Changes a sentence into a vector by averaging the word vectors of every non-stopword word in the sentence.
    :param sentence: the sentence to turn into a vector, as a list of words
    :param model: the word2vec model to use to convert words to vectors
    :param stopwords: stopwords to not include in the averaging of each sentence.
    :param section: the section of the paper the sentence occurs in.
    :param wordvecs_only: will turn a sentence into a vector using only the word vectors from the model, no extra
                          features.
    :return: the sentence in vector representation
    """
    # The shape of the model, used to get the number of features and its vocab
    model_shape = model.syn0.shape
    vocab = set(model.index2word)

    # The array that will be used to calculate the average word vector
    average = np.zeros((model_shape[1]), dtype="float32")
    total_word_count = 0

    for word in sentence:

        if word in stopwords:
            continue

        if word in vocab:
            word_rep = model[word]
            average += word_rep
            total_word_count += 1

    if total_word_count == 0:
        total_word_count = 1

    average = np.divide(average, total_word_count)

    sentence_vec = average

    return sentence_vec

Example 44

def evaluation(self, theta, X_test, y_test):
    theta = theta[:, :-1]
    M, n_test = theta.shape[0], len(y_test)

    prob = np.zeros([n_test, M])
    for t in range(M):
        coff = np.multiply(y_test, np.sum(-1 * np.multiply(nm.repmat(theta[t, :], n_test, 1), X_test), axis=1))
        prob[:, t] = np.divide(np.ones(n_test), (1 + np.exp(coff)))

    prob = np.mean(prob, axis=1)
    acc = np.mean(prob > 0.5)
    llh = np.mean(np.log(prob))
    return [acc, llh]

Example 45

def update(self, x0, lnprob, n_iter=1000, stepsize=1e-3, bandwidth=-1, alpha=0.9, debug=False):
    # Check input
    if x0 is None or lnprob is None:
        raise ValueError('x0 or lnprob cannot be None!')

    theta = np.copy(x0)

    # AdaGrad with momentum
    fudge_factor = 1e-6
    historical_grad = 0
    for iter in range(n_iter):
        if debug and (iter+1) % 1000 == 0:
            print 'iter ' + str(iter+1)

        # calculating the kernel matrix
        kxy, dxkxy = self.svgd_kernel(theta, h=-1)
        grad_theta = (np.matmul(kxy, lnprob(theta)) + dxkxy) / x0.shape[0]

        # AdaGrad-style step sizes; the bodies of this if/else were missing in
        # the extracted snippet and are assumed from the reference SVGD code.
        if iter == 0:
            historical_grad = historical_grad + grad_theta ** 2
        else:
            historical_grad = alpha * historical_grad + (1 - alpha) * (grad_theta ** 2)
        adj_grad = np.divide(grad_theta, fudge_factor + np.sqrt(historical_grad))
        theta = theta + stepsize * adj_grad

    return theta

Example 46

def __entropy(self, pdf):
    """Calculate shannon entropy of posterior distribution.

    Arguments
    ---------
    pdf :   ndarray (float64)
        posterior distribution of psychometric curve parameters for each stimuli

    Returns
    -------
    1D numpy array (float64) : Shannon entropy of posterior for each stimuli
    """
    # Marginalize out all nuisance parameters, i.e. all except alpha and sigma
    postDims = np.ndim(pdf)
    if self.marginalize == True:
        while postDims > 3:  # marginalize out second-to-last dimension, last dim is x
            pdf = np.sum(pdf, axis=-2)
            postDims -= 1
    # find expected entropy, suppress divide-by-zero and invalid value warnings
    # as this is handled by the NaN redefinition to 0
    with np.errstate(divide='ignore', invalid='ignore'):
        entropy = np.multiply(pdf, np.log(pdf))
    entropy[np.isnan(entropy)] = 0  # define 0*log(0) to equal 0
    dimSum = tuple(range(postDims - 1))  # dimensions to sum over. also a Chinese dish
    entropy = -(np.sum(entropy, axis=dimSum))
    return entropy

Example 47

"""
Adjoint of (pseudo-)inverse of 2D data.

Note that this is also the (pseudo-)inverse of the adjoint.

Usage:

Input:

X             : 2D data.
shearletSystem: Structure containing a shearlet system. This
                should be the same system as the one
                previously used for decomposition.

Output:

coeffs:          X x Y x N array of shearlet coefficients.
"""
# skipping useGPU stuff...

# STUFF
Xfreq = np.divide(fftlib.fftshift(fftlib.fft2(fftlib.ifftshift(X))), shearletSystem["dualFrameWeights"])
coeffs = np.zeros(shearletSystem["shearlets"].shape, dtype=complex)

for j in range(shearletSystem["nShearlets"]):
    coeffs[:, :, j] = fftlib.fftshift(fftlib.ifft2(fftlib.ifftshift(Xfreq * np.conj(shearletSystem["shearlets"][:, :, j]))))

return np.real(coeffs).astype(X.dtype)

#
##############################################################################

Example 48

def __call__(self, sample):
    # keep track of the absolute value of each incoming sample
    # (the start of this statement was cut off; assumed to accumulate into self.diff)
    self.diff = np.add(self.diff,
                       np.absolute(np.asarray(sample.channel_data)))
    self.sample_count = self.sample_count + 1

    elapsed_time = timeit.default_timer() - self.last_report
    if elapsed_time > self.polling_interval:
        channel_noise_power = np.divide(self.diff, self.sample_count)

        print (channel_noise_power)
        self.diff = np.zeros(self.eeg_channels)
        self.last_report = timeit.default_timer()

Example 49

def normalize(self):
    """
    Normalize by maximum amplitude.
    """
    return Wave(np.divide(self, np.max(np.abs(self), 0)), self.sample_rate)

Example 50

def normalize_points(self, x):
    return np.divide(x - np.amin(self.X, 0),
                     np.amax(self.X, 0) - np.amin(self.X, 0), np.empty_like(x))
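
In this last example the third positional argument to np.divide is the out array, so the min-max scaled result is written into a freshly allocated buffer shaped like x. A minimal sketch of the same pattern with the keyword spelled out, using made-up values:

import numpy as np

x  = np.array([2.0, 5.0, 8.0])
lo = np.array([0.0, 0.0, 0.0])
hi = np.array([10.0, 10.0, 10.0])

out = np.empty_like(x)
np.divide(x - lo, hi - lo, out=out)   # min-max scaling written into `out`
print(out)                            # [0.2 0.5 0.8]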