The following are code examples showing how to use numpy.maximum. They are extracted from open source Python projects.
Example 1
def _estimate_lambda_single_y(y):
    """Estimate lambda for a single y, given a range of lambdas
    through which to search. No validation performed.

    Parameters
    ----------
    y : ndarray, shape (n_samples,)
        The vector being estimated against
    """
    # ensure is array
    y = np.array(y)

    # Use scipy's log-likelihood estimator
    b = boxcox(y, lmbda=None)

    # Return lambda corresponding to maximum P
    return b[1]
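For context, here is a minimal usage sketch (not from the source project): it assumes only numpy and scipy.stats.boxcox, and that the input data are strictly positive, as the Box-Cox transform requires.

import numpy as np
from scipy.stats import boxcox

# boxcox returns (transformed_data, fitted_lambda) when lmbda is None
rng = np.random.RandomState(42)
y = rng.lognormal(size=100)     # strictly positive sample
_, lam = boxcox(y, lmbda=None)  # lambda maximizing the log-likelihood
print("estimated lambda:", lam)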
Example 2
def fit(self, graphs, y=None):
    rnd = check_random_state(self.random_state)
    n_samples = len(graphs)

    # get basis vectors
    if self.n_components > n_samples:
        n_components = n_samples
    else:
        n_components = self.n_components
    n_components = min(n_samples, n_components)
    inds = rnd.permutation(n_samples)
    basis_inds = inds[:n_components]
    basis = []
    for ind in basis_inds:
        basis.append(graphs[ind])

    basis_kernel = self.kernel(basis, basis, **self._get_kernel_params())

    # sqrt of kernel matrix on basis vectors
    U, S, V = svd(basis_kernel)
    S = np.maximum(S, 1e-12)
    self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
    self.components_ = basis
    self.component_indices_ = inds
    return self
Example 3
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper

    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
      alpha: the margin enforced between positive and negative distances.

    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)

        basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)

    return loss
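The same loss can be sanity-checked outside a TensorFlow graph. A small numpy sketch of the identical formula (the function name here is illustrative, not from the project):

import numpy as np

def triplet_loss_np(anchor, positive, negative, alpha):
    # squared L2 distance per row, then hinge with margin alpha
    pos_dist = np.sum((anchor - positive) ** 2, axis=1)
    neg_dist = np.sum((anchor - negative) ** 2, axis=1)
    return np.mean(np.maximum(pos_dist - neg_dist + alpha, 0.0))

rng = np.random.RandomState(0)
a, p, n = rng.randn(3, 8, 128)  # anchor, positive, negative embeddings
print(triplet_loss_np(a, p, n, alpha=0.2))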
Example 4
def get_covariance(self):
    """Compute data covariance with the generative model.

    ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
    where S**2 contains the explained variances.

    Returns
    -------
    cov : array, shape=(n_features, n_features)
        Estimated covariance of data.
    """
    components_ = self.components_
    exp_var = self.explained_variance_
    if self.whiten:
        components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
    exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
    cov = np.dot(components_.T * exp_var_diff, components_)
    cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
    return cov
Example 5
def batch_iou(boxes, box):
    """Compute the Intersection-Over-Union of a batch of boxes with another
    single box.

    Args:
      boxes: 2D array of [cx, cy, width, height].
      box: a single array of [cx, cy, width, height]
    Returns:
      ious: array of floats in range [0, 1].
    """
    lr = np.maximum(
        np.minimum(boxes[:, 0] + 0.5 * boxes[:, 2], box[0] + 0.5 * box[2]) -
        np.maximum(boxes[:, 0] - 0.5 * boxes[:, 2], box[0] - 0.5 * box[2]),
        0
    )
    tb = np.maximum(
        np.minimum(boxes[:, 1] + 0.5 * boxes[:, 3], box[1] + 0.5 * box[3]) -
        np.maximum(boxes[:, 1] - 0.5 * boxes[:, 3], box[1] - 0.5 * box[3]),
        0
    )
    inter = lr * tb
    union = boxes[:, 2] * boxes[:, 3] + box[2] * box[3] - inter
    return inter / union
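A quick check of the function above (assumed in scope), with boxes in [cx, cy, w, h] format:

import numpy as np

boxes = np.array([[10., 10., 4., 4.],
                  [12., 10., 4., 4.]])
box = np.array([10., 10., 4., 4.])
print(batch_iou(boxes, box))  # -> [1.0, 0.333...]: full and partial overlap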
Example 6
def imax(arrays, axis, ignore_nan=False):
    """
    Maximum of a stream of arrays along an axis.

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.
    axis : int or None, optional
        Axis along which the maximum is found. The default is to find the
        maximum along the 'stream axis', as if all arrays in ``arrays``
        were stacked along a new dimension. If ``axis = None``, arrays in
        ``arrays`` are flattened before reduction.
    ignore_nan : bool, optional
        If True, NaNs are ignored. Default is propagation of NaNs.

    Yields
    ------
    online_max : ndarray
        Cumulative maximum.
    """
    ufunc = np.fmax if ignore_nan else np.maximum
    yield from ireduce_ufunc(arrays, ufunc, axis)
Example 7
def fCauchy(ftrue, alpha, p):
    """Returns Cauchy model noisy value

    Cauchy with median 1e3*alpha and with p=0.2, zero otherwise

    P(Cauchy > 1,10,100,1000) = 0.25, 0.032, 0.0032, 0.00032
    """
    # expects ftrue to be a np.array
    popsi = np.shape(ftrue)
    fval = ftrue + alpha * np.maximum(
        0., 1e3 + (_rand(popsi) < p) * _randn(popsi) /
        (np.abs(_randn(popsi)) + 1e-199))
    tol = 1e-8
    fval = fval + 1.01 * tol
    idx = ftrue < tol
    try:
        fval[idx] = ftrue[idx]
    except IndexError:  # fval is a scalar
        if idx:
            fval = ftrue
    return fval
Example 8
def forward(self, input):
    r"""During the forward pass, it inhibits all activations below some
    threshold :math:`\theta`, typically :math:`0`. In other words, it
    computes point-wise

    .. math:: y = \max(0, x)

    Parameters
    ----------
    input : float32
        The activation (the summed, weighted input of a neuron).

    Returns
    -------
    float32
        The output of the rectify function applied to the activation.
    """
    self.last_forward = input
    return np.maximum(0.0, input)
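The layer body is just np.maximum broadcasting a scalar threshold against an array, which is worth seeing standalone:

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5, 3.0])
print(np.maximum(0.0, x))  # [0.  0.  0.  1.5 3. ]: negatives clamped to zero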
Example 9
def update(self, params, grads):
    # init
    self.iterations += 1
    a_t = self.lr / (1 - np.power(self.beta1, self.iterations))
    if self.ms is None:
        self.ms = [_zero(p.shape) for p in params]
    if self.vs is None:
        self.vs = [_zero(p.shape) for p in params]

    # update parameters
    for i, (m, v, p, g) in enumerate(zip(self.ms, self.vs, params, grads)):
        m = self.beta1 * m + (1 - self.beta1) * g
        v = np.maximum(self.beta2 * v, np.abs(g))
        p -= a_t * m / (v + self.epsilon)

        self.ms[i] = m
        self.vs[i] = v
Example 10
def plot_beta():
    '''plot beta over training'''
    beta = args.beta
    scale = args.scale
    beta_min = args.beta_min
    num_epoch = args.num_epoch
    epoch_size = int(float(args.num_examples) / args.batch_size)

    x = np.arange(num_epoch * epoch_size)
    y = beta * np.power(scale, x)
    y = np.maximum(y, beta_min)
    epoch_x = np.arange(num_epoch) * epoch_size
    epoch_y = beta * np.power(scale, epoch_x)
    epoch_y = np.maximum(epoch_y, beta_min)

    # plot beta descent curve
    plt.semilogy(x, y)
    plt.semilogy(epoch_x, epoch_y, 'ro')
    plt.title('beta descent')
    plt.ylabel('beta')
    plt.xlabel('epoch')
    plt.show()
Example 11
def convert_to_square(bbox):
    """Convert bbox to square

    Parameters:
    ----------
    bbox: numpy array, shape n x 5
        input bbox
    Returns:
    -------
    square bbox
    """
    square_bbox = bbox.copy()

    h = bbox[:, 3] - bbox[:, 1] + 1
    w = bbox[:, 2] - bbox[:, 0] + 1
    max_side = np.maximum(h, w)
    square_bbox[:, 0] = bbox[:, 0] + w * 0.5 - max_side * 0.5
    square_bbox[:, 1] = bbox[:, 1] + h * 0.5 - max_side * 0.5
    square_bbox[:, 2] = square_bbox[:, 0] + max_side - 1
    square_bbox[:, 3] = square_bbox[:, 1] + max_side - 1
    return square_bbox
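A usage sketch for the function above (assumed in scope), with one box in [x1, y1, x2, y2, score] layout:

import numpy as np

bbox = np.array([[0., 0., 19., 9., 0.9]])  # 20 wide, 10 tall
print(convert_to_square(bbox))
# -> [[ 0. -5. 19. 14.  0.9]]: grown to a 20x20 square about the same center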
Example 12
def custom_crop(img, bbox):
    # bbox = [x-left, y-top, width, height]
    imsiz = img.shape  # [height, width, channel]
    # if box[0] + box[2] >= imsiz[1] or \
    #    box[1] + box[3] >= imsiz[0] or \
    #    box[0] <= 0 or \
    #    box[1] <= 0:
    #     box[0] = np.maximum(0, box[0])
    #     box[1] = np.maximum(0, box[1])
    #     box[2] = np.minimum(imsiz[1] - box[0] - 1, box[2])
    #     box[3] = np.minimum(imsiz[0] - box[1] - 1, box[3])
    center_x = int((2 * bbox[0] + bbox[2]) / 2)
    center_y = int((2 * bbox[1] + bbox[3]) / 2)
    R = int(np.maximum(bbox[2], bbox[3]) * 0.75)
    y1 = np.maximum(0, center_y - R)
    y2 = np.minimum(imsiz[0], center_y + R)
    x1 = np.maximum(0, center_x - R)
    x2 = np.minimum(imsiz[1], center_x + R)
    img_cropped = img[y1:y2, x1:x2, :]
    return img_cropped
Example 13
def append(self, x):
    self._count += 1
    if self._count == 1:
        self.m = x
        self.last_m = x
        self.last_s = 0.0
        self.min = x
        self.max = x
    else:
        self.m = self.last_m + (x - self.last_m) / self._count
        self.s = self.last_s + (x - self.last_m) * (x - self.m)
        self.last_m = self.m
        self.last_s = self.s

        self.min = numpy.minimum(self.min, x)
        self.max = numpy.maximum(self.max, x)
Example 14
def clip_boxes(boxes, im_shape):
    """
    Clip boxes to image boundaries.
    :param boxes: [N, 4 * num_classes]
    :param im_shape: tuple of 2
    :return: [N, 4 * num_classes]
    """
    # x1 >= 0
    boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)
    # y1 >= 0
    boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)
    # x2 < im_shape[1]
    boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)
    # y2 < im_shape[0]
    boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)
    return boxes
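A usage sketch, assuming the function above is in scope; im_shape is (height, width):

import numpy as np

boxes = np.array([[-5., -5., 120., 90.]])  # [x1, y1, x2, y2]
print(clip_boxes(boxes, (100, 100)))
# -> [[ 0.  0. 99. 90.]]: every coordinate clamped into [0, side - 1]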
Example 15
def apply_nms(all_boxes, thresh):
    """Apply non-maximum suppression to all predicted boxes output by the
    test_net method.
    """
    num_classes = len(all_boxes)
    num_images = len(all_boxes[0])
    nms_boxes = [[[] for _ in range(num_images)]
                 for _ in range(num_classes)]
    for cls_ind in range(num_classes):
        for im_ind in range(num_images):
            dets = all_boxes[cls_ind][im_ind]
            if len(dets) == 0:
                continue
            keep = nms(dets, thresh)
            if len(keep) == 0:
                continue
            nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
    return nms_boxes
Example 16
def get_IOU(rec1, rec2):
    """
    rec1 and rec2 are both np.arrays with x_center, y_center, width, height;
    should work with any dimension as long as the last dimension is 4
    """
    rec1_xy_max = rec1[..., :2] + (rec1[..., 2:4] - 1) / 2
    rec1_xy_min = rec1[..., :2] - (rec1[..., 2:4] - 1) / 2

    rec2_xy_max = rec2[..., :2] + (rec2[..., 2:4] - 1) / 2
    rec2_xy_min = rec2[..., :2] - (rec2[..., 2:4] - 1) / 2

    intersec_max = np.minimum(rec1_xy_max, rec2_xy_max)
    intersec_min = np.maximum(rec1_xy_min, rec2_xy_min)

    intersec_wh = np.maximum(intersec_max - intersec_min + 1, 0)
    intersec_area = intersec_wh[..., 0] * intersec_wh[..., 1]

    area1 = rec1[..., 2] * rec1[..., 3]
    area2 = rec2[..., 2] * rec2[..., 3]

    union = area1 + area2 - intersec_area
    return intersec_area / union
Example 17
def set_responsibilities(anchor_frames, iou_thresh=0.6):
    """Changes the IOU values for the anchor frames to binary values

    anchor_frames: list of frames where each frame contains all features
        for a specific anchor
    iou_thresh: threshold to decide which anchor is responsible
    """
    # set box with maximum IOU to 1
    anchor_frames = [frame.copy() for frame in anchor_frames]

    # find maximum IOU value over all frames
    helper_array = np.array([frame[frame.columns[0]]
                             for frame in anchor_frames]).T
    max_indices = np.argmax(helper_array, axis=1)
    data_idx = np.arange(len(max_indices))
    for obj_idx, frame_idx in zip(data_idx, max_indices):
        temp_frame = anchor_frames[frame_idx]
        temp_frame.loc[obj_idx, temp_frame.columns[0]] = 1

    # applying the iou threshold on a copy of the dataframes
    for frame in anchor_frames:
        frame[frame.columns[0]] = np.digitize(frame[frame.columns[0]],
                                              [iou_thresh])
    return anchor_frames
Example 18
def divergence(self, V, W, H):
    """Compute divergence between reconstruction and original"""
    R = np.maximum(np.dot(W, H), eps)
    V = np.maximum(V, eps)
    err = 0
    if self.update == self.kl_updates:
        err = np.sum(np.multiply(V, np.log(V / R)) - V + R)
    elif self.update == self.euc_updates:
        err = np.sum((V - np.dot(W, H)) ** 2)
    elif self.update == self.is_updates:
        err = np.sum(V / R - np.log(V / R) - 1)
    elif self.update == self.beta_updates:
        err = (np.sum(V ** self.beta
                      + (self.beta - 1) * R ** self.beta
                      - self.beta * V * R ** (self.beta - 1))
               / (self.beta * (self.beta - 1)))
    return err
Example 19
def beta_updates(self, V, W, H):
    """Optimize beta-divergence"""
    if self.update_W:
        R = np.maximum(np.dot(W, H), eps)
        W *= (np.dot(R ** (self.beta - 2) * V, H.T)
              / np.maximum(np.dot(R ** (self.beta - 1), H.T), eps))
        W = self.normalize(W, self.W_norm, 0)
    if self.update_H:
        R = np.maximum(np.dot(W, H), eps)
        H *= (np.dot(W.T, R ** (self.beta - 2) * V)
              / np.maximum(np.dot(W.T, R ** (self.beta - 1)), eps))
        H = self.normalize(H, self.H_norm, 1)
    return [V, W, H]
Example 20
def interp1d_(xin_, xp, yp_):
    """Interpolate a uniformly sampled piecewise linear function, mapping
    elements from xin_ to the result. Input values will be clipped to the
    range of xp.

    xin_ : input tensor (real)
    xp : x grid (constant -- must be a 1d numpy array, uniformly spaced)
    yp_ : tensor of the result values at the gridpoints xp
    """
    import tensorflow as tf
    x_ = tf.clip_by_value(xin_, xp.min(), xp.max())
    dx = xp[1] - xp[0]
    assert len(xp.shape) == 1, 'only 1d interpolation'
    assert xp.shape[0] == int(yp_.get_shape()[0])
    assert abs(np.diff(xp) / dx - 1.0).max() < 1e-6, 'must be uniformly sampled'

    x1_ = tf.expand_dims(x_, -1)
    dt = yp_.dtype
    wt_ = tf.maximum(tf.constant(0., dtype=dt),
                     1 - abs(x1_ - tf.constant(xp, dtype=dt)) / dx)
    y_ = tf.reduce_sum(wt_ * yp_, axis=-1)
    return y_
Example 21
def overlap_ratio(boxes1, boxes2):
    # find intersection bbox
    x_int_bot = np.maximum(boxes1[:, 0], boxes2[0])
    x_int_top = np.minimum(boxes1[:, 0] + boxes1[:, 2], boxes2[0] + boxes2[2])
    y_int_bot = np.maximum(boxes1[:, 1], boxes2[1])
    y_int_top = np.minimum(boxes1[:, 1] + boxes1[:, 3], boxes2[1] + boxes2[3])

    # find intersection area
    dx = x_int_top - x_int_bot
    dy = y_int_top - y_int_bot
    area_int = np.where(np.logical_and(dx > 0, dy > 0), dx * dy,
                        np.zeros_like(dx))

    # find union
    area_union = boxes1[:, 2] * boxes1[:, 3] + boxes2[2] * boxes2[3] - area_int

    # find overlap ratio
    ratio = np.where(area_union > 0, area_int / area_union,
                     np.zeros_like(area_int))
    return ratio
Example 22
def overlap_ratio_pair(boxes1, boxes2):
    # find intersection bbox
    x_int_bot = np.maximum(boxes1[:, 0], boxes2[:, 0])
    x_int_top = np.minimum(boxes1[:, 0] + boxes1[:, 2],
                           boxes2[:, 0] + boxes2[:, 2])
    y_int_bot = np.maximum(boxes1[:, 1], boxes2[:, 1])
    y_int_top = np.minimum(boxes1[:, 1] + boxes1[:, 3],
                           boxes2[:, 1] + boxes2[:, 3])

    # find intersection area
    dx = x_int_top - x_int_bot
    dy = y_int_top - y_int_bot
    area_int = np.where(np.logical_and(dx > 0, dy > 0), dx * dy,
                        np.zeros_like(dx))

    # find union
    area_union = (boxes1[:, 2] * boxes1[:, 3]
                  + boxes2[:, 2] * boxes2[:, 3] - area_int)

    # find overlap ratio
    ratio = np.where(area_union > 0, area_int / area_union,
                     np.zeros_like(area_int))
    return ratio
Example 23
def lerp(p0, p1, t):
    """Linear interpolation."""
    return (1.0 - t) * p0 + t * p1


# A note on formats:
# Sketches are encoded as a sequence of strokes. stroke-3 and stroke-5 are
# different stroke encodings.
#   stroke-3 uses 3-tuples, consisting of x-offset, y-offset, and a binary
#   variable which is 1 if the pen is lifted between this position and
#   the next, and 0 otherwise.
#   stroke-5 consists of x-offset, y-offset, and p_1, p_2, p_3, a binary
#   one-hot vector of 3 possible pen states: pen down, pen up, end of sketch.
#   See section 3.1 of https://arxiv.org/abs/1704.03477 for more detail.
# Sketch-RNN takes input in stroke-5 format, with sketches padded to a common
# maximum length and prefixed by the special start token [0, 0, 1, 0, 0].
# The QuickDraw dataset is stored using stroke-3.
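As a hedged sketch of the conversion this note describes (written for this page, not the project's own helper), stroke-3 can be lifted to padded stroke-5 by expanding the binary pen bit into the one-hot pen state and marking the padded tail as end-of-sketch:

import numpy as np

def to_stroke5(strokes3, max_len):
    """Convert an (n, 3) stroke-3 array to a padded (max_len, 5) stroke-5 array."""
    n = len(strokes3)
    assert n < max_len, "sketch must fit with at least one end-of-sketch step"
    result = np.zeros((max_len, 5), dtype=np.float32)
    result[:n, 0:2] = strokes3[:, 0:2]  # x-offset, y-offset
    result[:n, 3] = strokes3[:, 2]      # p_2: pen lifted
    result[:n, 2] = 1 - result[:n, 3]   # p_1: pen down
    result[n:, 4] = 1                   # p_3: end of sketch on the padding
    return result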
Example 24
def eqsize(*args):
    m = 0
    varargout = [None] * len(args)
    for a in range(0, len(args)):
        p1 = args[a]
        for i in range(0, p1.size):
            m = np.maximum(m, (p1[i].monomials).shape[0])
    for a in range(len(args) - 1, -1, -1):
        p1 = args[a]
        for i in range(0, p1.size):
            if (p1[i].monomials).shape[0] < m:
                p1[i].monomials[m, :] = 0
        varargout[a] = p1
    return varargout
Example 25
def apply_perturbations(i, j, X, increase, theta, clip_min, clip_max):
    """
    TensorFlow implementation for applying perturbations to input features
    based on saliency maps
    :param i: index of first selected feature
    :param j: index of second selected feature
    :param X: a matrix containing our input features for our sample
    :param increase: boolean; true if we are increasing pixels, false otherwise
    :param theta: delta for each feature adjustment
    :param clip_min: minimum value for a feature in our sample
    :param clip_max: maximum value for a feature in our sample
    :return: a perturbed input feature matrix for a target class
    """
    # perturb our input sample
    if increase:
        X[0, i] = np.minimum(clip_max, X[0, i] + theta)
        X[0, j] = np.minimum(clip_max, X[0, j] + theta)
    else:
        X[0, i] = np.maximum(clip_min, X[0, i] - theta)
        X[0, j] = np.maximum(clip_min, X[0, j] - theta)
    return X
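A usage sketch with a toy feature matrix (function above assumed in scope):

import numpy as np

X = np.array([[0.2, 0.5, 0.8]])
X = apply_perturbations(i=0, j=2, X=X, increase=True,
                        theta=0.5, clip_min=0., clip_max=1.)
print(X)  # -> [[0.7 0.5 1. ]]: both selected features raised, one clipped at 1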
Example 26
def _update_statistics(self, new_stats, stats):
    new_stats = create_dict(new_stats)
    if stats is None:
        stats = new_stats
        return stats

    # update the stats layerwise
    for l_i in range(len(stats)):
        for subtype, _ in subtypes:
            # TODO: Have to check the type to see if this is needed
            cnt_old = 1.0 * stats[l_i][subtype]['cnt']
            stats[l_i][subtype]['cnt'] = (stats[l_i][subtype]['cnt']
                                          + new_stats[l_i][subtype]['cnt'])
            norm = np.maximum(stats[l_i][subtype]['cnt'], 1.0)

            for key in subtype_keys:
                if key not in subtype_keys_no_aggregation:
                    tmp_old = cnt_old / norm * stats[l_i][subtype][key]
                    tmp_new = (new_stats[l_i][subtype]['cnt'] / norm
                               * new_stats[l_i][subtype][key])
                    stats[l_i][subtype][key] = tmp_old + tmp_new
    return stats
Example 27
def iou_loss(p, t):
    tp, tt = p.reshape((p.shape[0], 2, 2)), t.reshape((t.shape[0], 2, 2))
    overlaps_t0 = T.maximum(tp[:, 0, :], tt[:, 0, :])
    overlaps_t1 = T.minimum(tp[:, 1, :], tt[:, 1, :])
    intersection = overlaps_t1 - overlaps_t0
    bool_overlap = T.min(intersection, axis=1) > 0
    intersection = intersection[:, 0] * intersection[:, 1]
    intersection = T.maximum(intersection, np.float32(0.))
    dims_p = tp[:, 1, :] - tp[:, 0, :]
    areas_p = dims_p[:, 0] * dims_p[:, 1]
    dims_t = tt[:, 1, :] - tt[:, 0, :]
    areas_t = dims_t[:, 0] * dims_t[:, 1]
    union = areas_p + areas_t - intersection
    loss = 1. - T.minimum(
        T.exp(T.log(T.abs_(intersection)) -
              T.log(T.abs_(union) + np.float32(1e-5))),
        np.float32(1.)
    )
    return T.mean(loss)
Example 28
def iou_loss_val(p, t):
    tp, tt = p.reshape((p.shape[0], 2, 2)), t.reshape((t.shape[0], 2, 2))
    overlaps = np.zeros_like(tp, dtype=np.float32)
    overlaps[:, 0, :] = np.maximum(tp[:, 0, :], tt[:, 0, :])
    overlaps[:, 1, :] = np.minimum(tp[:, 1, :], tt[:, 1, :])
    intersection = overlaps[:, 1, :] - overlaps[:, 0, :]
    bool_overlap = np.min(intersection, axis=1) > 0
    intersection = intersection[:, 0] * intersection[:, 1]
    intersection = np.maximum(intersection, 0.)
    dims_p = tp[:, 1, :] - tp[:, 0, :]
    areas_p = dims_p[:, 0] * dims_p[:, 1]
    dims_t = tt[:, 1, :] - tt[:, 0, :]
    areas_t = dims_t[:, 0] * dims_t[:, 1]
    union = areas_p + areas_t - intersection
    loss = 1. - np.minimum(
        np.exp(np.log(np.abs(intersection)) - np.log(np.abs(union) + 1e-5)),
        1.
    )
    return np.mean(loss)
Example 29
def _log_single(x):
    """Sanitized log function for a single element. Since this method
    internally calls np.log and carries the (very likely) possibility to
    overflow, the method suppresses all warnings.

    # XXX: at some point we might want to let ``suppress_warnings``
    # specify exactly which types of warnings it should filter.

    Parameters
    ----------
    x : float, int
        The number to log

    Returns
    -------
    val : float
        The log of x
    """
    x = np.maximum(0, x)
    val = __min_log__ if x == 0 else np.maximum(__min_log__, np.log(x))
    return val
Example 30
def svm_loss(x, y):
    """
    Computes the loss and gradient for multiclass SVM classification.

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth
      class for the ith input.
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i]
      and 0 <= y[i] < C

    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    N = x.shape[0]
    correct_class_scores = x[np.arange(N), y]
    margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
    margins[np.arange(N), y] = 0
    loss = np.sum(margins) / N
    num_pos = np.sum(margins > 0, axis=1)
    dx = np.zeros_like(x)
    dx[margins > 0] = 1
    dx[np.arange(N), y] -= num_pos
    dx /= N
    return loss, dx
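A usage sketch on random scores (function above assumed in scope):

import numpy as np

rng = np.random.RandomState(0)
scores = rng.randn(5, 3)            # N=5 samples, C=3 classes
labels = rng.randint(3, size=5)
loss, dx = svm_loss(scores, labels)
print(loss, dx.shape)               # scalar hinge loss and a (5, 3) gradient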
Example 31
def random_hsv_image(bgr_image, delta_hue, delta_sat_scale, delta_val_scale):
    hsv_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV).astype(np.float32)

    # hue
    hsv_image[:, :, 0] += int((np.random.rand() * delta_hue * 2 - delta_hue) * 255)

    # sat
    sat_scale = 1 + np.random.rand() * delta_sat_scale * 2 - delta_sat_scale
    hsv_image[:, :, 1] *= sat_scale

    # val
    val_scale = 1 + np.random.rand() * delta_val_scale * 2 - delta_val_scale
    hsv_image[:, :, 2] *= val_scale

    hsv_image[hsv_image < 0] = 0
    hsv_image[hsv_image > 255] = 255
    hsv_image = hsv_image.astype(np.uint8)
    bgr_image = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)
    return bgr_image
Example 32
def reshape_to_yolo_size(img):
    input_width, input_height = img.size
    min_pixel = 320.0
    # max_pixel = 608
    max_pixel = 1024.0

    min_edge = np.minimum(input_width, input_height)
    if min_edge < min_pixel:
        input_width *= min_pixel / min_edge
        input_height *= min_pixel / min_edge
    max_edge = np.maximum(input_width, input_height)
    if max_edge > max_pixel:
        input_width *= max_pixel / max_edge
        input_height *= max_pixel / max_edge

    input_width = int(input_width / 32.0 + round(input_width % 32 / 32.0)) * 32
    input_height = int(input_height / 32.0 + round(input_height % 32 / 32.0)) * 32
    img = img.resize((input_width, input_height))

    return img
Example 33
def test_reduce(self):
    dflt = np.typecodes['AllFloat']
    dint = np.typecodes['AllInteger']
    seq1 = np.arange(11)
    seq2 = seq1[::-1]
    func = np.maximum.reduce
    for dt in dint:
        tmp1 = seq1.astype(dt)
        tmp2 = seq2.astype(dt)
        assert_equal(func(tmp1), 10)
        assert_equal(func(tmp2), 10)
    for dt in dflt:
        tmp1 = seq1.astype(dt)
        tmp2 = seq2.astype(dt)
        assert_equal(func(tmp1), 10)
        assert_equal(func(tmp2), 10)
        tmp1[::2] = np.nan
        tmp2[::2] = np.nan
        assert_equal(func(tmp1), np.nan)
        assert_equal(func(tmp2), np.nan)
Example 34
def test_truth_table_logical(self):
    # 2, 3 and 4 serve as true values
    input1 = [0, 0, 3, 2]
    input2 = [0, 4, 0, 2]

    typecodes = (np.typecodes['AllFloat'] +
                 np.typecodes['AllInteger'] +
                 '?')  # boolean
    for dtype in map(np.dtype, typecodes):
        arg1 = np.asarray(input1, dtype=dtype)
        arg2 = np.asarray(input2, dtype=dtype)

        # OR
        out = [False, True, True, True]
        for func in (np.logical_or, np.maximum):
            assert_equal(func(arg1, arg2).astype(bool), out)
        # AND
        out = [False, False, False, True]
        for func in (np.logical_and, np.minimum):
            assert_equal(func(arg1, arg2).astype(bool), out)
        # XOR
        out = [False, True, True, False]
        for func in (np.logical_xor, np.not_equal):
            assert_equal(func(arg1, arg2).astype(bool), out)
Example 35
def test_minmax_func(self):
    # Tests minimum and maximum.
    (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
    # max doesn't work if shaped
    xr = np.ravel(x)
    xmr = ravel(xm)
    # following are true because of careful selection of data
    assert_equal(max(xr), maximum(xmr))
    assert_equal(min(xr), minimum(xmr))

    assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])
    assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])
    x = arange(5)
    y = arange(5) - 2
    x[3] = masked
    y[0] = masked
    assert_equal(minimum(x, y), where(less(x, y), x, y))
    assert_equal(maximum(x, y), where(greater(x, y), x, y))
    assert_(minimum(x) == 0)
    assert_(maximum(x) == 4)

    x = arange(4).reshape(2, 2)
    x[-1, -1] = masked
    assert_equal(maximum(x), 2)
Example 36
def applyPadding(inputImg, sampleSizes, receptiveField):
    receptiveField_arr = np.asarray(receptiveField, dtype="int16")
    inputImg_arr = np.asarray(inputImg.shape, dtype="int16")

    receptiveField = np.array(receptiveField, dtype="int16")

    left_padding = (receptiveField - 1) // 2
    right_padding = receptiveField - 1 - left_padding

    extra_padding = np.maximum(0, np.asarray(sampleSizes, dtype="int16")
                               - (inputImg_arr + left_padding + right_padding))
    right_padding += extra_padding

    paddingValues = ((left_padding[0], right_padding[0]),
                     (left_padding[1], right_padding[1]),
                     (left_padding[2], right_padding[2]))

    paddedImage = lib.pad(inputImg, paddingValues, mode='reflect')
    return [paddedImage, paddingValues]
Example 37
def _natural_cubic_spline_basis_expansion(xpts, knots):
    num_knots = len(knots)
    num_pts = len(xpts)
    outmat = np.zeros((num_pts, num_knots))
    outmat[:, 0] = np.ones(num_pts)
    outmat[:, 1] = xpts

    def make_func_H(k):
        def make_func_d(k):
            def func_d(x):
                denom = knots[-1] - knots[k - 1]
                numer = (np.maximum(x - knots[k - 1], np.zeros(len(x))) ** 3
                         - np.maximum(x - knots[-1], np.zeros(len(x))) ** 3)
                return numer / denom
            return func_d

        def func_H(x):
            d_fun_k = make_func_d(k)
            d_fun_Km1 = make_func_d(num_knots - 1)
            return d_fun_k(x) - d_fun_Km1(x)
        return func_H

    for i in range(1, num_knots - 1):
        curr_H_fun = make_func_H(i)
        outmat[:, i + 1] = curr_H_fun(xpts)

    return outmat
Example 38
def batch_iou(proposals, gt):
    bboxes = np.transpose(proposals).reshape((4, -1, 1))
    bboxes_x1 = bboxes[0]
    bboxes_x2 = bboxes[0] + bboxes[2]
    bboxes_y1 = bboxes[1]
    bboxes_y2 = bboxes[1] + bboxes[3]

    gt = np.transpose(gt).reshape((4, 1, -1))
    gt_x1 = gt[0]
    gt_x2 = gt[0] + gt[2]
    gt_y1 = gt[1]
    gt_y2 = gt[1] + gt[3]

    widths = np.maximum(0, np.minimum(bboxes_x2, gt_x2) -
                        np.maximum(bboxes_x1, gt_x1))
    heights = np.maximum(0, np.minimum(bboxes_y2, gt_y2) -
                         np.maximum(bboxes_y1, gt_y1))
    intersection = widths * heights
    union = bboxes[2] * bboxes[3] + gt[2] * gt[3] - intersection
    return (intersection / union)
Example 39
def decode_bboxes(tcoords, anchors):
    var_x, var_y, var_w, var_h = config['prior_variance']
    t_x = tcoords[:, 0] * var_x
    t_y = tcoords[:, 1] * var_y
    t_w = tcoords[:, 2] * var_w
    t_h = tcoords[:, 3] * var_h
    a_w = anchors[:, 2]
    a_h = anchors[:, 3]
    a_x = anchors[:, 0] + a_w / 2
    a_y = anchors[:, 1] + a_h / 2
    x = t_x * a_w + a_x
    y = t_y * a_h + a_y
    w = tf.exp(t_w) * a_w
    h = tf.exp(t_h) * a_h

    x1 = tf.maximum(0., x - w / 2)
    y1 = tf.maximum(0., y - h / 2)
    x2 = tf.minimum(1., w + x1)
    y2 = tf.minimum(1., h + y1)
    return tf.stack([y1, x1, y2, x2], axis=1)
Example 40
def plot(self, image, filename, save_sample):
    """Plot an image."""
    image = np.minimum(image, 1)
    image = np.maximum(image, -1)
    image = np.squeeze(image)
    # Scale to 0..255.
    imin, imax = image.min(), image.max()
    image = (image - imin) * 255. / (imax - imin) + .5
    image = image.astype(np.uint8)
    if save_sample:
        try:
            Image.fromarray(image).save(filename)
        except Exception as e:
            print("Warning: could not sample to ", filename,
                  ". Please check permissions and make sure the path exists")
            print(e)
    GlobalViewer.update(image)
Example 41
def sample_output(self, val):
    vocabulary = self.get_vocabulary()
    if self.one_hot:
        vals = [np.argmax(r) for r in val]
        ox_val = [vocabulary[obj] for obj in list(vals)]
        string = "".join(ox_val)
        return string
    else:
        val = np.reshape(val, [-1])
        val *= len(vocabulary) / 2.0
        val += len(vocabulary) / 2.0
        val = np.round(val)
        val = np.maximum(0, val)
        val = np.minimum(len(vocabulary) - 1, val)
        ox_val = [self.get_character(obj) for obj in list(val)]
        string = "".join(ox_val)
        return string
Example 42
def get_union_sets(lang_codes, feature_set_str):
    feature_set_parts = feature_set_str.split("|")
    feature_names, feature_values = get_named_set(lang_codes,
                                                  feature_set_parts[0])
    for feature_set_part in feature_set_parts[1:]:
        more_feature_names, more_feature_values = get_named_set(
            lang_codes, feature_set_part)
        if len(feature_names) != len(more_feature_names):
            print("ERROR: Cannot perform elementwise union on feature sets "
                  "of different size")
            sys.exit(0)
        feature_values = np.maximum(feature_values, more_feature_values)
    return feature_names, feature_values
Example 43
def prewhiten(x):
    mean = np.mean(x)
    std = np.std(x)
    std_adj = np.maximum(std, 1.0 / np.sqrt(x.size))
    y = np.multiply(np.subtract(x, mean), 1 / std_adj)
    return y
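A usage sketch (function above assumed in scope); after prewhitening, the values have roughly zero mean and unit spread:

import numpy as np

img = np.random.RandomState(0).randint(0, 256, (160, 160, 3)).astype(np.float32)
out = prewhiten(img)
print(out.mean(), out.std())  # approximately 0 and 1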
Example 44
def get_precision(self):
    """Compute data precision matrix with the generative model.

    Equals the inverse of the covariance but computed with
    the matrix inversion lemma for efficiency.

    Returns
    -------
    precision : array, shape=(n_features, n_features)
        Estimated precision of data.
    """
    n_features = self.components_.shape[1]

    # handle corner cases first
    if self.n_components_ == 0:
        return np.eye(n_features) / self.noise_variance_
    if self.n_components_ == n_features:
        return linalg.inv(self.get_covariance())

    # Get precision using matrix inversion lemma
    components_ = self.components_
    exp_var = self.explained_variance_
    exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
    precision = np.dot(components_, components_.T) / self.noise_variance_
    precision.flat[::len(precision) + 1] += 1. / exp_var_diff
    precision = np.dot(components_.T,
                       np.dot(linalg.inv(precision), components_))
    precision /= -(self.noise_variance_ ** 2)
    precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
    return precision
Example 45
def voc_ap(rec, prec, use_07_metric=False):
    """ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the VOC 07 11-point method
    (default: False).
    """
    if use_07_metric:
        # 11 point metric
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.
    else:
        # correct AP calculation
        # first append sentinel values at the end
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))

        # compute the precision envelope
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

        # to calculate area under PR curve, look for points
        # where X axis (recall) changes value
        i = np.where(mrec[1:] != mrec[:-1])[0]

        # and sum (\Delta recall) * prec
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap
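A usage sketch on a toy precision/recall curve (function above assumed in scope):

import numpy as np

rec = np.array([0.1, 0.2, 0.4, 0.7, 1.0])
prec = np.array([1.0, 1.0, 0.8, 0.6, 0.5])
print(voc_ap(rec, prec))                      # area under the interpolated PR curve
print(voc_ap(rec, prec, use_07_metric=True))  # VOC07 11-point approximation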
Example 46
def getPosteriorMeanAndVar(self, diagKTestTest, KtrainTest, post, intercept=0):
    L = post['L']
    if (np.size(L) == 0):
        raise Exception('L is an empty array')  # possible to compute it here
    Lchol = np.all((np.all(np.tril(L, -1) == 0, axis=0) &
                    (np.diag(L) > 0)) & np.isreal(np.diag(L)))
    ns = diagKTestTest.shape[0]
    nperbatch = 5000
    nact = 0

    # allocate mem
    fmu = np.zeros(ns)  # column vector (of length ns) of predictive latent means
    fs2 = np.zeros(ns)  # column vector (of length ns) of predictive latent variances

    while (nact < (ns - 1)):
        id = np.arange(nact, np.minimum(nact + nperbatch, ns))
        kss = diagKTestTest[id]
        Ks = KtrainTest[:, id]
        if (len(post['alpha'].shape) == 1):
            try:
                Fmu = intercept[id] + Ks.T.dot(post['alpha'])
            except:
                Fmu = intercept + Ks.T.dot(post['alpha'])
            fmu[id] = Fmu
        else:
            try:
                Fmu = intercept[id][:, np.newaxis] + Ks.T.dot(post['alpha'])
            except:
                Fmu = intercept + Ks.T.dot(post['alpha'])
            fmu[id] = Fmu.mean(axis=1)
        if Lchol:
            V = la.solve_triangular(
                L, Ks * np.tile(post['sW'], (id.shape[0], 1)).T,
                trans=1, check_finite=False, overwrite_b=True)
            fs2[id] = kss - np.sum(V**2, axis=0)  # predictive variances
        else:
            fs2[id] = kss + np.sum(Ks * (L.dot(Ks)), axis=0)  # predictive variances
        # remove numerical noise, i.e. negative variances
        fs2[id] = np.maximum(fs2[id], 0)
        nact = id[-1]  # set counter to index of last processed data point

    return fmu, fs2
Example 47
def sq_dist(a, b=None):
    # mean-center for numerical stability
    D, n = a.shape[0], a.shape[1]
    if (b is None):
        mu = a.mean(axis=1)
        a -= mu[:, np.newaxis]
        b = a
        m = n
        aSq = np.sum(a**2, axis=0)
        bSq = aSq
    else:
        d, m = b.shape[0], b.shape[1]
        if (d != D):
            raise Exception('column lengths must agree')
        mu = ((float(m) / float(m + n)) * b.mean(axis=1)
              + (float(n) / float(m + n)) * a.mean(axis=1))
        a -= mu[:, np.newaxis]
        b -= mu[:, np.newaxis]
        aSq = np.sum(a**2, axis=0)
        bSq = np.sum(b**2, axis=0)

    C = (np.tile(np.column_stack(aSq).T, (1, m))
         + np.tile(bSq, (n, 1)) - 2 * a.T.dot(b))
    C = np.maximum(C, 0)  # remove numerical noise
    return C
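The result can be verified against scipy (function above assumed in scope). Note that sq_dist centers its inputs in place, so copies are passed; points live in the columns:

import numpy as np
from scipy.spatial.distance import cdist

rng = np.random.RandomState(0)
a = rng.randn(3, 10)  # 10 points in 3 dimensions, one per column
b = rng.randn(3, 7)
C = sq_dist(a.copy(), b.copy())
print(np.allclose(C, cdist(a.T, b.T, 'sqeuclidean')))  # True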
Example 48
def __init__(self, X):
    Kernel.__init__(self)
    self.X_scaled = X / np.sqrt(X.shape[1])
    if (X.shape[1] >= X.shape[0] or True):
        self.K_sq = sq_dist(self.X_scaled.T)
    else:
        self.K_sq = None

    self.j = np.floor(X.shape[1] / 2.0) + self.v + 1

    self.pp = lambda r, j, v, f: np.maximum(1 - r, 0)**(j + v) * self.f(r, j)
    self.dpp = lambda r, j, v, f: (np.maximum(1 - r, 0)**(j + v - 1) * r *
                                   ((j + v) * f(r, j) -
                                    np.maximum(1 - r, 0) * self.df(r, j)))