The following are code examples showing how to use numpy.ascontiguousarray(). They are extracted from open source Python projects.
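Before the examples, here is a minimal sketch (not taken from any of the projects below) of what numpy.ascontiguousarray does: it returns an array in C-contiguous (row-major) memory order with the requested dtype, copying only when the input does not already satisfy those requirements.

import numpy as np

a = np.arange(12).reshape(3, 4).T        # transposed view: not C-contiguous
print(a.flags['C_CONTIGUOUS'])           # False

b = np.ascontiguousarray(a, dtype=np.float32)
print(b.flags['C_CONTIGUOUS'], b.dtype)  # True float32

c = np.ascontiguousarray(b)              # already contiguous: no copy is made
print(np.shares_memory(b, c))            # True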
Example 1
def _convert(matrix, arr):
    """Do the color space conversion.

    Parameters
    ----------
    matrix : array_like
        The 3x3 matrix to use.
    arr : array_like
        The input array.

    Returns
    -------
    out : ndarray, dtype=float
        The converted array.
    """
    arr = _prepare_colorarray(arr)
    arr = np.swapaxes(arr, 0, -1)
    oldshape = arr.shape
    arr = np.reshape(arr, (3, -1))
    out = np.dot(matrix, arr)
    out.shape = oldshape
    out = np.swapaxes(out, -1, 0)
    return np.ascontiguousarray(out)
Example 2
def filter_sort_unique(self, max_objval=float('Inf')):
    # filter
    if max_objval < float('inf'):
        good_idx = self.objvals <= max_objval
        self.objvals = self.objvals[good_idx]
        self.solutions = self.solutions[good_idx]

    if len(self.objvals) > 0:
        sort_idx = np.argsort(self.objvals)
        self.objvals = self.objvals[sort_idx]
        self.solutions = self.solutions[sort_idx]

        # unique
        b = np.ascontiguousarray(self.solutions).view(
            np.dtype((np.void, self.solutions.dtype.itemsize * self.P)))
        _, unique_idx = np.unique(b, return_index=True)
        self.objvals = self.objvals[unique_idx]
        self.solutions = self.solutions[unique_idx]
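This example, and several later ones, uses the same idiom: view a C-contiguous 2-D array as a 1-D array of np.void records so that np.unique can deduplicate whole rows. A standalone sketch of just that pattern (not from the project above):

import numpy as np

solutions = np.array([[1, 0, 1],
                      [0, 1, 1],
                      [1, 0, 1]])        # rows 0 and 2 are identical

# View each row as one opaque np.void record; this requires C-contiguous data.
row_bytes = solutions.dtype.itemsize * solutions.shape[1]
b = np.ascontiguousarray(solutions).view(np.dtype((np.void, row_bytes)))

_, unique_idx = np.unique(b, return_index=True)
print(solutions[np.sort(unique_idx)])    # [[1 0 1]
                                         #  [0 1 1]]

On NumPy 1.13 and newer, the same row-wise deduplication is available directly as np.unique(solutions, axis=0).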
Example 3
def get_screen(self):
    screen = self.env.render(mode='rgb_array').transpose(
        (2, 0, 1))  # transpose into torch order (CHW)
    # Strip off the top and bottom of the screen
    screen = screen[:, 160:320]
    view_width = 320
    cart_location = self.get_cart_location()
    if cart_location < view_width // 2:
        slice_range = slice(view_width)
    elif cart_location > (self.screen_width - view_width // 2):
        slice_range = slice(-view_width, None)
    else:
        slice_range = slice(cart_location - view_width // 2,
                            cart_location + view_width // 2)
    # Strip off the edges, so that we have a square image centered on a cart
    screen = screen[:, :, slice_range]
    # Convert to float, rescale, convert to torch tensor
    # (this doesn't require a copy)
    screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
    screen = torch.from_numpy(screen)
    # Resize, and add a batch dimension (BCHW)
    return self.resize(screen).numpy()
Example 4
def _compute_targets(rois, overlaps, labels):
    """Compute bounding-box regression targets for an image."""
    # Indices of ground-truth ROIs
    gt_inds = np.where(overlaps == 1)[0]
    if len(gt_inds) == 0:
        # Bail if the image has no ground-truth ROIs
        return np.zeros((rois.shape[0], 5), dtype=np.float32)
    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = bbox_overlaps(
        np.ascontiguousarray(rois[ex_inds, :], dtype=np.float),
        np.ascontiguousarray(rois[gt_inds, :], dtype=np.float))

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]

    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    targets[ex_inds, 0] = labels[ex_inds]
    targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
    return targets
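Here bbox_overlaps is typically the Cython helper from the Fast/Faster R-CNN codebases; np.ascontiguousarray(..., dtype=np.float) guarantees it receives C-contiguous float64 arrays (np.float is an alias of Python float, i.e. float64, and has been removed in NumPy 1.24+). Purely for illustration, a NumPy-only stand-in with the same (N, 4) x (K, 4) -> (N, K) IoU interface could look like the sketch below; this is not the project's implementation.

import numpy as np

def bbox_overlaps_np(boxes, query_boxes):
    """IoU between each box in `boxes` (N, 4) and `query_boxes` (K, 4).

    Boxes are (x1, y1, x2, y2); areas use the +1 pixel convention of the
    original Cython code.
    """
    boxes = np.ascontiguousarray(boxes, dtype=np.float64)
    query_boxes = np.ascontiguousarray(query_boxes, dtype=np.float64)

    area_b = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    area_q = (query_boxes[:, 2] - query_boxes[:, 0] + 1) * \
             (query_boxes[:, 3] - query_boxes[:, 1] + 1)

    # Pairwise intersection widths/heights via broadcasting: (N, 1) against (K,)
    iw = (np.minimum(boxes[:, None, 2], query_boxes[None, :, 2])
          - np.maximum(boxes[:, None, 0], query_boxes[None, :, 0]) + 1)
    ih = (np.minimum(boxes[:, None, 3], query_boxes[None, :, 3])
          - np.maximum(boxes[:, None, 1], query_boxes[None, :, 1]) + 1)
    iw = np.clip(iw, 0, None)
    ih = np.clip(ih, 0, None)

    inter = iw * ih
    union = area_b[:, None] + area_q[None, :] - inter
    return inter / union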
Example 5
def fit(self, X, y):
    """
    Estimate the topic distributions per document (theta), term
    distributions per topic (phi), and regression coefficients (eta).

    Parameters
    ----------
    X : array-like, shape = (n_docs, n_terms)
        The document-term matrix.

    y : array-like, shape = (n_docs,)
        Response values for each document.
    """
    self.doc_term_matrix = X
    self.n_docs, self.n_terms = X.shape
    self.n_tokens = X.sum()
    doc_lookup, term_lookup = self._create_lookups(X)

    # iterate
    self.theta, self.phi, self.eta, self.loglikelihoods = gibbs_sampler_slda(
        self.n_iter, self.n_report_iter,
        self.n_topics, self.n_docs, self.n_terms, self.n_tokens,
        self.alpha, self.beta, self.mu, self.nu2, self.sigma2,
        doc_lookup, term_lookup,
        np.ascontiguousarray(y, dtype=np.float64), self.seed)
Example 6
def fit(self, X, y):
    """
    Estimate the topic distributions per document (theta), term
    distributions per topic (phi), and regression coefficients (eta).

    Parameters
    ----------
    X : array-like, shape = (n_docs, n_terms)
        The document-term matrix.

    y : array-like, shape = (n_docs,)
        Response values for each document.
    """
    self.doc_term_matrix = X
    self.n_docs, self.n_terms = X.shape
    self.n_tokens = X.sum()
    doc_lookup, term_lookup = self._create_lookups(X)

    # iterate
    self.theta, self.phi, self.eta, self.loglikelihoods = gibbs_sampler_blslda(
        self.n_iter, self.n_report_iter,
        self.n_topics, self.n_docs, self.n_terms, self.n_tokens,
        self.alpha, self.beta, self.mu, self.nu2, self.b,
        doc_lookup, term_lookup,
        np.ascontiguousarray(y, dtype=np.float64), self.seed)
Example 7
def console_fill_foreground(con, r, g, b):
    if len(r) != len(g) or len(r) != len(b):
        raise TypeError('R, G and B must all have the same size.')

    if (numpy_available and isinstance(r, numpy.ndarray) and
            isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
        # numpy arrays, use numpy's ctypes functions
        r = numpy.ascontiguousarray(r, dtype=numpy.int_)
        g = numpy.ascontiguousarray(g, dtype=numpy.int_)
        b = numpy.ascontiguousarray(b, dtype=numpy.int_)
        cr = r.ctypes.data_as(POINTER(c_int))
        cg = g.ctypes.data_as(POINTER(c_int))
        cb = b.ctypes.data_as(POINTER(c_int))
    else:
        # otherwise convert using ctypes arrays
        cr = (c_int * len(r))(*r)
        cg = (c_int * len(g))(*g)
        cb = (c_int * len(b))(*b)

    _lib.TCOD_console_fill_foreground(con, cr, cg, cb)
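The ascontiguousarray calls matter here because r.ctypes.data_as(...) hands the C library a raw pointer into the array's buffer, which is only safe when that buffer is C-contiguous and has exactly the element type the C side expects. A minimal standalone sketch of the same pointer-passing pattern, with ctypes.memmove standing in for the foreign call (any C function taking an int32 buffer would be invoked the same way):

import ctypes
import numpy as np

a = np.arange(6, dtype=np.int32).reshape(2, 3).T   # transposed view: not C-contiguous
r = np.ascontiguousarray(a, dtype=np.int32)        # contiguous int32 copy
dst = np.zeros_like(r)

# data_as() returns a raw pointer into the array buffer; the layout of that
# buffer must match what the C side expects.
c_src = r.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
c_dst = dst.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))

ctypes.memmove(c_dst, c_src, r.nbytes)             # stands in for the C call
print(np.array_equal(dst, r))                      # True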
Example 8
def console_fill_background(con, r, g, b):
    if len(r) != len(g) or len(r) != len(b):
        raise TypeError('R, G and B must all have the same size.')

    if (numpy_available and isinstance(r, numpy.ndarray) and
            isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
        # numpy arrays, use numpy's ctypes functions
        r = numpy.ascontiguousarray(r, dtype=numpy.int_)
        g = numpy.ascontiguousarray(g, dtype=numpy.int_)
        b = numpy.ascontiguousarray(b, dtype=numpy.int_)
        cr = r.ctypes.data_as(POINTER(c_int))
        cg = g.ctypes.data_as(POINTER(c_int))
        cb = b.ctypes.data_as(POINTER(c_int))
    else:
        # otherwise convert using ctypes arrays
        cr = (c_int * len(r))(*r)
        cg = (c_int * len(g))(*g)
        cb = (c_int * len(b))(*b)

    _lib.TCOD_console_fill_background(con, cr, cg, cb)
Example 9
def predict(self, inputs):
    # uses MEMORY_DATA layer for loading images and postprocessing DENSE_CRF layer
    img = inputs[0].transpose((2, 0, 1))
    img = img[np.newaxis, :].astype(np.float32)
    label = np.zeros((1, 1, 1, 1), np.float32)
    data_dim = np.zeros((1, 1, 1, 2), np.float32)
    data_dim[0][0][0][0] = img.shape[2]
    data_dim[0][0][0][1] = img.shape[3]

    img = np.ascontiguousarray(img, dtype=np.float32)
    label = np.ascontiguousarray(label, dtype=np.float32)
    data_dim = np.ascontiguousarray(data_dim, dtype=np.float32)

    self.set_input_arrays(img, label, data_dim)
    out = self.forward()

    predictions = out[self.outputs[0]]  # the output layer should be called crf_inf
    segm_result = predictions[0].argmax(axis=0).astype(np.uint8)
    return segm_result
Example 10
def _compute_targets(rois, overlaps, labels):
    """Compute bounding-box regression targets for an image."""
    # Indices of ground-truth ROIs
    gt_inds = np.where(overlaps == 1)[0]
    # Indices of examples for which we try to make predictions
    ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]

    # Get IoU overlap between each ex ROI and gt ROI
    ex_gt_overlaps = bbox_overlaps(
        np.ascontiguousarray(rois[ex_inds, :], dtype=np.float),
        np.ascontiguousarray(rois[gt_inds, :], dtype=np.float))

    # Find which gt ROI each ex ROI has max overlap with:
    # this will be the ex ROI's gt target
    gt_assignment = ex_gt_overlaps.argmax(axis=1)
    gt_rois = rois[gt_inds[gt_assignment], :]
    ex_rois = rois[ex_inds, :]

    targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
    targets[ex_inds, 0] = labels[ex_inds]
    targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
    return targets
Example 11
def write_features_hdf5(train, valid, test):
    f = h5py.File(args.hdf5, "w")

    train_grp = f.create_group("train")
    train_x = train_grp.create_dataset("train_x", train[0].shape, dtype='f',
                                       compression="gzip", compression_opts=9)
    train_y = train_grp.create_dataset("train_y", train[1].shape, dtype='i',
                                       compression="gzip", compression_opts=9)

    valid_grp = f.create_group("valid")
    valid_x = valid_grp.create_dataset("valid_x", valid[0].shape, dtype='f',
                                       compression="gzip", compression_opts=9)
    valid_y = valid_grp.create_dataset("valid_y", valid[1].shape, dtype='i',
                                       compression="gzip", compression_opts=9)

    test_grp = f.create_group("test")
    test_x = test_grp.create_dataset("test_x", test[0].shape, dtype='f',
                                     compression="gzip", compression_opts=9)
    test_y = test_grp.create_dataset("test_y", test[1].shape, dtype='i',
                                     compression="gzip", compression_opts=9)

    train_x.write_direct(np.ascontiguousarray(train[0], dtype=train[0].dtype))
    train_y.write_direct(np.ascontiguousarray(train[1], dtype=train[1].dtype))
    valid_x.write_direct(np.ascontiguousarray(valid[0], dtype=valid[0].dtype))
    valid_y.write_direct(np.ascontiguousarray(valid[1], dtype=valid[1].dtype))
    test_x.write_direct(np.ascontiguousarray(test[0], dtype=test[0].dtype))
    test_y.write_direct(np.ascontiguousarray(test[1], dtype=test[1].dtype))

    f.close()
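h5py's write_direct writes straight from an existing NumPy buffer into the dataset, which is why the sources are forced to be contiguous first. A small self-contained sketch of the same call, using an in-memory HDF5 file and a made-up dataset name:

import h5py
import numpy as np

x = np.random.rand(10, 4).astype(np.float32)

# driver='core' with backing_store=False keeps the HDF5 file in memory only.
with h5py.File('scratch.h5', 'w', driver='core', backing_store=False) as f:
    dset = f.create_dataset('train_x', x.shape, dtype='f',
                            compression='gzip', compression_opts=9)
    dset.write_direct(np.ascontiguousarray(x, dtype=x.dtype))
    print(np.allclose(dset[...], x))   # True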
Example 12
def preprocess_image(img, cuda=False):
    means = [0.485, 0.456, 0.406]
    stds = [0.229, 0.224, 0.225]

    preprocessed_img = img.copy()[:, :, ::-1]
    for i in range(3):
        preprocessed_img[:, :, i] = preprocessed_img[:, :, i] - means[i]
        preprocessed_img[:, :, i] = preprocessed_img[:, :, i] / stds[i]
    preprocessed_img = \
        np.ascontiguousarray(np.transpose(preprocessed_img, (2, 0, 1)))
    preprocessed_img = torch.from_numpy(preprocessed_img)
    preprocessed_img.unsqueeze_(0)
    if cuda:
        preprocessed_img = Variable(preprocessed_img.cuda(), requires_grad=True)
    else:
        preprocessed_img = Variable(preprocessed_img, requires_grad=True)
    return preprocessed_img
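np.transpose only returns a strided view, so after the HWC-to-CHW permutation the array is no longer C-contiguous; np.ascontiguousarray materialises the copy before the data is handed to torch.from_numpy. A NumPy-only sketch of that detail (the image values here are placeholders):

import numpy as np

img = np.random.rand(32, 32, 3).astype(np.float32)   # HWC

chw = np.transpose(img, (2, 0, 1))                    # a strided view
print(chw.shape, chw.flags['C_CONTIGUOUS'])           # (3, 32, 32) False

chw = np.ascontiguousarray(chw)                       # materialise the CHW copy
print(chw.flags['C_CONTIGUOUS'])                      # True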
Example 13
def find_boundary(mesh, vals, threshold=0.5):
    """ Find boundary points on the phase diagram where the switching probability = threshold """
    boundary_points = []
    durs = mesh.points[:, 0]
    volts = mesh.points[:, 1]
    indices, indptr = mesh.vertex_neighbor_vertices
    for k in range(len(vals)):
        for k_nb in indptr[indices[k]:indices[k+1]]:
            if (vals[k] - threshold) * (vals[k_nb] - threshold) < 0:
                x0 = find_cross([durs[k], vals[k]], [durs[k_nb], vals[k_nb]], cut=threshold)
                y0 = find_cross([volts[k], vals[k]], [volts[k_nb], vals[k_nb]], cut=threshold)
                boundary_points.append([x0, y0])
    boundary_points = np.array(boundary_points)
    if len(boundary_points) > 0:
        b = np.ascontiguousarray(boundary_points).view(
            np.dtype((np.void, boundary_points.dtype.itemsize * boundary_points.shape[1])))
        _, idx = np.unique(b, return_index=True)
        boundary_points = boundary_points[idx]
        # Sort the boundary_points by x-axis
        boundary_points = sorted(boundary_points, key=itemgetter(0))
    return np.array(boundary_points)
Example 14
def calc_information_sampling(data, bins, pys1, pxs, label, b, b1, len_unique_a,
                              p_YgX, unique_inverse_x, unique_inverse_y,
                              calc_DKL=False):
    bins = bins.astype(np.float32)
    num_of_bins = bins.shape[0]
    # bins = stats.mstats.mquantiles(np.squeeze(data.reshape(1, -1)), np.linspace(0,1, num=num_of_bins))
    # hist, bin_edges = np.histogram(np.squeeze(data.reshape(1, -1)), normed=True)
    digitized = bins[np.digitize(np.squeeze(data.reshape(1, -1)), bins) - 1].reshape(len(data), -1)
    b2 = np.ascontiguousarray(digitized).view(
        np.dtype((np.void, digitized.dtype.itemsize * digitized.shape[1])))
    unique_array, unique_inverse_t, unique_counts = \
        np.unique(b2, return_index=False, return_inverse=True, return_counts=True)
    p_ts = unique_counts / float(sum(unique_counts))
    PXs, PYs = np.asarray(pxs).T, np.asarray(pys1).T
    if calc_DKL:
        pxy_given_T = np.array(
            [calc_probs(i, unique_inverse_t, label, b, b1, len_unique_a)
             for i in range(0, len(unique_array))]
        )
        p_XgT = np.vstack(pxy_given_T[:, 0])
        p_YgT = pxy_given_T[:, 1]
        p_YgT = np.vstack(p_YgT).T
        DKL_YgX_YgT = np.sum([inf_ut.KL(c_p_YgX, p_YgT.T) for c_p_YgX in p_YgX.T], axis=0)
        H_Xgt = np.nansum(p_XgT * np.log2(p_XgT), axis=1)
    local_IXT, local_ITY = calc_information_from_mat(PXs, PYs, p_ts, digitized,
                                                     unique_inverse_x, unique_inverse_y,
                                                     unique_array)
    return local_IXT, local_ITY
Example 15
def calc_by_sampling_neurons(ws_iter_index, num_of_samples, label, sigma, bins, pxs):
    iter_information = []
    for j in range(len(ws_iter_index)):
        data = ws_iter_index[j]
        new_data = np.zeros((num_of_samples * data.shape[0], data.shape[1]))
        labels = np.zeros((num_of_samples * label.shape[0], label.shape[1]))
        x = np.zeros((num_of_samples * data.shape[0], 2))
        for i in range(data.shape[0]):
            cov_matrix = np.eye(data[i, :].shape[0]) * sigma
            t_i = np.random.multivariate_normal(data[i, :], cov_matrix, num_of_samples)
            new_data[num_of_samples * i:(num_of_samples * (i + 1)), :] = t_i
            labels[num_of_samples * i:(num_of_samples * (i + 1)), :] = label[i, :]
            x[num_of_samples * i:(num_of_samples * (i + 1)), 0] = i
        b = np.ascontiguousarray(x).view(np.dtype((np.void, x.dtype.itemsize * x.shape[1])))
        unique_array, unique_indices, unique_inverse_x, unique_counts = \
            np.unique(b, return_index=True, return_inverse=True, return_counts=True)
        b_y = np.ascontiguousarray(labels).view(np.dtype((np.void, labels.dtype.itemsize * labels.shape[1])))
        unique_array_y, unique_indices_y, unique_inverse_y, unique_counts_y = \
            np.unique(b_y, return_index=True, return_inverse=True, return_counts=True)
        pys1 = unique_counts_y / float(np.sum(unique_counts_y))
        iter_information.append(
            calc_information_for_layer(data=new_data, bins=bins,
                                       unique_inverse_x=unique_inverse_x,
                                       unique_inverse_y=unique_inverse_y,
                                       pxs=pxs, pys1=pys1))
    params = np.array(iter_information)
    return params
Example 16
def extract_probs(label, x):
    """calculate the probabilities of the given data and labels p(x), p(y) and p(y|x)"""
    pys = np.sum(label, axis=0) / float(label.shape[0])
    b = np.ascontiguousarray(x).view(np.dtype((np.void, x.dtype.itemsize * x.shape[1])))
    unique_array, unique_indices, unique_inverse_x, unique_counts = \
        np.unique(b, return_index=True, return_inverse=True, return_counts=True)
    unique_a = x[unique_indices]
    b1 = np.ascontiguousarray(unique_a).view(np.dtype((np.void, unique_a.dtype.itemsize * unique_a.shape[1])))
    pxs = unique_counts / float(np.sum(unique_counts))
    p_y_given_x = []
    for i in range(0, len(unique_array)):
        indexs = unique_inverse_x == i
        py_x_current = np.mean(label[indexs, :], axis=0)
        p_y_given_x.append(py_x_current)
    p_y_given_x = np.array(p_y_given_x).T
    b_y = np.ascontiguousarray(label).view(np.dtype((np.void, label.dtype.itemsize * label.shape[1])))
    unique_array_y, unique_indices_y, unique_inverse_y, unique_counts_y = \
        np.unique(b_y, return_index=True, return_inverse=True, return_counts=True)
    pys1 = unique_counts_y / float(np.sum(unique_counts_y))
    return pys, pys1, p_y_given_x, b1, b, unique_a, unique_inverse_x, unique_inverse_y, pxs
Example 17
def test(filename, save_name):
    MEAN_VALUES = np.array([123.68, 116.78, 103.94])
    MEAN_VALUES = MEAN_VALUES.reshape((1, 1, 1, 3))

    image = scipy.misc.imread(filename, mode='RGB')
    image = scipy.misc.imresize(image, (HEIGHT, WIDTH))
    h, w, d = image.shape
    timg = np.reshape(image, (1, h, w, 3)) - MEAN_VALUES

    with tf.Session() as sess:
        images = tf.placeholder(tf.float32, shape=[1, h, w, d])
        genered = tf.nn.softmax(tf.squeeze(segMnet(images, multiplier), axis=0))

        saver = tf.train.Saver(tf.global_variables())
        model_file = tf.train.latest_checkpoint(MODEL_SAVE_PATH)
        if model_file:
            saver.restore(sess, model_file)
        else:
            raise Exception('Testing needs pre-trained model!')

        feed_dict = {images: timg}
        start = time.time()
        result = sess.run(genered, feed_dict=feed_dict)
        end = time.time()
        print("cost time:%f" % (end - start))

        unary = unary_from_softmax(result.transpose((2, 0, 1)))
        unary = np.ascontiguousarray(unary)

        d = dcrf.DenseCRF(h * w, 2)
        d.setUnaryEnergy(unary)

        feats = create_pairwise_gaussian(sdims=(10, 10), shape=image.shape[:2])
        d.addPairwiseEnergy(feats, compat=3, kernel=dcrf.DIAG_KERNEL,
                            normalization=dcrf.NORMALIZE_SYMMETRIC)
        feats = create_pairwise_bilateral(sdims=(50, 50), schan=(20, 20, 20),
                                          img=image, chdim=2)
        d.addPairwiseEnergy(feats, compat=10, kernel=dcrf.DIAG_KERNEL,
                            normalization=dcrf.NORMALIZE_SYMMETRIC)

        Q = d.inference(50)
        MAP = np.argmax(Q, axis=0).reshape(h, w)

        img = np.zeros((h, w, 4), dtype=np.int)
        img[:, :, 0:3] = image
        img[:, :, 3] = MAP * 255
        scipy.misc.imsave(save_name, img)
Example 18
def to_coo(self, tensor_mode=False):
    userid, itemid, feedback = self.fields
    user_item_data = self.training[[userid, itemid]].values

    if tensor_mode:
        # TODO this recomputes feedback data every new function call,
        # but if data has not changed - no need for this, make a property
        new_feedback, feedback_transform = self.reindex(self.training, feedback, inplace=False)
        self.index = self.index._replace(feedback=feedback_transform)

        idx = np.hstack((user_item_data, new_feedback[:, np.newaxis]))
        idx = np.ascontiguousarray(idx)
        val = np.ones(self.training.shape[0],)
    else:
        idx = user_item_data
        val = self.training[feedback].values

    shp = tuple(idx.max(axis=0) + 1)
    idx = idx.astype(np.intp)
    val = np.ascontiguousarray(val)
    return idx, val, shp
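In the non-tensor branch, the returned (idx, val, shp) triplet describes a user-by-item matrix in coordinate form. Purely as an illustration (the values below are made up and not from the project), such a triplet can be materialised with scipy.sparse:

import numpy as np
from scipy.sparse import coo_matrix

idx = np.array([[0, 1], [0, 2], [2, 0]], dtype=np.intp)   # [user, item] pairs
val = np.ascontiguousarray([5.0, 3.0, 1.0])               # feedback values
shp = tuple(idx.max(axis=0) + 1)                          # (3, 3)

matrix = coo_matrix((val, (idx[:, 0], idx[:, 1])), shape=shp)
print(matrix.toarray())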
Example 19
def swap_axis_to_0(x, axis):
    """Insert a new singleton axis at position 0 and swap it with the
    specified axis. The resulting array has an additional dimension, with
    ``axis`` + 1 (which was ``axis`` before the insertion of the new axis)
    of ``x`` at position 0, and a singleton axis at position ``axis`` + 1.

    Parameters
    ----------
    x : ndarray
        Input array
    axis : int
        Index of axis in ``x`` to swap to axis index 0.

    Returns
    -------
    arr : ndarray
        Output array
    """
    return np.ascontiguousarray(np.swapaxes(x[np.newaxis, ...], 0, axis+1))
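A short usage sketch (not from the original project) showing the shapes produced by swap_axis_to_0:

import numpy as np

x = np.zeros((2, 3, 4))
out = swap_axis_to_0(x, axis=1)
print(out.shape)                    # (3, 2, 1, 4): old axis 1 is now axis 0,
                                    # with a singleton left at position 2
print(out.flags['C_CONTIGUOUS'])    # True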
Example 20
def write_tvl_direction_pairs(tvl_filename, tvl_header, direction_pairs):
    """Write the given directions to TVL.

    The direction pairs should be a list with lists containing the vector and value to write. For example:
    ((vec, val), (vec1, val1), ...) up to three pairs are allowed.

    Args:
        tvl_filename (str): the filename to write to
        tvl_header (:class:`list` or tuple): the header for the TVL file. This is a list of either 4 or 10 entries.
            4 entries: [version, res, gap, offset]
            10 entries: [version, x_res, x_gap, x_offset, y_res, y_gap, y_offset, z_res, z_gap, z_offset]
        direction_pairs (list of ndarrays): The list with direction pairs, only three are used.
            This is a list with (vector, magnitude) tuples in which the vectors are 4d volumes with for
            every voxel a 3d coordinate.
    """
    direction_pairs = direction_pairs[0:3]
    dir_matrix = np.zeros(direction_pairs[0][0].shape[0:3] + (12,))
    for ind, dirs in enumerate(direction_pairs):
        dir_matrix[..., ind*3:ind*3+3] = np.ascontiguousarray(TrackMark._ensure_3d(np.squeeze(dirs[0])))
        dir_matrix[..., 9 + ind] = np.ascontiguousarray(TrackMark._ensure_3d(np.squeeze(dirs[1])))

    TrackMark.write_tvl_matrix(tvl_filename, tvl_header, dir_matrix)
Example 21
def read_bed_chunk(filepath, nrows, ncols, row_start, row_end, col_start, col_end):
    X = zeros((row_end - row_start, col_end - col_start), int64)

    ptr = ffi.cast("uint64_t *", X.ctypes.data)
    strides = empty(2, int64)
    strides[:] = X.strides
    strides //= 8

    e = lib.read_bed_chunk(filepath, nrows, ncols,
                           row_start, col_start, row_end, col_end,
                           ptr, ffi.cast("uint64_t *", strides.ctypes.data))
    if e != 0:
        raise RuntimeError("Failure while reading BED file %s." % filepath)

    X = ascontiguousarray(X, float)
    X[X == 3] = nan
    return X
Example 22
def preprocess_image(img):
    means = [0.485, 0.456, 0.406]
    stds = [0.229, 0.224, 0.225]

    preprocessed_img = img.copy()[:, :, ::-1]
    for i in range(3):
        preprocessed_img[:, :, i] = preprocessed_img[:, :, i] - means[i]
        preprocessed_img[:, :, i] = preprocessed_img[:, :, i] / stds[i]
    preprocessed_img = \
        np.ascontiguousarray(np.transpose(preprocessed_img, (2, 0, 1)))

    if use_cuda:
        preprocessed_img_tensor = torch.from_numpy(preprocessed_img).cuda()
    else:
        preprocessed_img_tensor = torch.from_numpy(preprocessed_img)

    preprocessed_img_tensor.unsqueeze_(0)
    return Variable(preprocessed_img_tensor, requires_grad=False)
Example 23
def _sync_copyfrom(self, source_array):
    """Perform a synchronized copy from the array.

    Parameters
    ----------
    source_array : array_like
        The data source we should like to copy from.
    """
    if not isinstance(source_array, np.ndarray):
        try:
            source_array = np.array(source_array, dtype=np.float32)
        except:
            raise TypeError('array must be an array_like data,' +
                            'type %s is not supported' % str(type(source_array)))
    source_array = np.ascontiguousarray(source_array, dtype=np.float32)
    if source_array.shape != self.shape:
        raise ValueError('array shape do not match the shape of NDArray')
    source_arr, shape = NDArray._numpyasarray(source_array)
    check_call(_LIB.DLArrayCopyFromTo(
        ctypes.byref(source_arr), self.handle, None))
    # de-allocate shape until now
    _ = shape
Example 24
def console_fill_foreground(con, r, g, b):
    if len(r) != len(g) or len(r) != len(b):
        raise TypeError('R, G and B must all have the same size.')

    if (numpy_available and isinstance(r, numpy.ndarray) and
            isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
        # numpy arrays, use numpy's ctypes functions
        r = numpy.ascontiguousarray(r, dtype=numpy.int32)
        g = numpy.ascontiguousarray(g, dtype=numpy.int32)
        b = numpy.ascontiguousarray(b, dtype=numpy.int32)
        cr = r.ctypes.data_as(POINTER(c_int))
        cg = g.ctypes.data_as(POINTER(c_int))
        cb = b.ctypes.data_as(POINTER(c_int))
    else:
        # otherwise convert using ctypes arrays
        cr = (c_int * len(r))(*r)
        cg = (c_int * len(g))(*g)
        cb = (c_int * len(b))(*b)

    _lib.TCOD_console_fill_foreground(con, cr, cg, cb)
Example 25
def console_fill_background(con, r, g, b):
    if len(r) != len(g) or len(r) != len(b):
        raise TypeError('R, G and B must all have the same size.')

    if (numpy_available and isinstance(r, numpy.ndarray) and
            isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
        # numpy arrays, use numpy's ctypes functions
        r = numpy.ascontiguousarray(r, dtype=numpy.int32)
        g = numpy.ascontiguousarray(g, dtype=numpy.int32)
        b = numpy.ascontiguousarray(b, dtype=numpy.int32)
        cr = r.ctypes.data_as(POINTER(c_int))
        cg = g.ctypes.data_as(POINTER(c_int))
        cb = b.ctypes.data_as(POINTER(c_int))
    else:
        # otherwise convert using ctypes arrays
        cr = (c_int * len(r))(*r)
        cg = (c_int * len(g))(*g)
        cb = (c_int * len(b))(*b)

    _lib.TCOD_console_fill_background(con, cr, cg, cb)
Example 26
def c_correlation(ar1, ar2, ax=0, dx=1.):
    lib = ctypes.cdll.LoadLibrary('/home/tulasi/P3D-PLASMA-PIC/p3dpy/helloworld.so')
    func = lib.c_correlation
    func.restype = None
    func.argtypes = [ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"),  # ar1
                     ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"),  # ar2
                     ctypes.c_double,                                   # dx
                     ctypes.c_int,                                      # nlen
                     ctypes.c_int,                                      # nx
                     ctypes.c_int,                                      # ny
                     ctypes.c_int,                                      # nz
                     ctypes.c_int,                                      # ax
                     ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"),  # r
                     ndpointer(ctypes.c_double, flags="C_CONTIGUOUS")]  # corr
    # nlen finds the length of the array in the specified direction
    nlen = np.shape(ar2)[ax] / 2
    nx = np.shape(ar1)[0]
    ny = np.shape(ar1)[1]
    nz = np.shape(ar1)[2]
    r = np.zeros(nlen)
    corr = np.zeros(nlen)
    func(np.ascontiguousarray(ar1), np.ascontiguousarray(ar2),
         dx, nlen, nx, ny, nz, ax, r, corr)
    # func(ar1, ar2, dx, nlen, nx, ny, nz, ax, r, corr)
    return r, corr
Example 27
def _scale_data_to_float32(self, data):
    '''
        This function will convert data from local data dtype into float32, the default format of the algorithm
    '''
    if self.data_dtype != numpy.float32:
        data = data.astype(numpy.float32)
    if self.dtype_offset != 0:
        data -= self.dtype_offset
    if numpy.any(self.gain != 1):
        data *= self.gain
    return numpy.ascontiguousarray(data)
Example 28
def _masks_as_c_order(masks):
    masks = masks.transpose((2, 0, 1))
    masks = np.ascontiguousarray(masks)
    return masks
Example 29
def upload_boss_image(self, img, offset):
    shape = Vec(*img.shape[:3])
    offset = Vec(*offset)

    bounds = Bbox(offset, shape + offset)

    if bounds.volume() < 1:
        raise EmptyRequestException('Requested less than one pixel of volume. {}'.format(bounds))

    x_rng = [bounds.minpt.x, bounds.maxpt.x]
    y_rng = [bounds.minpt.y, bounds.maxpt.y]
    z_rng = [bounds.minpt.z, bounds.maxpt.z]

    layer_type = 'image' if self.layer_type == 'unknown' else self.layer_type

    chan = ChannelResource(
        collection_name=self.path.bucket,
        experiment_name=self.path.dataset,
        name=self.path.layer,  # Channel
        type=layer_type,
        datatype=self.dtype,
    )

    if img.shape[3] == 1:
        img = img.reshape(img.shape[:3])

    rmt = BossRemote(boss_credentials)
    img = img.T
    img = np.ascontiguousarray(img.astype(self.dtype))

    rmt.create_cutout(chan, self.mip, x_rng, y_rng, z_rng, img)
Example 30
def __new__(cls, buf, dataset_name, layer, mip, layer_type, bounds, *args, **kwargs):
    return super(VolumeCutout, cls).__new__(cls, shape=buf.shape,
                                            buffer=np.ascontiguousarray(buf),
                                            dtype=buf.dtype)
Example 31
def distinct(self):
    b = np.ascontiguousarray(self.solutions).view(
        np.dtype((np.void, self.solutions.dtype.itemsize * self.P)))
    _, unique_ind = np.unique(b, return_index=True)
    unique_ind = np.sort(unique_ind)

    new = self.copy()
    new.objvals = self.objvals[unique_ind]
    new.solutions = self.solutions[unique_ind]
    return new