The following are code examples showing how to use numpy.rint(). They are extracted from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.
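For context before the project examples, a minimal sketch of numpy.rint's basic behaviour: it rounds each array element to the nearest integer (ties round to the nearest even value) and returns a floating-point array, so an explicit cast is needed when integer indices or counts are required.

import numpy as np

a = np.array([-1.7, -0.5, 0.5, 1.5, 2.3])
rounded = np.rint(a)              # array([-2., -0.,  0.,  2.,  2.])  -- still float64
as_int = np.rint(a).astype(int)   # cast when integer indices or counts are needed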
Example 1
def visualiseLearnedFeatures(self):
    """ Visualise the features learned by the autoencoder """
    import matplotlib.pyplot as plt

    extent = np.sqrt(self._architecture[0])  # size of input vector is stored in self._architecture
    # number of rows and columns to plot (number of hidden units also stored in self._architecture)
    plotDims = np.rint(np.sqrt(self._architecture[1]))
    plt.ion()
    fig = plt.figure()
    plt.set_cmap("gnuplot")
    plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=-0.6, hspace=0.1)
    learnedFeatures = self.getLearnedFeatures()
    for i in range(self._architecture[1]):
        image = np.reshape(learnedFeatures[i, :], (extent, extent), order="F") * 1000
        ax = fig.add_subplot(plotDims, plotDims, i)
        plt.axis("off")
        ax.imshow(image, interpolation="nearest")
    plt.show()
    raw_input("Program paused. Press enter to continue.")
Example 2
def visualiseLearnedFeatures(self):
    """ Visualise the features learned by the autoencoder """
    import matplotlib.pyplot as plt

    extent = np.sqrt(self._architecture[0])  # size of input vector is stored in self._architecture
    # number of rows and columns to plot (number of hidden units also stored in self._architecture)
    plotDims = np.rint(np.sqrt(self._architecture[1]))
    plt.ion()
    fig = plt.figure()
    plt.set_cmap("gnuplot")
    plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=-0.6, hspace=0.1)
    learnedFeatures = self.getLearnedFeatures()
    for i in range(self._architecture[1]):
        image = np.reshape(learnedFeatures[i, :], (extent, extent), order="F") * 1000
        ax = fig.add_subplot(plotDims, plotDims, i)
        plt.axis("off")
        ax.imshow(image, interpolation="nearest")
    plt.show()
    input("Program paused. Press enter to continue.")
Example 3
def make_3d_mask(img_shape, center, radius, shape='sphere'):
    mask = np.zeros(img_shape)
    radius = np.rint(radius)
    center = np.rint(center)
    sz = np.arange(int(max(center[0] - radius, 0)),
                   int(max(min(center[0] + radius + 1, img_shape[0]), 0)))
    sy = np.arange(int(max(center[1] - radius, 0)),
                   int(max(min(center[1] + radius + 1, img_shape[1]), 0)))
    sx = np.arange(int(max(center[2] - radius, 0)),
                   int(max(min(center[2] + radius + 1, img_shape[2]), 0)))
    sz, sy, sx = np.meshgrid(sz, sy, sx)
    if shape == 'cube':
        mask[sz, sy, sx] = 1.
    elif shape == 'sphere':
        distance2 = ((center[0] - sz) ** 2
                     + (center[1] - sy) ** 2
                     + (center[2] - sx) ** 2)
        distance_matrix = np.ones_like(mask) * np.inf
        distance_matrix[sz, sy, sx] = distance2
        mask[(distance_matrix <= radius ** 2)] = 1
    elif shape == 'gauss':
        z, y, x = np.ogrid[:mask.shape[0], :mask.shape[1], :mask.shape[2]]
        distance = ((z - center[0]) ** 2 + (y - center[1]) ** 2 + (x - center[2]) ** 2)
        mask = np.exp(- 1. * distance / (2 * radius ** 2))
        mask[(distance > 3 * radius ** 2)] = 0
    return mask
Example 4
def data_shuffle(data_sets_org, percent_of_train, min_test_data=80, shuffle_data=False):
    """Divided the data to train and test and shuffle it"""
    perc = lambda i, t: np.rint((i * t) / 100).astype(np.int32)
    C = type('type_C', (object,), {})
    data_sets = C()
    stop_train_index = perc(percent_of_train[0], data_sets_org.data.shape[0])
    start_test_index = stop_train_index
    if percent_of_train > min_test_data:
        start_test_index = perc(min_test_data, data_sets_org.data.shape[0])
    data_sets.train = C()
    data_sets.test = C()
    if shuffle_data:
        shuffled_data, shuffled_labels = shuffle_in_unison_inplace(data_sets_org.data, data_sets_org.labels)
    else:
        shuffled_data, shuffled_labels = data_sets_org.data, data_sets_org.labels
    data_sets.train.data = shuffled_data[:stop_train_index, :]
    data_sets.train.labels = shuffled_labels[:stop_train_index, :]
    data_sets.test.data = shuffled_data[start_test_index:, :]
    data_sets.test.labels = shuffled_labels[start_test_index:, :]
    return data_sets
Example 5
def get_global_startindex(self):
    """
    Return the integer starting index for each dimension at the current level.
    """
    if self.start_index is not None:
        return self.start_index
    if self.Parent is None:
        iLE = self.LeftEdge - self.ds.domain_left_edge
        start_index = iLE / self.dds
        return np.rint(start_index).astype('int64').ravel()
    pdx = self.Parent[0].dds
    start_index = (self.Parent[0].get_global_startindex()) + \
                  np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
    self.start_index = (start_index*self.ds.refine_by).astype('int64').ravel()
    return self.start_index
Example 6
def get_global_startindex(self):
    """
    Return the integer starting index for each dimension at the current level.
    """
    if self.start_index is not None:
        return self.start_index
    if self.Parent is None:
        left = self.LeftEdge.d - self.ds.domain_left_edge.d
        start_index = left / self.dds.d
        return np.rint(start_index).astype('int64').ravel()
    pdx = self.Parent.dds.d
    di = np.rint((self.LeftEdge.d - self.Parent.LeftEdge.d) / pdx)
    start_index = self.Parent.get_global_startindex() + di
    self.start_index = (start_index * self.ds.refine_by).astype('int64').ravel()
    return self.start_index
Example 7
def _minimal_box(self, dds):
    LL = self.left_edge.d - self.ds.domain_left_edge.d
    # Nudge in case we're on the edge
    LL += np.finfo(np.float64).eps
    LS = self.right_edge.d - self.ds.domain_left_edge.d
    LS += np.finfo(np.float64).eps
    cell_start = LL / dds  # This is the cell we're inside
    cell_end = LS / dds
    if self.level == 0:
        start_index = np.array(np.floor(cell_start), dtype="int64")
        end_index = np.array(np.ceil(cell_end), dtype="int64")
        dims = np.rint((self.ActiveDimensions * self.dds.d) / dds).astype("int64")
    else:
        # Give us one buffer
        start_index = np.rint(cell_start).astype('int64') - 1
        # How many root cells do we occupy?
        end_index = np.rint(cell_end).astype('int64')
        dims = end_index - start_index + 1
    return start_index, end_index.astype("int64"), dims.astype("int32")
Example 8
def check_tree(self):
    for node in self.trunk.depth_traverse():
        if node.grid == -1:
            continue
        grid = self.ds.index.grids[node.grid - self._id_offset]
        dds = grid.dds
        gle = grid.LeftEdge
        nle = self.ds.arr(node.get_left_edge(), input_units="code_length")
        nre = self.ds.arr(node.get_right_edge(), input_units="code_length")
        li = np.rint((nle-gle)/dds).astype('int32')
        ri = np.rint((nre-gle)/dds).astype('int32')
        dims = (ri - li).astype('int32')
        assert(np.all(grid.LeftEdge <= nle))
        assert(np.all(grid.RightEdge >= nre))
        assert(np.all(dims > 0))
        # print grid, dims, li, ri
    # Calculate the Volume
    vol = self.trunk.kd_sum_volume()
    mylog.debug('AMRKDTree volume = %e' % vol)
    self.trunk.kd_node_check()
Example 9
def sum_cells(self, all_cells=False):
    cells = 0
    for node in self.trunk.depth_traverse():
        if node.grid == -1:
            continue
        if not all_cells and not node.kd_is_leaf():
            continue
        grid = self.ds.index.grids[node.grid - self._id_offset]
        dds = grid.dds
        gle = grid.LeftEdge
        nle = self.ds.arr(node.get_left_edge(), input_units="code_length")
        nre = self.ds.arr(node.get_right_edge(), input_units="code_length")
        li = np.rint((nle-gle)/dds).astype('int32')
        ri = np.rint((nre-gle)/dds).astype('int32')
        dims = (ri - li).astype('int32')
        cells += np.prod(dims)
    return cells
Example 10
def mujoco_to_imagespace(self, mujoco_coord, numpix=64, truncate=False):
    """
    convert form Mujoco-Coord to numpix x numpix image space:
    :param numpix: number of pixels of square image
    :param mujoco_coord:
    :return: pixel_coord
    """
    viewer_distance = .75  # distance from camera to the viewing plane
    window_height = 2 * np.tan(75 / 2 / 180. * np.pi) * viewer_distance  # window height in Mujoco coords
    pixelheight = window_height / numpix  # height of one pixel
    pixelwidth = pixelheight
    window_width = pixelwidth * numpix
    middle_pixel = numpix / 2
    pixel_coord = np.rint(np.array([-mujoco_coord[1], mujoco_coord[0]]) / pixelwidth +
                          np.array([middle_pixel, middle_pixel]))
    pixel_coord = pixel_coord.astype(int)
    return pixel_coord
Example 11
def mujoco_to_imagespace(mujoco_coord, numpix=64):
    """
    convert form Mujoco-Coord to numpix x numpix image space:
    :param numpix: number of pixels of square image
    :param mujoco_coord:
    :return: pixel_coord
    """
    viewer_distance = .75  # distance from camera to the viewing plane
    window_height = 2 * np.tan(75 / 2 / 180. * np.pi) * viewer_distance  # window height in Mujoco coords
    pixelheight = window_height / numpix  # height of one pixel
    pixelwidth = pixelheight
    window_width = pixelwidth * numpix
    middle_pixel = numpix / 2
    pixel_coord = np.rint(np.array([-mujoco_coord[1], mujoco_coord[0]]) / pixelwidth +
                          np.array([middle_pixel, middle_pixel]))
    pixel_coord = pixel_coord.astype(int)
    return pixel_coord
Example 12
def get_mesh_data(self):
    import numpy as np
    letters = [chr(97+i) for i in range(26)] + [chr(65+i) for i in range(26)]
    mesh = " {:11d} {:d} {:11d} NEL,NDIM,NELV".format(np.prod(self.n), 3, np.prod(self.n))
    for e in range(self.elements.shape[0]):
        ix = int(np.rint((self.elements[e,0] - self.root[0])/self.delta[0]))
        iy = int(np.rint((self.elements[e,4] - self.root[1])/self.delta[1]))
        iz = int(np.rint((self.elements[e,8] - self.root[2])/self.delta[2]))
        mesh += "\n ELEMENT {:11d} [{:5d}{:1s}] GROUP 0\n".format(e+1, iz+1, letters[(ix+iy*self.n[0]) % 52])
        mesh += " {: 13.10E} {: 13.10E} {: 13.10E} {: 13.10E} \n".format(*(self.elements[e, 0: 4].tolist()))
        mesh += " {: 13.10E} {: 13.10E} {: 13.10E} {: 13.10E} \n".format(*(self.elements[e, 4: 8].tolist()))
        mesh += " {: 13.10E} {: 13.10E} {: 13.10E} {: 13.10E} \n".format(*(self.elements[e, 8:12].tolist()))
        mesh += " {: 13.10E} {: 13.10E} {: 13.10E} {: 13.10E} \n".format(*(self.elements[e,12:16].tolist()))
        mesh += " {: 13.10E} {: 13.10E} {: 13.10E} {: 13.10E} \n".format(*(self.elements[e,16:20].tolist()))
        mesh += " {: 13.10E} {: 13.10E} {: 13.10E} {: 13.10E} ".format(*(self.elements[e,20:24].tolist()))
    return mesh
Example 13
def visualize_2D_trip(self, trip, tw_open, tw_close):
    plt.figure(figsize=(30,30))
    rcParams.update({'font.size': 22})
    # Plot cities
    colors = ['red']  # Depot is first city
    for i in range(len(tw_open)-1):
        colors.append('blue')
    plt.scatter(trip[:,0], trip[:,1], color=colors, s=200)
    # Plot tour
    tour = np.array(list(range(len(trip))) + [0])
    X = trip[tour, 0]
    Y = trip[tour, 1]
    plt.plot(X, Y, "--", markersize=100)
    # Annotate cities with TW
    tw_open = np.rint(tw_open)
    tw_close = np.rint(tw_close)
    time_window = np.concatenate((tw_open, tw_close), axis=1)
    for tw, (x, y) in zip(time_window, (zip(X, Y))):
        plt.annotate(tw, xy=(x, y))
    plt.xlim(0, 60)
    plt.ylim(0, 60)
    plt.show()

# Heatmap of permutations (x=cities; y=steps)
Example 14
def corrHist(positions):
    g = np.zeros(config.histSteps)
    for p1 in range(1, config.nParticles):
        for p2 in range(p1):
            X = positions[p2,0] - positions[p1,0]
            Y = positions[p2,1] - positions[p1,1]
            Z = positions[p2,2] - positions[p1,2]
            X -= np.rint(X/config.lCalc) * config.lCalc
            Y -= np.rint(Y/config.lCalc) * config.lCalc
            Z -= np.rint(Z/config.lCalc) * config.lCalc
            distance = np.sqrt(X*X + Y*Y + Z*Z)
            for i in range(config.histSteps):
                if (config.histRange/config.histSteps) * i < distance < (config.histRange/config.histSteps) * (i+1):
                    g[i] += 1 / (4 * np.pi * ((config.histRange/config.histSteps*i)**2) * (config.histRange/config.histSteps))
                    break
    g = g * 2 * (config.lCalc**3) / (config.nParticles*(config.nParticles-1))
    return g
Example 15
def create_little_group(self, kpoint):
    rotations = self._symmetry_operations["rotations"]
    translations = self._symmetry_operations["translations"]
    lattice = self._cell.get_cell()
    rotations_kpoint = []
    translations_kpoint = []
    for r, t in zip(rotations, translations):
        diff = np.dot(kpoint, r) - kpoint
        diff -= np.rint(diff)
        dist = np.linalg.norm(np.dot(np.linalg.inv(lattice), diff))
        if dist < self._symprec:
            rotations_kpoint.append(r)
            translations_kpoint.append(t)
    return np.array(rotations_kpoint), np.array(translations_kpoint)
Example 16
def configure(self, bin_width_s, record_length_s, number_of_gates=0):
    """ Configuration of the fast counter.

    @param float bin_width_s: Length of a single time bin in the time trace
                              histogram in seconds.
    @param float record_length_s: Total length of the timetrace/each single
                                  gate in seconds.
    @param int number_of_gates: optional, number of gates in the pulse
                                sequence. Ignore for not gated counter.

    @return tuple(binwidth_s, gate_length_s, number_of_gates):
                binwidth_s: float the actual set binwidth in seconds
                gate_length_s: the actual set gate length in seconds
                number_of_gates: the number of gated, which are accepted
    """
    self._binwidth = int(np.rint(bin_width_s * 1e9 * 950 / 1000))
    self._gate_length_bins = int(np.rint(record_length_s / bin_width_s))
    actual_binwidth = self._binwidth * 1000 / 950e9
    actual_length = self._gate_length_bins * actual_binwidth
    self.statusvar = 1
    return actual_binwidth, actual_length, number_of_gates
Example 17
def _set_dac_voltages(self):
    """ """
    with self.threadlock:
        dac_sma_mapping = {1: 1, 2: 5, 3: 2, 4: 6, 5: 3, 6: 7, 7: 4, 8: 8}
        set_voltage_cmd = 0x03000000
        for dac_chnl in range(8):
            sma_chnl = dac_sma_mapping[dac_chnl+1]
            dac_value = int(np.rint(4096*self._switching_voltage[sma_chnl]/(2.5*2)))
            if dac_value > 4095:
                dac_value = 4095
            elif dac_value < 0:
                dac_value = 0
            tmp_cmd = set_voltage_cmd + (dac_chnl << 20) + (dac_value << 8)
            self._fpga.SetWireInValue(0x01, tmp_cmd)
            self._fpga.UpdateWireIns()
            self._fpga.ActivateTriggerIn(0x41, 0)
    return
Example 18
def process1(self, sliced):
    global advance
    bitsamples = self.rate / float(self.baud)
    flagsamples = bitsamples * 9  # HDLC 01111110 flag (9 b/c NRZI)
    ff = self.findflag(sliced[0:int(round(flagsamples+advance*bitsamples+2))])
    if ff != None:
        indices = numpy.arange(0, len(sliced) - (ff+2*bitsamples), bitsamples)
        indices = indices + (ff + 0.5*bitsamples)
        indices = numpy.rint(indices).astype(int)
        rawsymbols = sliced[indices]
        symbols = numpy.where(rawsymbols > 0, 1, -1)
        [ ok, msg, nsymbols ] = self.finishframe(symbols[8:])
        if ok >= 1:
            return [ ok, msg, nsymbols, ff ]
    return [ 0, None, 0, 0 ]
Example 19
def opencv_wrapper(imgs, opencv_func, argv):
    ret_imgs = []
    imgs_copy = imgs

    if imgs.shape[3] == 1:
        imgs_copy = np.squeeze(imgs)

    for img in imgs_copy:
        img_uint8 = np.clip(np.rint(img * 255), 0, 255).astype(np.uint8)
        ret_img = opencv_func(*[img_uint8]+argv)
        if type(ret_img) == tuple:
            ret_img = ret_img[1]
        ret_img = ret_img.astype(np.float32) / 255.
        ret_imgs.append(ret_img)

    ret_imgs = np.stack(ret_imgs)
    if imgs.shape[3] == 1:
        ret_imgs = np.expand_dims(ret_imgs, axis=3)
    return ret_imgs

# Binary filters.
Example 20
def peak_interval(data, alpha=_alpha, npoints=_npoints):
    """
    Identify interval using Gaussian kernel density estimator.
    """
    peak = kde_peak(data, npoints)
    x = np.sort(data.flat)
    n = len(x)
    # The number of entries in the interval
    window = int(np.rint((1.0-alpha)*n))
    # The start, stop, and width of all possible intervals
    starts = x[:n-window]
    ends = x[window:]
    widths = ends - starts
    # Just the intervals containing the peak
    select = (peak >= starts) & (peak <= ends)
    widths = widths[select]
    if len(widths) == 0:
        raise ValueError('Too few elements for interval calculation')
    min_idx = np.argmin(widths)
    lo = x[min_idx]
    hi = x[min_idx+window]
    return interval(peak, lo, hi)
Example 21
def rescale(self, function):
    """
    perform raster computations with custom functions and assign them to the
    exitsting raster object in memory

    Args:
        function:

    Returns:

    """
    if self.bands != 1:
        raise ValueError('only single band images supported')
    # load array
    mat = self.matrix()
    # scale values
    scaled = function(mat)
    # round to nearest integer
    rounded = np.rint(scaled)
    # assign newly computed array to raster object
    self.assign(rounded)
Example 22
def zoom_image(image, zoom, out_width=25):
    """Return rescaled and cropped image array with width out_width.
    """
    if zoom < 1:
        raise ValueError("Zoom scale factor must be at least 1.")
    width, height = image.shape
    #if width < out_width:
    #    raise ValueError(
    #        "image width before zooming ({0}) is less "
    #        "than requested output width ({1})".format(width, out_width))
    out_height = int(np.rint(float(out_width * height) / width))
    t_width = int(np.rint(out_width * zoom))
    t_height = int(np.rint(out_height * zoom))
    if t_width // 2 != out_width // 2:
        t_width += 1
    if t_height // 2 != out_height // 2:
        t_height += 1
    # zoom with cubic interpolation
    t_image = transform.resize(image, (t_width, t_height), order=3)
    # crop
    return t_image[(t_width - out_width) / 2:(t_width + out_width) / 2,
                   (t_height - out_height) / 2:(t_height + out_height) / 2]
Example 23
def read_images(path):
    for subdir, dirs, files in os.walk(path):
        dcms = glob.glob(os.path.join(subdir, '*.dcm'))
        if len(dcms) > 1:
            slices = [dicom.read_file(dcm) for dcm in dcms]
            slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))
            images = np.stack([s.pixel_array for s in slices], axis=0).astype(np.float32)
            images = images + slices[0].RescaleIntercept
            images = normalize(images)

            inplane_scale = slices[0].PixelSpacing[0] / PIXEL_SPACING
            inplane_size = int(np.rint(inplane_scale * slices[0].Rows / 2) * 2)
            z_scale = slices[0].SliceThickness / SLICE_THICKNESS
            z_size = int(np.rint(z_scale * images.shape[0]))

            if inplane_size != INPLANE_SIZE or z_scale != 1:
                images = resize(images, (z_size, inplane_size, inplane_size), mode='constant')
                if inplane_size != INPLANE_SIZE:
                    if inplane_size > INPLANE_SIZE:
                        crop = int((inplane_size - INPLANE_SIZE) / 2)
                        images = images[:, crop : crop + INPLANE_SIZE, crop : crop + INPLANE_SIZE]
                    else:
                        pad = int((INPLANE_SIZE - new_size) / 2)
                        images = np.pad(images, ((0, 0), (pad, pad), (pad, pad)))
    return images
Example 24
def read_images_labels(path):
    # Read the images and labels from a folder containing both dicom files
    for subdir, dirs, files in os.walk(path):
        dcms = glob.glob(os.path.join(subdir, '*.dcm'))
        if len(dcms) == 1:
            structure = dicom.read_file(dcms[0])
            contours = read_structure(structure)
        elif len(dcms) > 1:
            slices = [dicom.read_file(dcm) for dcm in dcms]
            slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))
            images = np.stack([s.pixel_array for s in slices], axis=0).astype(np.float32)
            images = images + slices[0].RescaleIntercept
            images = normalize(images)

    labels = get_labels(contours, images.shape, slices)
    inplane_scale = slices[0].PixelSpacing[0] / PIXEL_SPACING
    inplane_size = int(np.rint(inplane_scale * slices[0].Rows / 2) * 2)
    z_scale = slices[0].SliceThickness / SLICE_THICKNESS
    z_size = int(np.rint(z_scale * images.shape[0]))

    if inplane_size != INPLANE_SIZE or z_scale != 1:
        images = resize(images, (z_size, inplane_size, inplane_size), mode='constant')
        new_labels = np.zeros_like(images, dtype=np.float32)
        for z in range(N_CLASSES):
            roi = resize((labels == z + 1).astype(np.float32),
                         (z_size, inplane_size, inplane_size), mode='constant')
            new_labels[roi >= 0.5] = z + 1
        labels = new_labels
        if inplane_size != INPLANE_SIZE:
            if inplane_size > INPLANE_SIZE:
                crop = int((inplane_size - INPLANE_SIZE) / 2)
                images = images[:, crop : crop + INPLANE_SIZE, crop : crop + INPLANE_SIZE]
                labels = labels[:, crop : crop + INPLANE_SIZE, crop : crop + INPLANE_SIZE]
            else:
                pad = int((INPLANE_SIZE - new_size) / 2)
                images = np.pad(images, ((0, 0), (pad, pad), (pad, pad)), 'constant')
                labels = np.pad(labels, ((0, 0), (pad, pad), (pad, pad)), 'constant')
    return images, labels
Example 25
def read_testing_inputs(file, roi, im_size, output_path=None):
    f_h5 = h5py.File(file, 'r')
    if roi == -1:
        images = np.asarray(f_h5['resized_images'], dtype=np.float32)
        read_info = {}
        read_info['shape'] = np.asarray(f_h5['images'], dtype=np.float32).shape
    else:
        images = np.asarray(f_h5['images'], dtype=np.float32)
        output = h5py.File(os.path.join(output_path, 'All_' + os.path.basename(file)), 'r')
        predictions = np.asarray(output['predictions'], dtype=np.float32)
        output.close()
        # Select the roi
        roi_labels = (predictions == roi + 1).astype(np.float32)
        nz = np.nonzero(roi_labels)
        extract = []
        for c in range(3):
            start = np.amin(nz[c])
            end = np.amax(nz[c])
            r = end - start
            extract.append((np.maximum(int(np.rint(start - r * 0.1)), 0),
                            np.minimum(int(np.rint(end + r * 0.1)), images.shape[c])))

        extract_images = images[extract[0][0] : extract[0][1],
                                extract[1][0] : extract[1][1],
                                extract[2][0] : extract[2][1]]
        read_info = {}
        read_info['shape'] = images.shape
        read_info['extract_shape'] = extract_images.shape
        read_info['extract'] = extract

        images = resize(extract_images, im_size, mode='constant')

    f_h5.close()
    return images, read_info
Example 26
def read_images_info(path):
    for subdir, dirs, files in os.walk(path):
        dcms = glob.glob(os.path.join(subdir, '*.dcm'))
        if len(dcms) > 1:
            slices = [dicom.read_file(dcm) for dcm in dcms]
            slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))
            images = np.stack([s.pixel_array for s in slices], axis=0).astype(np.float32)
            images = images + slices[0].RescaleIntercept
            orig_shape = images.shape
            inplane_scale = slices[0].PixelSpacing[0] / PIXEL_SPACING
            inplane_size = int(np.rint(inplane_scale * slices[0].Rows / 2) * 2)
    return orig_shape, inplane_size
Example 27
def time_slice(self, t_start, t_stop):
    '''
    Creates a new AnalogSignal corresponding to the time slice of the
    original AnalogSignal between times t_start, t_stop. Note, that for
    numerical stability reasons if t_start, t_stop do not fall exactly on
    the time bins defined by the sampling_period they will be rounded to
    the nearest sampling bins.
    '''
    # checking start time and transforming to start index
    if t_start is None:
        i = 0
    else:
        t_start = t_start.rescale(self.sampling_period.units)
        i = (t_start - self.t_start) / self.sampling_period
        i = int(np.rint(i.magnitude))

    # checking stop time and transforming to stop index
    if t_stop is None:
        j = len(self)
    else:
        t_stop = t_stop.rescale(self.sampling_period.units)
        j = (t_stop - self.t_start) / self.sampling_period
        j = int(np.rint(j.magnitude))

    if (i < 0) or (j > len(self)):
        raise ValueError('t_start, t_stop have to be withing the analog \
                          signal duration')

    # we're going to send the list of indicies so that we get *copy* of the
    # sliced data
    obj = super(AnalogSignal, self).__getitem__(np.arange(i, j, 1))
    obj.t_start = self.t_start + i * self.sampling_period

    return obj
Example 28
def getJitteredImgs(self, img, num, maxRot=(-5.0, 5.0), maxTranslate=(-2.0, 2.0),
                    maxScale=(-0.1, 0.1), augmentColor=False):
    """
    Take img and jitter it
    :return: a list of all jittered images
    """
    cx = img.size[0] / 2
    cy = img.size[1] / 2

    tMats = self.getJitteredParams(center=(cx, cy), num=num, maxRot=maxRot,
                                   maxTranslate=maxTranslate, maxScale=maxScale)
    imgs = []
    for i in range(len(tMats)):
        t = tMats[i]
        imgT = self.transformImg(img, t)

        if augmentColor:
            # jitter colors
            color = ImageEnhance.Color(imgT)
            imgT = color.enhance(self.rng.uniform(0.7, 1))

            # jitter contrast
            contr = ImageEnhance.Contrast(imgT)
            imgT = contr.enhance(self.rng.uniform(0.7, 1))

            # jitter brightness
            bright = ImageEnhance.Brightness(imgT)
            imgT = bright.enhance(self.rng.uniform(0.7, 1))

            # add noise
            im = numpy.asarray(imgT).astype('int') + numpy.rint(self.rng.normal(0, 4, numpy.asarray(imgT).shape)).astype('int')
            im = numpy.clip(im, 0, 255).astype('uint8')
            imgT = Image.fromarray(im)

        # add image
        imgs.append(imgT)

    return imgs, tMats
Example 29
def data_prep_function(data, luna_annotations, pixel_spacing, luna_origin,
                       p_transform=p_transform,
                       p_transform_augment=None):
    # make sure the data is processed the same way
    lung_mask = lung_segmentation.segment_HU_scan_ira(data)
    annotatations_out = []
    for zyxd in luna_annotations:
        zyx = np.array(zyxd[:3])
        voxel_coords = utils_lung.world2voxel(zyx, luna_origin, pixel_spacing)
        zyxd_out = np.rint(np.append(voxel_coords, zyxd[-1]))
        annotatations_out.append(zyxd_out)
    annotatations_out = np.asarray(annotatations_out)
    return lung_mask, lung_mask, lung_mask, annotatations_out, None
Example 30
def processMatrix(self):
    self._transformedMin = numpy.array([999999999999,999999999999,999999999999], numpy.float64)
    self._transformedMax = numpy.array([-999999999999,-999999999999,-999999999999], numpy.float64)
    self._boundaryCircleSize = 0

    hull = numpy.zeros((0, 2), numpy.int)
    for m in self._meshList:
        transformedVertexes = m.getTransformedVertexes()
        hull = polygon.convexHull(numpy.concatenate((numpy.rint(transformedVertexes[:,0:2]).astype(int), hull), 0))
        transformedMin = transformedVertexes.min(0)
        transformedMax = transformedVertexes.max(0)
        for n in xrange(0, 3):
            self._transformedMin[n] = min(transformedMin[n], self._transformedMin[n])
            self._transformedMax[n] = max(transformedMax[n], self._transformedMax[n])

        #Calculate the boundary circle
        transformedSize = transformedMax - transformedMin
        center = transformedMin + transformedSize / 2.0
        boundaryCircleSize = round(math.sqrt(numpy.max(((transformedVertexes[::,0] - center[0]) * (transformedVertexes[::,0] - center[0])) +
                                                       ((transformedVertexes[::,1] - center[1]) * (transformedVertexes[::,1] - center[1])) +
                                                       ((transformedVertexes[::,2] - center[2]) * (transformedVertexes[::,2] - center[2])))), 3)
        self._boundaryCircleSize = max(self._boundaryCircleSize, boundaryCircleSize)

    self._transformedSize = self._transformedMax - self._transformedMin
    self._drawOffset = (self._transformedMax + self._transformedMin) / 2
    self._drawOffset[2] = self._transformedMin[2]
    self._transformedMax -= self._drawOffset
    self._transformedMin -= self._drawOffset

    self._boundaryHull = polygon.minkowskiHull((hull.astype(numpy.float32) - self._drawOffset[0:2]),
                                               numpy.array([[-1,-1],[-1,1],[1,1],[1,-1]], numpy.float32))
    self._printAreaHull = polygon.minkowskiHull(self._boundaryHull, self._printAreaExtend)
    self.setHeadArea(self._headAreaExtend, self._headMinSize)
Example 31
def generate_patch_locations(patches, patch_size, im_size):
    nx = round((patches * 8 * im_size[0] * im_size[0] / im_size[1] / im_size[2]) ** (1.0 / 3))
    ny = round(nx * im_size[1] / im_size[0])
    nz = round(nx * im_size[2] / im_size[0])
    x = np.rint(np.linspace(patch_size, im_size[0] - patch_size, num=nx))
    y = np.rint(np.linspace(patch_size, im_size[1] - patch_size, num=ny))
    z = np.rint(np.linspace(patch_size, im_size[2] - patch_size, num=nz))
    return x, y, z
Example 32
def perturb_patch_locations(patch_locations, radius):
    x, y, z = patch_locations
    x = np.rint(x + np.random.uniform(-radius, radius, len(x)))
    y = np.rint(y + np.random.uniform(-radius, radius, len(y)))
    z = np.rint(z + np.random.uniform(-radius, radius, len(z)))
    return x, y, z
Example 33
def test_rint_big_int():
    # np.rint bug for large integer values on Windows 32-bit and MKL
    # https://github.com/numpy/numpy/issues/6685
    val = 4607998452777363968
    # This is exactly representable in floating point
    assert_equal(val, int(float(val)))
    # Rint should not change the value
    assert_equal(val, np.rint(val))
Example 34
def _normalize_shape(ndarray, shape, cast_to_int=True):
    ndims = ndarray.ndim
    if shape is None:
        return ((None, None), ) * ndims
    ndshape = numpy.asarray(shape)
    if ndshape.size == 1:
        ndshape = numpy.repeat(ndshape, 2)
    if ndshape.ndim == 1:
        ndshape = numpy.tile(ndshape, (ndims, 1))
    if ndshape.shape != (ndims, 2):
        message = 'Unable to create correctly shaped tuple from %s' % shape
        raise ValueError(message)
    if cast_to_int:
        ndshape = numpy.rint(ndshape).astype(int)
    return tuple(tuple(axis) for axis in ndshape)
Example 35
def calc_kappa(self, train_pred, dev_pred, test_pred, weight='quadratic'):
    train_pred_int = np.rint(train_pred).astype('int32')
    dev_pred_int = np.rint(dev_pred).astype('int32')
    test_pred_int = np.rint(test_pred).astype('int32')
    self.train_qwk = kappa(self.train_y_org, train_pred, weight)
    self.dev_qwk = kappa(self.dev_y_org, dev_pred, weight)
    self.test_qwk = kappa(self.test_y_org, test_pred, weight)
Example 36
def sanitise_image(mat):
    int_mat = np.rint(mat).astype(int)
    return np.clip(int_mat, 0, 255).astype(np.uint8)
Example 37
def extract_RGB_LBP_features(image, labels, size=5, P=8, R=2):
    n_sp = np.max(labels)+1
    hs = size//2
    img_superpixel = np.zeros_like(labels, dtype='int')

    # calculate lbp for entire region
    lbp_img = np.empty((3, ), dtype='object')
    for d in range(3):
        lbp_img[d] = local_binary_pattern(image[..., d], P=P, R=R, method='uniform')

    feat_desc_size = P+1
    feat_descs = np.zeros((n_sp, feat_desc_size*3))
    for i in range(n_sp):
        # get centroid of i'th superpixel
        img_superpixel[:] = labels == i
        cy, cx = [np.rint(x).astype('int') for x in regionprops(img_superpixel)[0].centroid]

        # extract lbp values in sizeXsize region centred on the centroid
        x0, y0, x1, y1 = cx-hs, cy-hs, cx+hs+1, cy+hs+1

        # clip to boundaries of image
        x0 = 0 if x0 < 0 else x0
        y0 = 0 if y0 < 0 else y0
        x1 = image.shape[1]-1 if x1 > image.shape[1]-2 else x1
        y1 = image.shape[0]-1 if y1 > image.shape[0]-2 else y1

        # fill in the feature vector for each image channel
        for d in range(3):
            j, k = d*feat_desc_size, (1+d)*feat_desc_size
            patch = lbp_img[d][y0:y1, x0:x1].flat
            fv = np.histogram(patch, bins=np.arange(0, feat_desc_size+1),
                              range=(0, feat_desc_size+1))[0]
            feat_descs[i, j:k] = fv

    return feat_descs
Example 38
def get_search_region(bbox, frame, ratio):
    """ Calculates coordinates of ratio*width/height of axis-aligned bbox,
        centred on the original bbox, constrained by the original size of
        the image.

    Arguments:
        bbox = axis-aligned bbox of the form [x0, y0, width, height]
        frame = MxNxD Image to constrain bbox by
        ratio = ratio at which to change bbox dimensions by

    Output:
        x0, y0, x1, y1 = Coordinates of expanded axis-aligned bbox
    """
    x0, y0, w, h = bbox
    ih, iw = frame.shape[:2]
    ww, hh = ratio*w, ratio*h

    # expand bbox by ratio
    x1 = np.min([iw-1, x0 + w/2 + ww/2])
    y1 = np.min([ih-1, y0 + h/2 + hh/2])
    x0 = np.max([0, x0 + w/2 - ww/2])
    y0 = np.max([0, y0 + h/2 - hh/2])
    x0, y0, x1, y1 = np.rint(np.array([x0, y0, x1, y1])).astype('int')
    return x0, y0, x1, y1
Example 39
def worldToVoxelCoord(worldCoord, origin, spacing):
    """
    only valid if there is no rotation component
    """
    voxelCoord = np.rint((worldCoord-origin) / spacing).astype(np.int)
    return voxelCoord
Example 40
def calc_all_sigams(data, sigmas):
    batchs = 128
    num_of_bins = 8
    # bins = np.linspace(-1, 1, num_of_bins).astype(np.float32)
    # bins = stats.mstats.mquantiles(np.squeeze(data.reshape(1, -1)), np.linspace(0,1, num=num_of_bins))
    # data = bins[np.digitize(np.squeeze(data.reshape(1, -1)), bins) - 1].reshape(len(data), -1)

    batch_points = np.rint(np.arange(0, data.shape[0] + 1, batchs)).astype(dtype=np.int32)
    I_XT = []
    num_of_rand = min(800, data.shape[1])
    for sigma in sigmas:
        # print sigma
        I_XT_temp = 0
        for i in range(0, len(batch_points) - 1):
            new_data = data[batch_points[i]:batch_points[i + 1], :]
            rand_indexs = np.random.randint(0, new_data.shape[1], num_of_rand)
            new_data = new_data[:, :]
            N = new_data.shape[0]
            d = new_data.shape[1]
            diff_mat = np.linalg.norm(((new_data[:, np.newaxis, :] - new_data)), axis=2)
            # print diff_mat.shape, new_data.shape
            s0 = 0.2
            # DOTO -add leaveoneout validation
            res = minimize(optimiaze_func, s0, args=(diff_mat, d, N), method='nelder-mead',
                           options={'xtol': 1e-8, 'disp': False, 'maxiter': 6})
            eta = res.x
            diff_mat0 = - 0.5 * (diff_mat / (sigma ** 2 + eta ** 2))
            diff_mat1 = np.sum(np.exp(diff_mat0), axis=0)
            diff_mat2 = -(1.0 / N) * np.sum(np.log2((1.0 / N) * diff_mat1))
            I_XT_temp += diff_mat2 - d * np.log2((sigma ** 2) / (eta ** 2 + sigma ** 2))
            # print diff_mat2 - d*np.log2((sigma**2)/(eta**2+sigma**2))
        I_XT_temp /= len(batch_points)
        I_XT.append(I_XT_temp)
    sys.stdout.flush()
    return I_XT
Example 41
def test(mfault):

    from clawpack.clawutil.data import ClawData

    length_scale = 1.e-3  # m to km
    probdata = ClawData()
    probdata.read('setprob.data', force=True)
    fault = dtopotools.Fault()
    fault.read('fault.data')

    mapping = Mapping(fault)

    domain_depth = probdata.domain_depth
    domain_width = probdata.domain_width

    # num of cells here determined in a similar fashion to that in setrun.py
    dx = mapping.fault_width/mfault
    num_cells_above = numpy.rint(mapping.fault_depth/dx)
    dy = mapping.fault_depth/num_cells_above
    mx = int(numpy.ceil(domain_width/dx))  # mx
    my = int(numpy.ceil(domain_depth/dy))  # my
    mr = mx - mfault

    x = linspace(mapping.xcenter-0.5*mapping.fault_width - numpy.floor(mr/2.0)*dx,
                 mapping.xcenter+0.5*mapping.fault_width + numpy.ceil(mr/2.0)*dx, mx+1)
    y = linspace(-my*dy, 0.0, my+1)
    xc, yc = meshgrid(x, y)
    xp, yp = mapping.mapc2p(xc, yc)

    figure()
    plot(xp*length_scale, yp*length_scale, 'k-')
    plot(xp.T*length_scale, yp.T*length_scale, 'k-')
    plot((mapping.xp1*length_scale, mapping.xp2*length_scale),
         (mapping.yp1*length_scale, mapping.yp2*length_scale), '-g', linewidth=3)
    axis('scaled')
Example 42
def test(mfault):

    from clawpack.clawutil.data import ClawData

    probdata = ClawData()
    probdata.read('setprob.data', force=True)
    fault = dtopotools.Fault(coordinate_specification='top_center')
    fault.read('fault.data')

    mapping = Mapping(fault)

    domain_depth = probdata.domain_depth
    domain_width = probdata.domain_width

    # num of cells here determined in a similar fashion to that in setrun.py
    dx = mapping.fault_width/mfault
    num_cells_above = numpy.rint(mapping.fault_depth/dx)
    dy = mapping.fault_depth/num_cells_above
    mx = int(numpy.ceil(domain_width/dx))  # mx
    my = int(numpy.ceil(domain_depth/dy))  # my
    mr = mx - mfault

    x = linspace(mapping.xcenter-0.5*mapping.fault_width - numpy.floor(mr/2.0)*dx,
                 mapping.xcenter+0.5*mapping.fault_width + numpy.ceil(mr/2.0)*dx, mx+1)
    y = linspace(-my*dy, 0.0, my+1)
    xc, yc = meshgrid(x, y)
    xp, yp = mapping.mapc2p(xc, yc)

    figure()
    plot(xp, yp, 'k-')
    plot(xp.T, yp.T, 'k-')
    plot((mapping.xp1, mapping.xp2), (mapping.yp1, mapping.yp2), '-g')
    axis('scaled')
Example 43
def topological_defect_array(orientation_field):
    """
    Returns a matrix of topological defects for the given orientation field.
    Each entry in the matrix is the charge of the defect.
    """
    JX = np.diff(orientation_field, axis=0)
    JY = np.diff(orientation_field, axis=1)
    JX += math.pi * (JX < -math.pi/2.0) - math.pi * (JX > math.pi/2.0)
    JY += math.pi * (JY < -math.pi/2.0) - math.pi * (JY > math.pi/2.0)
    return np.rint((np.diff(JY, axis=0) - np.diff(JX, axis=1))/math.pi)
Example 44
def cumulative_score(ground_truth, estimation, largest_error, integer_rounding=True):
    if len(ground_truth) != len(estimation):
        er = "ground_truth and estimation have different number of elements"
        raise Exception(er)

    if integer_rounding:
        _estimation = numpy.rint(estimation)
    else:
        _estimation = estimation

    N_e_le_j = (numpy.absolute(_estimation - ground_truth) <= largest_error).sum()
    return N_e_le_j * 1.0 / len(ground_truth)