The following are code examples showing how to use numpy.shape(). They are extracted from open source Python projects.
Example 1
def _cascade_evaluation(self, X_test, y_test):
    """ Evaluate the accuracy of the cascade using X and y.

    :param X_test: np.array
        Array containing the test input samples.
        Must be of the same shape as training data.

    :param y_test: np.array
        Test target values.

    :return: float
        the cascade accuracy.
    """
    casc_pred_prob = np.mean(self.cascade_forest(X_test), axis=0)
    casc_pred = np.argmax(casc_pred_prob, axis=1)
    casc_accuracy = accuracy_score(y_true=y_test, y_pred=casc_pred)
    print('Layer validation accuracy = {}'.format(casc_accuracy))
    return casc_accuracy
Example 2
def _create_feat_arr(self, X, prf_crf_pred):
    """ Concatenate the original feature vector with the prediction
    probabilities of a cascade layer.

    :param X: np.array
        Array containing the input samples.
        Must be of shape [n_samples, data] where data is a 1D array.

    :param prf_crf_pred: list
        Prediction probabilities by a cascade layer for X.

    :return: np.array
        Concatenation of X and the predicted probabilities.
        To be used for the next layer in a cascade forest.
    """
    swap_pred = np.swapaxes(prf_crf_pred, 0, 1)
    add_feat = swap_pred.reshape([np.shape(X)[0], -1])
    feat_arr = np.concatenate([add_feat, X], axis=1)
    return feat_arr
Example 3
def fit(self, X, y):
    """ Training the gcForest on input data X and associated target y.

    :param X: np.array
        Array containing the input samples.
        Must be of shape [n_samples, data] where data is a 1D array.

    :param y: np.array
        1D array containing the target values.
        Must be of shape [n_samples]
    """
    if np.shape(X)[0] != len(y):
        raise ValueError('Sizes of y and X do not match.')
    mgs_X = self.mg_scanning(X, y)
    _ = self.cascade_forest(mgs_X, y)
Example 4
def postProcess(PDFeatures1, which):
    PDFeatures2 = np.copy(PDFeatures1)
    cols = np.shape(PDFeatures2)[1]
    # Zero out any infinities and NaNs, column by column
    for x in range(cols):
        indinf = np.where(np.isinf(PDFeatures2[:, x]))[0]
        if len(indinf) > 0:
            PDFeatures2[indinf, x] = 0
        indnan = np.where(np.isnan(PDFeatures2[:, x]))[0]
        if len(indnan) > 0:
            PDFeatures2[indnan, x] = 0
    # Replace out-of-range values in the first column with a random value in (-0.99, -0.75)
    indLN = np.where(PDFeatures2[:, 0] < -1)[0]
    for x in indLN:
        PDFeatures2[x, 0] = np.random.uniform(-0.99, -0.75)
    term1 = (PDFeatures2[:, 2] + PDFeatures2[:, 3] + PDFeatures2[:, 5]) / 3.
    print(term1)
    PDFeatures2[:, 1] = 1. - term1
    print("PDF", PDFeatures2[:, 1])
    return PDFeatures2
Example 5
def get_batch():
    ran = random.randint(600, data_size)
    image = []
    label = []
    label_0 = []
    n_pic = ran
    for i in range(batch_size * n_steps):
        frame_0 = cv2.imread('./cropedoriginalPixel2/%d.jpg' % (n_pic + i), 0)
        frame_0 = cv2.resize(frame_0, (LONGITUDE, LONGITUDE))
        frame_0 = np.array(frame_0).reshape(-1)
        image.append(frame_0)
    for i in range(batch_size):
        frame_1 = cv2.imread('./cropedoriginalPixel2/%d.jpg' % (n_pic + batch_size * (i + 1)), 0)
        frame_1 = cv2.resize(frame_1, (LONGITUDE, LONGITUDE))
        frame_1 = np.array(frame_1).reshape(-1)
        label.append(frame_1)
    for i in range(batch_size):
        frame_2 = cv2.imread('./cropedoriginalUS2/%d.jpg' % (n_pic + batch_size * (i + 1)), 0)
        frame_2 = cv2.resize(frame_2, (LONGITUDE, LONGITUDE))
        frame_2 = np.array(frame_2).reshape(-1)
        label_0.append(frame_2)
    return image, label, label_0
Example 6
def get_train_batch(noise=0):
    ran = random.randint(600, data_size)
    image = []
    label = []
    n_pic = ran
    for i in range(batch_size):
        frame_0 = cv2.imread('./cropedoriginalPixel2/%d.jpg' % (n_pic + i), 0)
        frame_0 = add_noise(frame_0, n=noise)
        frame_0 = cv2.resize(frame_0, (LONGITUDE, LONGITUDE))
        frame_0 = np.array(frame_0).reshape(-1)
        image.append(frame_0)
    for i in range(batch_size):
        frame_1 = cv2.imread('./cropedoriginalPixel2/%d.jpg' % (n_pic + batch_size * (i + 1)), 0)
        frame_1 = cv2.resize(frame_1, (LONGITUDE, LONGITUDE))
        frame_1 = np.array(frame_1).reshape(-1)
        label.append(frame_1)
    return image, label
Example 7
def get_train_batch(noise=500):
    n_pic = np.random.randint(600, 5800, size=10, dtype='int')
    image = []
    label = []
    for i in range(10):
        frame_0 = cv2.imread('./cropedoriginalPixel2/%d.jpg' % (n_pic[i]), 0)
        frame_0 = add_noise(frame_0, n=noise)
        frame_0 = cv2.resize(frame_0, (24, 24))
        frame_0 = np.array(frame_0).reshape(-1)
        frame_0 = frame_0 / 255.0
        image.append(frame_0)
    for i in range(10):
        frame_1 = cv2.imread('./cropedoriginalPixel2/%d.jpg' % (n_pic[i]), 0)
        frame_1 = cv2.resize(frame_1, (24, 24))
        frame_1 = np.array(frame_1).reshape(-1)
        frame_1 = gray2binary(frame_1)
        label.append(frame_1)
    return np.array(image, dtype='float'), np.array(label, dtype='float')
Example 8
def get_test_batch(noise=500):
    n_pic = np.random.randint(5800, 6000, size=10, dtype='int')
    image = []
    label = []
    for i in range(10):
        frame_0 = cv2.imread('./cropedoriginalPixel2/%d.jpg' % (n_pic[i]), 0)
        frame_0 = add_noise(frame_0, n=noise)
        frame_0 = cv2.resize(frame_0, (24, 24))
        frame_0 = np.array(frame_0).reshape(-1)
        frame_0 = frame_0 / 255.0
        image.append(frame_0)
    for i in range(10):
        frame_1 = cv2.imread('./cropedoriginalPixel2/%d.jpg' % (n_pic[i]), 0)
        frame_1 = cv2.resize(frame_1, (24, 24))
        frame_1 = np.array(frame_1).reshape(-1)
        frame_1 = gray2binary(frame_1)
        label.append(frame_1)
    return np.array(image, dtype='float'), np.array(label, dtype='float')
Example 9
def get_data(datadir):
    # assume each image is 512x256, split into left and right halves
    imgs = glob.glob(os.path.join(datadir, '*.jpg'))
    data_X = np.zeros((len(imgs), 3, img_cols, img_rows))
    data_Y = np.zeros((len(imgs), 3, img_cols, img_rows))
    i = 0
    for file in imgs:
        img = cv2.imread(file, cv2.IMREAD_COLOR)
        img = cv2.resize(img, (img_cols * 2, img_rows))
        img = np.swapaxes(img, 0, 2)
        X, Y = split_input(img)
        data_X[i, :, :, :] = X
        data_Y[i, :, :, :] = Y
        i = i + 1
    return data_X, data_Y
Example 10
def load_solar_data():
    with open('solar label.csv', 'r') as csvfile:
        reader = csv.reader(csvfile)
        rows = [row for row in reader]
    labels = np.array(rows, dtype=int)
    print(np.shape(labels))
    with open('solar.csv', 'r') as csvfile:
        reader = csv.reader(csvfile)
        rows = [row for row in reader]
    rows = np.array(rows, dtype=float)
    rows = rows[:104832, :]
    print(np.shape(rows))
    trX = np.reshape(rows.T, (-1, 576))
    print(np.shape(trX))
    m = np.ndarray.max(rows)
    print("maximum value of solar power", m)
    trY = np.tile(labels, (32, 1))
    trX = trX / m
    return trX, trY
Example 11
def _set_x0(self, x0):
    if utils.is_str(x0):
        if type(x0) is not str:
            print(type(x0), x0)
        x0 = eval(x0)
    self.x0 = array(x0, dtype=float, copy=True)
    # should not have column or row, is just 1-D
    if self.x0.ndim == 2 and 1 in self.x0.shape:
        utils.print_warning('input x0 should be a list or 1-D array, trying to flatten ' +
                            str(self.x0.shape) + '-array')
        if self.x0.shape[0] == 1:
            self.x0 = self.x0[0]
        elif self.x0.shape[1] == 1:
            self.x0 = array([x[0] for x in self.x0])
    if self.x0.ndim != 1:
        raise ValueError('x0 must be 1-D array')
    if len(self.x0) <= 1:
        raise ValueError('optimization in 1-D is not supported (code was never tested)')
    try:
        self.x0.resize(self.x0.shape[0])  # 1-D array, not really necessary?!
    except NotImplementedError:
        pass
Example 12
def fCauchy(ftrue, alpha, p):
    """Returns Cauchy model noisy value

    Cauchy with median 1e3*alpha and with p=0.2, zero otherwise

    P(Cauchy > 1,10,100,1000) = 0.25, 0.032, 0.0032, 0.00032
    """
    # expects ftrue to be a np.array
    popsi = np.shape(ftrue)
    fval = ftrue + alpha * np.maximum(0., 1e3 + (_rand(popsi) < p) *
                                      _randn(popsi) / (np.abs(_randn(popsi)) + 1e-199))
    tol = 1e-8
    fval = fval + 1.01 * tol
    idx = ftrue < tol
    try:
        fval[idx] = ftrue[idx]
    except IndexError:  # fval is a scalar
        if idx:
            fval = ftrue
    return fval
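A minimal sketch of calling fCauchy on a vector of fitness values. The _rand and _randn helpers are not defined in this snippet; here they are assumed to be uniform and standard-normal samplers that accept a shape tuple:

import numpy as np

# assumed stand-ins for the module's samplers; both accept a shape tuple
_rand = np.random.random
_randn = np.random.standard_normal

ftrue = np.array([0.5, 1.0, 2.0])
noisy = fCauchy(ftrue, alpha=1.0, p=0.2)
print(noisy)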
Example 13
def __read_nsx_data_variant_a(self, nsx_nb):
    """
    Extract nsx data from a 2.1 .nsx file
    """
    filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])

    # get shape of data
    shape = (
        self.__nsx_databl_param['2.1']('nb_data_points', nsx_nb),
        self.__nsx_basic_header[nsx_nb]['channel_count'])
    offset = self.__nsx_params['2.1']('bytes_in_headers', nsx_nb)

    # read nsx data
    # store as dict for compatibility with higher file specs
    data = {1: np.memmap(
        filename, dtype='int16', shape=shape, offset=offset)}

    return data
Example 14
def __read_nsx_data_variant_b(self, nsx_nb):
    """
    Extract nsx data (blocks) from a 2.2 or 2.3 .nsx file.
    Blocks can arise if the recording was paused by the user.
    """
    filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])

    data = {}
    for data_bl in self.__nsx_data_header[nsx_nb].keys():
        # get shape and offset of data
        shape = (
            self.__nsx_data_header[nsx_nb][data_bl]['nb_data_points'],
            self.__nsx_basic_header[nsx_nb]['channel_count'])
        offset = self.__nsx_data_header[nsx_nb][data_bl]['offset_to_data_block']

        # read data
        data[data_bl] = np.memmap(
            filename, dtype='int16', shape=shape, offset=offset)

    return data
Example 15
def unscentedTransform(X, Wm, Wc, f):
    Y = None
    Ymean = None
    fdim = None
    N = np.shape(X)[1]
    for j in range(0, N):
        fImage = f(X[:, j])
        if Y is None:
            fdim = np.size(fImage)
            Y = np.zeros((fdim, np.shape(X)[1]))
            Ymean = np.zeros(fdim)
        Y[:, j] = fImage
        Ymean += Wm[j] * Y[:, j]
    Ycov = np.zeros((fdim, fdim))
    for j in range(0, N):
        meanAdjustedYj = Y[:, j] - Ymean
        Ycov += np.outer(Wc[j] * meanAdjustedYj, meanAdjustedYj)
    return Y, Ymean, Ycov
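unscentedTransform depends only on numpy, so a toy call is easy to set up. The sigma points and weights below are illustrative placeholders rather than the output of a real sigma-point rule:

import numpy as np

# three 2-D sigma points (one per column) with matching mean/covariance weights
X = np.array([[0.0, 1.0, -1.0],
              [0.0, 1.0, -1.0]])
Wm = np.array([0.5, 0.25, 0.25])
Wc = np.array([0.5, 0.25, 0.25])

f = lambda v: np.array([v[0] ** 2, v[0] + v[1]])  # nonlinearity to push through
Y, Ymean, Ycov = unscentedTransform(X, Wm, Wc, f)
print(Ymean)  # weighted mean of the transformed points
print(Ycov)   # weighted covariance around that mean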
Example 16
def predict(self):
    try:
        X, Wm, Wc = sigmaPoints(self.xa, self.Pa)
    except:
        warnings.warn('Encountered a matrix that is not positive definite in the sigma points calculation at the predict step')
        self.Pa = nearpd(self.Pa)
        X, Wm, Wc = sigmaPoints(self.xa, self.Pa)

    fX, x, Pxx = unscentedTransform(X, Wm, Wc, self.fa)
    x = np.asscalar(x)
    Pxx = np.asscalar(Pxx)
    Pxv = 0.
    N = np.shape(X)[1]
    for j in range(0, N):
        Pxv += Wc[j] * fX[0, j] * X[3, j]

    self.xa = np.array(((x,), (0.,), (0.,), (0.,)))
    self.Pa = np.array(((Pxx, Pxv,    0.,       0.),
                        (Pxv, self.R, 0.,       0.),
                        (0.,  0.,     self.Q,   self.cor),
                        (0.,  0.,     self.cor, self.R)))
Example 17
def precompute(self):
    # CSR_W = cuda_cffi.cusparse.CSR.to_CSR(self.st['W_gpu'], diag_type=True)
    # Dia_W_cpu = scipy.sparse.dia_matrix((self.st['M'], self.st['M']), dtype=dtype)
    # Dia_W_cpu = scipy.sparse.dia_matrix((self.st['W'], 0), shape=(self.st['M'], self.st['M']))
    # Dia_W_cpu = scipy.sparse.diags(self.st['W'], format="csr", dtype=dtype)
    # CSR_W = cuda_cffi.cusparse.CSR.to_CSR(Dia_W_cpu)

    self.st['pHp_gpu'] = self.CSRH.gemm(self.CSR)
    self.st['pHp'] = self.st['pHp_gpu'].get()
    print('untrimmed', self.st['pHp'].nnz)
    self.truncate_selfadjoint(1e-5)
    print('trimmed', self.st['pHp'].nnz)
    self.st['pHp_gpu'] = cuda_cffi.cusparse.CSR.to_CSR(self.st['pHp'])
    # self.st['pHWp_gpu'] = self.CSR.conj().gemm(CSR_W, transA=cuda_cffi.cusparse.CUSPARSE_OPERATION_TRANSPOSE)
    # self.st['pHWp_gpu'] = self.st['pHWp_gpu'].gemm(self.CSR, transA=cuda_cffi.cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE)
Example 18
def plan(self, om, Nd, Kd, Jd):
    self.debug = 0  # debug
    n_shift = tuple(0 * x for x in Nd)
    self.st = plan(om, Nd, Kd, Jd)
    self.Nd = self.st['Nd']  # backup
    self.sn = self.st['sn']  # backup
    self.ndims = len(self.st['Nd'])  # dimension
    self.linear_phase(n_shift)  # calculate the linear phase thing
    self.st['pH'] = self.st['p'].getH().tocsr()
    self.st['pHp'] = self.st['pH'].dot(self.st['p'])
    self.NdCPUorder, self.KdCPUorder, self.nelem = preindex_copy(self.st['Nd'], self.st['Kd'])
    # self.st['W'] = self.pipe_density()
    self.shape = (self.st['M'], numpy.prod(self.st['Nd']))
    # print('untrimmed', self.st['pHp'].nnz)
    # self.truncate_selfadjoint(1e-1)
    # print('trimmed', self.st['pHp'].nnz)
Example 19
def create_dummy_data(self):
    self.datasetname = 'sherlock'
    self.read_metadata_json(self.dataset.getMetadata())
    self.worddict, self.lenwords, self.randwords = self.dataset.loadVocabulary()
    # normalized probability matrix, words in a topic
    self.email_prob = self.dataset.getWordsInTopicMatrix()
    self.numtopics = numpy.shape(self.email_prob)[0]
    print(self.numtopics)
    # normalized probability matrix, emails in a topic
    self.num_emails = len(self.metadata)
    self.wordprob = self.dataset.getDocsInTopicMatrix()
    # distance matrix between topics
    self.distance_matrix = self.dataset.getTopicDistanceMatrix(self.wordprob)
Example 20
def generateWekaFile(X, Y, features, path, name):
    f = open(path + name + '.arff', 'w')
    f.write("@relation '" + name + "'\n\n")
    for feat in features:
        f.write("@attribute " + feat + " numeric\n")
    f.write("@attribute cluster {True,False}\n\n")
    f.write("@data\n\n")
    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            if np.isnan(X[i, j]):
                f.write("?,")
            else:
                f.write(str(X[i, j]) + ",")
        if Y[i] == 1.0 or Y[i] == True:
            f.write("True\n")
        else:
            f.write("False\n")
    f.close()
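generateWekaFile only needs numpy, so it can be exercised with toy data; the feature names and file name below are made up for the illustration:

import numpy as np

X = np.array([[1.0, np.nan],
              [0.5, 2.0]])
Y = np.array([1.0, 0.0])
generateWekaFile(X, Y, features=['f1', 'f2'], path='./', name='toy')
# ./toy.arff now holds two instances; the NaN is written as the ARFF missing marker '?'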
Example 21
def mahalanobis_distance(difference, num_random_features):
    num_samples, _ = np.shape(difference)
    sigma = np.cov(np.transpose(difference))
    mu = np.mean(difference, 0)

    if num_random_features == 1:
        stat = float(num_samples * mu ** 2) / float(sigma)
    else:
        try:
            linalg.inv(sigma)
        except LinAlgError:
            print('covariance matrix is singular. Pvalue returned is 1.1')
            warnings.warn('covariance matrix is singular. Pvalue returned is 1.1')
            return 0
        stat = num_samples * mu.dot(linalg.solve(sigma, np.transpose(mu)))

    return chi2.sf(stat, num_random_features)
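A sketch of calling mahalanobis_distance on synthetic data, assuming the imports the function body relies on (numpy, scipy.linalg, scipy.stats.chi2, and numpy's LinAlgError):

import warnings

import numpy as np
from numpy.linalg import LinAlgError
from scipy import linalg
from scipy.stats import chi2

rng = np.random.RandomState(0)
difference = rng.randn(500, 3)  # 500 paired differences over 3 random features
pvalue = mahalanobis_distance(difference, num_random_features=3)
print(pvalue)  # p-value for the null hypothesis that the mean difference is zero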
Example 22
def compute_pvalue(self, samples):
    samples = self._make_two_dimensional(samples)
    self.shape = samples.shape[1]
    stein_statistics = []
    for f in range(self.number_of_random_frequencies):
        # This is a little bit of a bug, but it holds even for this choice
        random_frequency = np.random.randn()
        matrix_of_stats = self.stein_stat(random_frequency=random_frequency, samples=samples)
        stein_statistics.append(matrix_of_stats)
    normal_under_null = np.hstack(stein_statistics)
    normal_under_null = self._make_two_dimensional(normal_under_null)
    return mahalanobis_distance(normal_under_null, normal_under_null.shape[1])
Example 23
def extractOuterGrid(img):
    rows, cols = np.shape(img)
    maxArea = 0
    point = [0, 0]
    imgOriginal = img.copy()
    # Flood-fill from every white pixel to find the largest connected region
    for i in range(rows):
        for j in range(cols):
            if img[i][j] == 255:
                img, area, dummy = customFloodFill(img, [i, j], 100, 0)
                if area > maxArea:
                    maxArea = area
                    point = [i, j]
    img = imgOriginal
    img, area, dummy = customFloodFill(img, [point[0], point[1]], 100, 0)
    # Keep only the largest region: its pixels become white, everything else black
    for i in range(rows):
        for j in range(cols):
            if img[i][j] == 100:
                img[i][j] = 255
            else:
                img[i][j] = 0
    return img, point
Example 24
def centerDigit(img):
    xMean, yMean, count = 0, 0, 0
    (x, y) = np.shape(img)
    for i in range(x):
        for j in range(y):
            if img[i][j] == 255:
                xMean, yMean, count = (xMean + i), (yMean + j), (count + 1)
    if count == 0:
        return img
    # integer division keeps the displacements usable as array indices
    xMean, yMean = (xMean // count), (yMean // count)
    xDisp, yDisp = (xMean - (x // 2)), (yMean - (y // 2))
    newImg = np.zeros((x, y), np.uint8)
    for i in range(x):
        for j in range(y):
            if img[i][j] == 255:
                newImg[i - xDisp][j - yDisp] = 255
    return newImg
Example 25
def outlier_identification(self, model, x_train, y_train):
    # Split the training data into an extra set of test
    x_train_split, x_test_split, y_train_split, y_test_split = train_test_split(x_train, y_train)
    print('\nOutlier shapes')
    print(np.shape(x_train_split), np.shape(x_test_split), np.shape(y_train_split), np.shape(y_test_split))
    model.fit(x_train_split, y_train_split)
    y_predicted = model.predict(x_test_split)
    residuals = np.absolute(y_predicted - y_test_split)
    rmse_pred_vs_actual = self.rmse(y_predicted, y_test_split)
    outliers_mask = residuals >= rmse_pred_vs_actual
    outliers_mask = np.concatenate([np.zeros((np.shape(y_train_split)[0],), dtype=bool), outliers_mask])
    not_an_outlier = outliers_mask == 0
    # Resample the training set from split, since the set was randomly split
    x_out = np.insert(x_train_split, np.shape(x_train_split)[0], x_test_split, axis=0)
    y_out = np.insert(y_train_split, np.shape(y_train_split)[0], y_test_split, axis=0)
    return x_out[not_an_outlier, ], y_out[not_an_outlier, ]
Example 26
def __init__(self, to_plot=True):
    self.state = np.array([0, 0])
    self.observation_shape = np.shape(self.get_state())[0]

    if to_plot:
        plt.ion()
        fig = plt.figure()
        ax1 = fig.add_subplot(111, aspect='equal')
        # ax1.axis('off')
        plt.xlim([-0.5, 5.5])
        plt.ylim([-0.5, 5.5])
        self.g1 = ax1.add_artist(plt.Circle((self.state[0], self.state[1]), 0.1, color='red'))
        self.fig = fig
        self.ax1 = ax1
        self.fig.canvas.draw()
        self.fig.canvas.flush_events()
Example 27
def Dreamzs_finalize(MCMCPar, Sequences, Z, outDiag, fx, iteration, iloc, pCR, m_z, m_func):
    # Start with CR
    outDiag.CR = outDiag.CR[0:iteration - 1, 0:pCR.shape[1] + 1]
    # Then R_stat
    outDiag.R_stat = outDiag.R_stat[0:iteration - 1, 0:MCMCPar.n + 1]
    # Then AR: adjust last value (due to possible sudden end of for loop)
    outDiag.AR = outDiag.AR[0:iteration - 1, 0:2]
    # Then Sequences
    Sequences = Sequences[0:iloc + 1, 0:MCMCPar.n + 2, 0:MCMCPar.seq]
    # Then the archive Z
    Z = Z[0:m_z, 0:MCMCPar.n + 2]
    if MCMCPar.savemodout == True:
        # remove zeros
        fx = fx[:, 0:m_func]
    return Sequences, Z, outDiag, fx
Example 28
def load_weights(model, sess, weight_file):
    """
    Load weights from the given weight file (used to load pretrained weights of the VGG model)

    Args:
        model : model to restore variables to
        sess : tensorflow session
        weight_file : weight file name
    """
    weights = np.load(weight_file)
    keys = sorted(weights.keys())
    for i, k in enumerate(keys):
        if i <= 29:
            print('-- %s %s --' % (i, k))
            print(np.shape(weights[k]))
            sess.run(model.parameters_conv[i].assign(weights[k]))
Example 29
def update_canvas(widget=None):
    global r, Z, res, rects, painted_rects
    if widget is None:
        widget = w
    # Update display values
    r = np.repeat(np.repeat(Z, r.shape[0] // Z.shape[0], 0), r.shape[1] // Z.shape[1], 1)
    # If we're letting freeform painting happen, delete the painted rectangles
    for p in painted_rects:
        w.delete(p)
    painted_rects = []
    for i in range(Z.shape[0]):
        for j in range(Z.shape[1]):
            w.itemconfig(int(rects[i, j]), fill=rb(255 * Z[i, j]), outline=rb(255 * Z[i, j]))
Example 30
def logscale_spec(spec, sr=44100, factor=20.):
    timebins, freqbins = np.shape(spec)

    scale = np.linspace(0, 1, freqbins) ** factor
    scale *= (freqbins - 1) / max(scale)
    scale = np.unique(np.round(scale)).astype(int)  # cast so the values can be used as indices

    # create spectrogram with new freq bins
    newspec = np.complex128(np.zeros([timebins, len(scale)]))
    for i in range(0, len(scale)):
        if i == len(scale) - 1:
            newspec[:, i] = np.sum(spec[:, scale[i]:], axis=1)
        else:
            newspec[:, i] = np.sum(spec[:, scale[i]:scale[i + 1]], axis=1)

    # list center freq of bins
    allfreqs = np.abs(np.fft.fftfreq(freqbins * 2, 1. / sr)[:freqbins + 1])
    freqs = []
    for i in range(0, len(scale)):
        if i == len(scale) - 1:
            freqs += [np.mean(allfreqs[scale[i]:])]
        else:
            freqs += [np.mean(allfreqs[scale[i]:scale[i + 1]])]

    return newspec, freqs
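A quick way to try logscale_spec, with random data standing in for an STFT magnitude matrix:

import numpy as np

spec = np.random.rand(100, 257)  # 100 time bins x 257 linear frequency bins
newspec, freqs = logscale_spec(spec, sr=44100, factor=20.)
print(newspec.shape, len(freqs))  # fewer columns: bins merged on a log-like scale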
Example 31
def weightVariable(shape, std=1.0, name=None):
    # Create a set of weights initialized with truncated normal random values
    name = 'weights' if name is None else name
    return tf.get_variable(name, shape, initializer=tf.truncated_normal_initializer(stddev=std / math.sqrt(shape[0])))
Example 32
def biasVariable(shape, bias=0.1, name=None):
    # Create a set of bias nodes initialized with a constant 0.1
    name = 'biases' if name is None else name
    return tf.get_variable(name, shape, initializer=tf.constant_initializer(bias))
Example 33
def max_pool(x, shape, name=None):
    # Return an op that performs max pooling across a 2D image
    return tf.nn.max_pool(x, ksize=[1] + shape + [1], strides=[1] + shape + [1], padding='SAME', name=name)
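The weightVariable, biasVariable, and max_pool helpers above target the TensorFlow 1.x graph API. A minimal wiring sketch, assuming the tensorflow.compat.v1 shim is available; the layer sizes are made up for illustration:

import math

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

x = tf.placeholder(tf.float32, [None, 28, 28, 1])
with tf.variable_scope('conv1'):
    W = weightVariable([5, 5, 1, 32])  # 5x5 patches, 1 input channel, 32 filters
    b = biasVariable([32])
h = tf.nn.relu(tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') + b)
pooled = max_pool(h, shape=[2, 2])  # 2x2 pooling halves each spatial dimension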
Example 34
def max_pool3d(x, shape, name=None):
    # Return an op that performs max pooling across a 3D volume
    return tf.nn.max_pool3d(x, ksize=[1] + shape + [1], strides=[1] + shape + [1], padding='SAME', name=name)
Example 35
def plotFields(layer, fieldShape=None, channel=None, figOffset=1, cmap=None, padding=0.01):
    # Receptive Fields Summary
    try:
        W = layer.W
    except:
        W = layer
    wp = W.eval().transpose()
    if len(np.shape(wp)) < 4:  # Fully connected layer, has no shape
        fields = np.reshape(wp, list(wp.shape[0:-1]) + fieldShape)
    else:  # Convolutional layer already has shape
        features, channels, iy, ix = np.shape(wp)
        if channel is not None:
            fields = wp[:, channel, :, :]
        else:
            fields = np.reshape(wp, [features * channels, iy, ix])

    perRow = int(math.floor(math.sqrt(fields.shape[0])))
    perColumn = int(math.ceil(fields.shape[0] / float(perRow)))

    fig = mpl.figure(figOffset)
    mpl.clf()

    # Using image grid
    from mpl_toolkits.axes_grid1 import ImageGrid
    grid = ImageGrid(fig, 111, nrows_ncols=(perRow, perColumn), axes_pad=padding, cbar_mode='single')
    for i in range(0, np.shape(fields)[0]):
        im = grid[i].imshow(fields[i], cmap=cmap)

    grid.cbar_axes[0].colorbar(im)
    mpl.title('%s Receptive Fields' % layer.name)

    mpl.figure(figOffset + 1)
    mpl.clf()
    mpl.imshow(np.sum(np.abs(fields), 0), cmap=cmap)
    mpl.title('%s Total Absolute Input Dependency' % layer.name)
    mpl.colorbar()
Example 36
def __init__(self, input, shape, name, strides=[1, 1, 1, 1], std=1.0, bias=0.1):
    self.input = input
    self.units = shape[-1]
    self.shape = shape
    self.strides = strides
    self.name = name
    self.initialize(std=std, bias=bias)
    self.setupOutput()
    self.setupSummary()
Example 37
def initialize(self, std=1.0, bias=0.1):
    with tf.variable_scope(self.name):
        self.W = weightVariable(self.shape, std=std)  # YxX patch, Z contrast, outputs to N neurons
        self.b = biasVariable([self.shape[-1]], bias=bias)  # N bias variables to go with the N neurons
Example 38
def __init__(self, input, shape, name, strides=[1, 1, 1, 1, 1], std=1.0, bias=0.1):
    super(Conv3D, self).__init__(input, shape, name, strides, std, bias)
Example 39
def __init__(self, input, shape, name):
    self.shape = shape
    super(MaxPool, self).__init__(input, name)
Example 40
def setupOutput(self):
    with tf.variable_scope(self.name):
        self.output = max_pool(self.input, shape=self.shape)
Example 41
def dense_to_one_hot(labels_dense, num_classes=10):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
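dense_to_one_hot is self-contained, so a quick sanity check is straightforward:

import numpy

labels = numpy.array([0, 2, 1])
print(dense_to_one_hot(labels, num_classes=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]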
Example 42
def __init__(self, images, labels, fake_data=False):
    if fake_data:
        self._num_examples = 10000
    else:
        assert images.shape[0] == labels.shape[0], (
            "images.shape: %s labels.shape: %s" % (images.shape, labels.shape))
        self._num_examples = images.shape[0]

        # Convert shape from [num examples, rows, columns, depth]
        # to [num examples, rows*columns] (assuming depth == 1)
        self.imageShape = images.shape[1:]
        self.imageChannels = self.imageShape[2]
        images = images.reshape(images.shape[0], images.shape[1] * images.shape[2] * images.shape[3])
        # Convert from [0, 255] -> [0.0, 1.0].
        images = images.astype(numpy.float32)
        images = numpy.multiply(images, 1.0 / 255.0)

    self._images = images
    self._labels = labels
    try:
        if len(numpy.shape(self._labels)) == 1:
            self._labels = dense_to_one_hot(self._labels, len(numpy.unique(self._labels)))
    except:
        traceback.print_exc()
    self._epochs_completed = 0
    self._index_in_epoch = 0
Example 43
def plotFields(layer, fieldShape=None, channel=None, maxFields=25, figName='ReceptiveFields', cmap=None, padding=0.01):
    # Receptive Fields Summary
    W = layer.W
    wp = W.eval().transpose()
    if len(np.shape(wp)) < 4:  # Fully connected layer, has no shape
        fields = np.reshape(wp, list(wp.shape[0:-1]) + fieldShape)
    else:  # Convolutional layer already has shape
        features, channels, iy, ix = np.shape(wp)
        if channel is not None:
            fields = wp[:, channel, :, :]
        else:
            fields = np.reshape(wp, [features * channels, iy, ix])

    fieldsN = min(fields.shape[0], maxFields)
    perRow = int(math.floor(math.sqrt(fieldsN)))
    perColumn = int(math.ceil(fieldsN / float(perRow)))

    fig = mpl.figure(figName)
    mpl.clf()

    # Using image grid
    from mpl_toolkits.axes_grid1 import ImageGrid
    grid = ImageGrid(fig, 111, nrows_ncols=(perRow, perColumn), axes_pad=padding, cbar_mode='single')
    for i in range(0, fieldsN):
        im = grid[i].imshow(fields[i], cmap=cmap)

    grid.cbar_axes[0].colorbar(im)
    mpl.title('%s Receptive Fields' % layer.name)

    mpl.figure(figName + ' Total')
    mpl.clf()
    mpl.imshow(np.sum(np.abs(fields), 0), cmap=cmap)
    mpl.title('%s Total Absolute Input Dependency' % layer.name)
    mpl.colorbar()
Example 44
def plotOutput(layer, feed_dict, fieldShape=None, channel=None, figOffset=1, cmap=None):
    # Output summary
    W = layer.output
    wp = W.eval(feed_dict=feed_dict)
    if len(np.shape(wp)) < 4:  # Fully connected layer, has no shape
        temp = np.zeros(np.product(fieldShape))
        temp[0:np.shape(wp.ravel())[0]] = wp.ravel()
        fields = np.reshape(temp, [1] + fieldShape)
    else:  # Convolutional layer already has shape
        wp = np.rollaxis(wp, 3, 0)
        features, channels, iy, ix = np.shape(wp)
        if channel is not None:
            fields = wp[:, channel, :, :]
        else:
            fields = np.reshape(wp, [features * channels, iy, ix])

    perRow = int(math.floor(math.sqrt(fields.shape[0])))
    perColumn = int(math.ceil(fields.shape[0] / float(perRow)))
    fields2 = np.vstack([fields, np.zeros([perRow * perColumn - fields.shape[0]] + list(fields.shape[1:]))])
    tiled = []
    for i in range(0, perColumn * perRow, perColumn):
        tiled.append(np.hstack(fields2[i:i + perColumn]))
    tiled = np.vstack(tiled)

    if figOffset is not None:
        mpl.figure(figOffset)
        mpl.clf()
        mpl.imshow(tiled, cmap=cmap)
        mpl.title('%s Output' % layer.name)
        mpl.colorbar()
Example 45
def __init__(self, input, shape, name, std=1.0, bias=0.1):
    self.input = input
    self.units = shape[-1]
    self.shape = shape
    self.name = name
    self.initialize(std=std, bias=bias)
    self.setupOutput()
    self.setupSummary()