The following are code examples showing how to use . They are extracted from open source Python projects. You can vote up the examples you like or vote down the examples you don't like.
Example 1
def reconstruct_batch(self, output, batch_id, chosen_labels=None):
    """ Create the song associated with the network output
    Args:
        output (list[np.Array]): The output of the network (size batch_size*output_dim)
        batch_id (int): The batch that we must reconstruct
        chosen_labels (list[np.Array[batch_size, int]]): the sampled class at each timestep (useful to reconstruct the generated song)
    Return:
        Song: The reconstructed song
    """
    raise NotImplementedError('Abstract class')
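A minimal usage sketch, following only the shapes described in the docstring: the network output is a list with one array per timestep, each of shape (batch_size, output_dim), and the method is called once per batch element. The dimensions and the builder object below are assumptions chosen for illustration, not taken from the project.

import numpy as np

# Hypothetical shapes, chosen only to illustrate the expected input layout
batch_size, output_dim, nb_timesteps = 4, 13, 16
# One array per timestep, each of shape (batch_size, output_dim), as in the docstring
output = [np.random.rand(batch_size, output_dim) for _ in range(nb_timesteps)]
# A concrete batch builder subclass would then be called once per batch element, e.g.:
# songs = [builder.reconstruct_batch(output, batch_id) for batch_id in range(batch_size)]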
Example 2
def reconstruct_batch(self, output, batch_id, chosen_labels=None):
    """ Create the song associated with the network output
    Args:
        output (list[np.Array]): The output of the network (size batch_size*output_dim)
        batch_id (int): The batch id
        chosen_labels (list[np.Array[batch_size, int]]): the sampled class at each timestep (useful to reconstruct the generated song)
    Return:
        Song: The reconstructed song
    """
    assert Relative.HAS_EMPTY == True

    processed_song = Relative.RelativeSong()
    processed_song.first_note = music.Note()
    processed_song.first_note.note = 56  # TODO: Define what should be the first note
    print('Reconstruct')
    for i, note in enumerate(output):
        relative = Relative.RelativeNote()
        # If we sampled the output, we should use the class that was actually selected
        if not chosen_labels or i == len(chosen_labels):  # If chosen_labels, the last generated note has not been sampled
            chosen_label = int(np.argmax(note[batch_id, :]))  # Cast np.int64 to int to avoid compatibility issues with mido
        else:
            chosen_label = int(chosen_labels[i][batch_id])
        print(chosen_label, end=' ')  # TODO: Add a text output connector
        if chosen_label == 0:  # <next> token
            relative.pitch_class = None
            #relative.scale =  # Not used
            #relative.prev_tick =
        else:
            relative.pitch_class = chosen_label - 1
            #relative.scale =
            #relative.prev_tick =
        processed_song.notes.append(relative)
    print()
    return self.reconstruct_song(processed_song)
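The per-timestep decoding used in the loop above can be exercised on its own. The standalone sketch below mirrors its label convention (class 0 stands for the <next> token, class k maps to pitch class k-1); the function name and the toy distribution are assumptions for illustration, not part of the project.

import numpy as np

def decode_timestep(note, batch_id, sampled_label=None):
    """Mirror of the per-timestep decoding above: pick a class, then map it to a pitch class."""
    if sampled_label is None:
        chosen_label = int(np.argmax(note[batch_id, :]))  # greedy choice when no sample is given
    else:
        chosen_label = int(sampled_label)
    if chosen_label == 0:       # <next> token
        return None
    return chosen_label - 1     # remaining classes are shifted down by one

# Toy distribution over 13 classes for a batch of 4
note = np.random.rand(4, 13)
print(decode_timestep(note, batch_id=0))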
Example 3
def score(self, xnew):
    """ Generate scores for new x values
    xNew should be an array-like object where each row represents a test point
    Return the predicted mean and standard deviation [mu,s]
    @param{np.Array} xnew. A numpy array where each row corresponds to an observation
    @output{Array} mu. A list containing predicted mean values
    @output{Array} s. A list containing predicted standard deviations
    """
    self._validate_xnew(xnew)

    #mu,sd = self.gp.predict(xnew,return_std=True)
    #return {'mu':mu.T.tolist()[0], 'sd':sd.tolist()}

    #K_trans = self.kernel(X, self.xTrain)
    #y_mean = K_trans.dot(self.alpha_)  # Line 4 (y_mean = f_star)
    #y_mean = self.y_train_mean + y_mean  # undo normal.
    # Compute variance of predictive distribution
    #y_var = self.kernel_.diag(X)
    #y_var -= np.einsum("ki,kj,ij->k", K_trans, K_trans, K_inv)
    # Check if any of the variances is negative because of
    # numerical issues. If yes: set the variance to 0.
    #y_var_negative = y_var < 0
    #if np.any(y_var_negative):
    #    warnings.warn("Predicted variances smaller than 0. "
    #                  "Setting those variances to 0.")
    #    y_var[y_var_negative] = 0.0
    #return y_mean, np.sqrt(y_var)
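All of the prediction logic in this example is commented out; the commented lines follow the predictive mean/variance computation of Gaussian process regression and hint at a call to self.gp.predict(xnew, return_std=True). A minimal standalone sketch of that path, assuming a fitted scikit-learn GaussianProcessRegressor (an assumption, not confirmed by the snippet), looks like this:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

# Fit a toy GP and query the predictive mean and standard deviation,
# as the commented-out self.gp.predict(..., return_std=True) line suggests.
X_train = np.random.rand(20, 2)
y_train = np.sin(X_train[:, 0]) + np.cos(X_train[:, 1])
gp = GaussianProcessRegressor(kernel=RBF()).fit(X_train, y_train)

xnew = np.random.rand(5, 2)
mu, sd = gp.predict(xnew, return_std=True)   # predictive mean and std for each test point
print(mu.tolist(), sd.tolist())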