comparison main.py @ 3:5b3c08710e47 draft
"planemo upload for repository https://github.com/bgruening/galaxytools/tree/recommendation_training/tools/tool_recommendation_model commit c635df659fe1835679438589ded43136b0e515c6"
| | |
|---|---|
| author | bgruening |
| date | Sat, 09 May 2020 05:38:23 -0400 |
| parents | 76251d1ccdcc |
| children | afec8c595124 |
| 2:76251d1ccdcc | 3:5b3c08710e47 | 
|---|---|
| 18 import utils | 18 import utils | 
| 19 | 19 | 
| 20 | 20 | 
| 21 class PredictTool: | 21 class PredictTool: | 
| 22 | 22 | 
| 23 @classmethod | |
| 24 def __init__(self, num_cpus): | 23 def __init__(self, num_cpus): | 
| 25 """ Init method. """ | 24 """ Init method. """ | 
| 26 # set the number of cpus | 25 # set the number of cpus | 
| 27 cpu_config = tf.ConfigProto( | 26 cpu_config = tf.ConfigProto( | 
| 28 device_count={"CPU": num_cpus}, | 27 device_count={"CPU": num_cpus}, | 
| 30 inter_op_parallelism_threads=num_cpus, | 29 inter_op_parallelism_threads=num_cpus, | 
| 31 allow_soft_placement=True | 30 allow_soft_placement=True | 
| 32 ) | 31 ) | 
| 33 K.set_session(tf.Session(config=cpu_config)) | 32 K.set_session(tf.Session(config=cpu_config)) | 
| 34 | 33 | 
| 35 @classmethod | 34 def find_train_best_network(self, network_config, reverse_dictionary, train_data, train_labels, test_data, test_labels, n_epochs, class_weights, usage_pred, standard_connections, l_tool_freq, l_tool_tr_samples): | 
| 36 def find_train_best_network(self, network_config, reverse_dictionary, train_data, train_labels, test_data, test_labels, n_epochs, class_weights, usage_pred, compatible_next_tools): | |
| 37 """ | 35 """ | 
| 38 Define recurrent neural network and train sequential data | 36 Define recurrent neural network and train sequential data | 
| 39 """ | 37 """ | 
| | 38 # get tools with lowest representation | 
| | 39 lowest_tool_ids = utils.get_lowest_tools(l_tool_freq) | 
| | 40 | 
| 40 print("Start hyperparameter optimisation...") | 41 print("Start hyperparameter optimisation...") | 
| 41 hyper_opt = optimise_hyperparameters.HyperparameterOptimisation() | 42 hyper_opt = optimise_hyperparameters.HyperparameterOptimisation() | 
| 42 best_params, best_model = hyper_opt.train_model(network_config, reverse_dictionary, train_data, train_labels, class_weights) | 43 best_params, best_model = hyper_opt.train_model(network_config, reverse_dictionary, train_data, train_labels, test_data, test_labels, l_tool_tr_samples, class_weights) | 
| 43 | 44 | 
| 44 # define callbacks | 45 # define callbacks | 
| 45 early_stopping = callbacks.EarlyStopping(monitor='loss', mode='min', verbose=1, min_delta=1e-4, restore_best_weights=True) | 46 early_stopping = callbacks.EarlyStopping(monitor='loss', mode='min', verbose=1, min_delta=1e-1, restore_best_weights=True) | 
| 46 predict_callback_test = PredictCallback(test_data, test_labels, reverse_dictionary, n_epochs, compatible_next_tools, usage_pred) | 47 predict_callback_test = PredictCallback(test_data, test_labels, reverse_dictionary, n_epochs, usage_pred, standard_connections, lowest_tool_ids) | 
| 47 | 48 | 
| 48 callbacks_list = [predict_callback_test, early_stopping] | 49 callbacks_list = [predict_callback_test, early_stopping] | 
| 49 | 50 | 
| | 51 batch_size = int(best_params["batch_size"]) | 
| | 52 | 
| 50 print("Start training on the best model...") | 53 print("Start training on the best model...") | 
| 51 train_performance = dict() | 54 train_performance = dict() | 
| 52 if len(test_data) > 0: | 55 trained_model = best_model.fit_generator( | 
| 53 trained_model = best_model.fit( | 56 utils.balanced_sample_generator( | 
| 54 train_data, | 57 train_data, | 
| 55 train_labels, | 58 train_labels, | 
| 56 batch_size=int(best_params["batch_size"]), | 59 batch_size, | 
| 57 epochs=n_epochs, | 60 l_tool_tr_samples | 
| 58 verbose=2, | 61 ), | 
| 59 callbacks=callbacks_list, | 62 steps_per_epoch=len(train_data) // batch_size, | 
| 60 shuffle="batch", | 63 epochs=n_epochs, | 
| 61 validation_data=(test_data, test_labels) | 64 callbacks=callbacks_list, | 
| 62 ) | 65 validation_data=(test_data, test_labels), | 
| 63 train_performance["validation_loss"] = np.array(trained_model.history["val_loss"]) | 66 verbose=2, | 
| 64 train_performance["precision"] = predict_callback_test.precision | 67 shuffle=True | 
| 65 train_performance["usage_weights"] = predict_callback_test.usage_weights | 68 ) | 
| 66 else: | 69 train_performance["validation_loss"] = np.array(trained_model.history["val_loss"]) | 
| 67 trained_model = best_model.fit( | 70 train_performance["precision"] = predict_callback_test.precision | 
| 68 train_data, | 71 train_performance["usage_weights"] = predict_callback_test.usage_weights | 
| 69 train_labels, | 72 train_performance["published_precision"] = predict_callback_test.published_precision | 
| 70 batch_size=int(best_params["batch_size"]), | 73 train_performance["lowest_pub_precision"] = predict_callback_test.lowest_pub_precision | 
| 71 epochs=n_epochs, | 74 train_performance["lowest_norm_precision"] = predict_callback_test.lowest_norm_precision | 
| 72 verbose=2, | |
| 73 callbacks=callbacks_list, | |
| 74 shuffle="batch" | |
| 75 ) | |
| 76 train_performance["train_loss"] = np.array(trained_model.history["loss"]) | 75 train_performance["train_loss"] = np.array(trained_model.history["loss"]) | 
| 77 train_performance["model"] = best_model | 76 train_performance["model"] = best_model | 
| 78 train_performance["best_parameters"] = best_params | 77 train_performance["best_parameters"] = best_params | 
| 79 return train_performance | 78 return train_performance | 
| 80 | 79 | 
| 81 | 80 | 
| 82 class PredictCallback(callbacks.Callback): | 81 class PredictCallback(callbacks.Callback): | 
| 83 def __init__(self, test_data, test_labels, reverse_data_dictionary, n_epochs, next_compatible_tools, usg_scores): | 82 def __init__(self, test_data, test_labels, reverse_data_dictionary, n_epochs, usg_scores, standard_connections, lowest_tool_ids): | 
| 84 self.test_data = test_data | 83 self.test_data = test_data | 
| 85 self.test_labels = test_labels | 84 self.test_labels = test_labels | 
| 86 self.reverse_data_dictionary = reverse_data_dictionary | 85 self.reverse_data_dictionary = reverse_data_dictionary | 
| 87 self.precision = list() | 86 self.precision = list() | 
| 88 self.usage_weights = list() | 87 self.usage_weights = list() | 
| | 88 self.published_precision = list() | 
| 89 self.n_epochs = n_epochs | 89 self.n_epochs = n_epochs | 
| 90 self.next_compatible_tools = next_compatible_tools | |
| 91 self.pred_usage_scores = usg_scores | 90 self.pred_usage_scores = usg_scores | 
| | 91 self.standard_connections = standard_connections | 
| | 92 self.lowest_tool_ids = lowest_tool_ids | 
| | 93 self.lowest_pub_precision = list() | 
| | 94 self.lowest_norm_precision = list() | 
| 92 | 95 | 
| 93 def on_epoch_end(self, epoch, logs={}): | 96 def on_epoch_end(self, epoch, logs={}): | 
| 94 """ | 97 """ | 
| 95 Compute absolute and compatible precision for test data | 98 Compute absolute and compatible precision for test data | 
| 96 """ | 99 """ | 
| 97 if len(self.test_data) > 0: | 100 if len(self.test_data) > 0: | 
| 98 precision, usage_weights = utils.verify_model(self.model, self.test_data, self.test_labels, self.reverse_data_dictionary, self.next_compatible_tools, self.pred_usage_scores) | 101 usage_weights, precision, precision_pub, low_pub_prec, low_norm_prec, low_num = utils.verify_model(self.model, self.test_data, self.test_labels, self.reverse_data_dictionary, self.pred_usage_scores, self.standard_connections, self.lowest_tool_ids) | 
| 99 self.precision.append(precision) | 102 self.precision.append(precision) | 
| 100 self.usage_weights.append(usage_weights) | 103 self.usage_weights.append(usage_weights) | 
| 101 print("Epoch %d precision: %s" % (epoch + 1, precision)) | 104 self.published_precision.append(precision_pub) | 
| | 105 self.lowest_pub_precision.append(low_pub_prec) | 
| | 106 self.lowest_norm_precision.append(low_norm_prec) | 
| 102 print("Epoch %d usage weights: %s" % (epoch + 1, usage_weights)) | 107 print("Epoch %d usage weights: %s" % (epoch + 1, usage_weights)) | 
| | 108 print("Epoch %d normal precision: %s" % (epoch + 1, precision)) | 
| | 109 print("Epoch %d published precision: %s" % (epoch + 1, precision_pub)) | 
| | 110 print("Epoch %d lowest published precision: %s" % (epoch + 1, low_pub_prec)) | 
| | 111 print("Epoch %d lowest normal precision: %s" % (epoch + 1, low_norm_prec)) | 
| | 112 print("Epoch %d number of test samples with lowest tool ids: %s" % (epoch + 1, low_num)) | 
| 103 | 113 | 
| 104 | 114 | 
| 105 if __name__ == "__main__": | 115 if __name__ == "__main__": | 
| 106 start_time = time.time() | 116 start_time = time.time() | 
| 107 | 117 | 
| 114 arg_parser.add_argument("-pl", "--maximum_path_length", required=True, help="maximum length of tool path") | 124 arg_parser.add_argument("-pl", "--maximum_path_length", required=True, help="maximum length of tool path") | 
| 115 arg_parser.add_argument("-ep", "--n_epochs", required=True, help="number of iterations to run to create model") | 125 arg_parser.add_argument("-ep", "--n_epochs", required=True, help="number of iterations to run to create model") | 
| 116 arg_parser.add_argument("-oe", "--optimize_n_epochs", required=True, help="number of iterations to run to find best model parameters") | 126 arg_parser.add_argument("-oe", "--optimize_n_epochs", required=True, help="number of iterations to run to find best model parameters") | 
| 117 arg_parser.add_argument("-me", "--max_evals", required=True, help="maximum number of configuration evaluations") | 127 arg_parser.add_argument("-me", "--max_evals", required=True, help="maximum number of configuration evaluations") | 
| 118 arg_parser.add_argument("-ts", "--test_share", required=True, help="share of data to be used for testing") | 128 arg_parser.add_argument("-ts", "--test_share", required=True, help="share of data to be used for testing") | 
| 119 arg_parser.add_argument("-vs", "--validation_share", required=True, help="share of data to be used for validation") | |
| 120 # neural network parameters | 129 # neural network parameters | 
| 121 arg_parser.add_argument("-bs", "--batch_size", required=True, help="size of the tranining batch i.e. the number of samples per batch") | 130 arg_parser.add_argument("-bs", "--batch_size", required=True, help="size of the tranining batch i.e. the number of samples per batch") | 
| 122 arg_parser.add_argument("-ut", "--units", required=True, help="number of hidden recurrent units") | 131 arg_parser.add_argument("-ut", "--units", required=True, help="number of hidden recurrent units") | 
| 123 arg_parser.add_argument("-es", "--embedding_size", required=True, help="size of the fixed vector learned for each tool") | 132 arg_parser.add_argument("-es", "--embedding_size", required=True, help="size of the fixed vector learned for each tool") | 
| 124 arg_parser.add_argument("-dt", "--dropout", required=True, help="percentage of neurons to be dropped") | 133 arg_parser.add_argument("-dt", "--dropout", required=True, help="percentage of neurons to be dropped") | 
| 125 arg_parser.add_argument("-sd", "--spatial_dropout", required=True, help="1d dropout used for embedding layer") | 134 arg_parser.add_argument("-sd", "--spatial_dropout", required=True, help="1d dropout used for embedding layer") | 
| 126 arg_parser.add_argument("-rd", "--recurrent_dropout", required=True, help="dropout for the recurrent layers") | 135 arg_parser.add_argument("-rd", "--recurrent_dropout", required=True, help="dropout for the recurrent layers") | 
| 127 arg_parser.add_argument("-lr", "--learning_rate", required=True, help="learning rate") | 136 arg_parser.add_argument("-lr", "--learning_rate", required=True, help="learning rate") | 
| 128 arg_parser.add_argument("-ar", "--activation_recurrent", required=True, help="activation function for recurrent layers") | |
| 129 arg_parser.add_argument("-ao", "--activation_output", required=True, help="activation function for output layers") | |
| 130 | 137 | 
| 131 # get argument values | 138 # get argument values | 
| 132 args = vars(arg_parser.parse_args()) | 139 args = vars(arg_parser.parse_args()) | 
| 133 tool_usage_path = args["tool_usage_file"] | 140 tool_usage_path = args["tool_usage_file"] | 
| 134 workflows_path = args["workflow_file"] | 141 workflows_path = args["workflow_file"] | 
| 137 trained_model_path = args["output_model"] | 144 trained_model_path = args["output_model"] | 
| 138 n_epochs = int(args["n_epochs"]) | 145 n_epochs = int(args["n_epochs"]) | 
| 139 optimize_n_epochs = int(args["optimize_n_epochs"]) | 146 optimize_n_epochs = int(args["optimize_n_epochs"]) | 
| 140 max_evals = int(args["max_evals"]) | 147 max_evals = int(args["max_evals"]) | 
| 141 test_share = float(args["test_share"]) | 148 test_share = float(args["test_share"]) | 
| 142 validation_share = float(args["validation_share"]) | |
| 143 batch_size = args["batch_size"] | 149 batch_size = args["batch_size"] | 
| 144 units = args["units"] | 150 units = args["units"] | 
| 145 embedding_size = args["embedding_size"] | 151 embedding_size = args["embedding_size"] | 
| 146 dropout = args["dropout"] | 152 dropout = args["dropout"] | 
| 147 spatial_dropout = args["spatial_dropout"] | 153 spatial_dropout = args["spatial_dropout"] | 
| 148 recurrent_dropout = args["recurrent_dropout"] | 154 recurrent_dropout = args["recurrent_dropout"] | 
| 149 learning_rate = args["learning_rate"] | 155 learning_rate = args["learning_rate"] | 
| 150 activation_recurrent = args["activation_recurrent"] | |
| 151 activation_output = args["activation_output"] | |
| 152 num_cpus = 16 | 156 num_cpus = 16 | 
| 153 | 157 | 
| 154 config = { | 158 config = { | 
| 155 'cutoff_date': cutoff_date, | 159 'cutoff_date': cutoff_date, | 
| 156 'maximum_path_length': maximum_path_length, | 160 'maximum_path_length': maximum_path_length, | 
| 157 'n_epochs': n_epochs, | 161 'n_epochs': n_epochs, | 
| 158 'optimize_n_epochs': optimize_n_epochs, | 162 'optimize_n_epochs': optimize_n_epochs, | 
| 159 'max_evals': max_evals, | 163 'max_evals': max_evals, | 
| 160 'test_share': test_share, | 164 'test_share': test_share, | 
| 161 'validation_share': validation_share, | |
| 162 'batch_size': batch_size, | 165 'batch_size': batch_size, | 
| 163 'units': units, | 166 'units': units, | 
| 164 'embedding_size': embedding_size, | 167 'embedding_size': embedding_size, | 
| 165 'dropout': dropout, | 168 'dropout': dropout, | 
| 166 'spatial_dropout': spatial_dropout, | 169 'spatial_dropout': spatial_dropout, | 
| 167 'recurrent_dropout': recurrent_dropout, | 170 'recurrent_dropout': recurrent_dropout, | 
| 168 'learning_rate': learning_rate, | 171 'learning_rate': learning_rate | 
| 169 'activation_recurrent': activation_recurrent, | |
| 170 'activation_output': activation_output | |
| 171 } | 172 } | 
| 172 | 173 | 
| 173 # Extract and process workflows | 174 # Extract and process workflows | 
| 174 connections = extract_workflow_connections.ExtractWorkflowConnections() | 175 connections = extract_workflow_connections.ExtractWorkflowConnections() | 
| 175 workflow_paths, compatible_next_tools = connections.read_tabular_file(workflows_path) | 176 workflow_paths, compatible_next_tools, standard_connections = connections.read_tabular_file(workflows_path) | 
| 176 # Process the paths from workflows | 177 # Process the paths from workflows | 
| 177 print("Dividing data...") | 178 print("Dividing data...") | 
| 178 data = prepare_data.PrepareData(maximum_path_length, test_share) | 179 data = prepare_data.PrepareData(maximum_path_length, test_share) | 
| 179 train_data, train_labels, test_data, test_labels, data_dictionary, reverse_dictionary, class_weights, usage_pred = data.get_data_labels_matrices(workflow_paths, tool_usage_path, cutoff_date, compatible_next_tools) | 180 train_data, train_labels, test_data, test_labels, data_dictionary, reverse_dictionary, class_weights, usage_pred, l_tool_freq, l_tool_tr_samples = data.get_data_labels_matrices(workflow_paths, tool_usage_path, cutoff_date, compatible_next_tools, standard_connections) | 
| 180 # find the best model and start training | 181 # find the best model and start training | 
| 181 predict_tool = PredictTool(num_cpus) | 182 predict_tool = PredictTool(num_cpus) | 
| 182 # start training with weighted classes | 183 # start training with weighted classes | 
| 183 print("Training with weighted classes and samples ...") | 184 print("Training with weighted classes and samples ...") | 
| 184 results_weighted = predict_tool.find_train_best_network(config, reverse_dictionary, train_data, train_labels, test_data, test_labels, n_epochs, class_weights, usage_pred, compatible_next_tools) | 185 results_weighted = predict_tool.find_train_best_network(config, reverse_dictionary, train_data, train_labels, test_data, test_labels, n_epochs, class_weights, usage_pred, standard_connections, l_tool_freq, l_tool_tr_samples) | 
| 185 print() | 186 utils.save_model(results_weighted, data_dictionary, compatible_next_tools, trained_model_path, class_weights, standard_connections) | 
| 186 print("Best parameters \n") | |
| 187 print(results_weighted["best_parameters"]) | |
| 188 print() | |
| 189 utils.save_model(results_weighted, data_dictionary, compatible_next_tools, trained_model_path, class_weights) | |
| 190 end_time = time.time() | 187 end_time = time.time() | 
| 191 print() | 188 print() | 
| 192 print("Program finished in %s seconds" % str(end_time - start_time)) | 189 print("Program finished in %s seconds" % str(end_time - start_time)) | 
