comparison main.py @ 1:12764915e1c5 draft
"planemo upload for repository https://github.com/bgruening/galaxytools/tree/recommendation_training/tools/tool_recommendation_model commit edeb85d311990eabd65f3c4576fbeabc6d9165c9"
author:   bgruening
date:     Wed, 25 Sep 2019 06:42:40 -0400
parents:  9bf25dbe00ad
children: 76251d1ccdcc
```diff
--- main.py (changeset 0:9bf25dbe00ad)
+++ main.py (changeset 1:12764915e1c5)
@@ -110,11 +110,10 @@
 arg_parser.add_argument("-sd", "--spatial_dropout", required=True, help="1d dropout used for embedding layer")
 arg_parser.add_argument("-rd", "--recurrent_dropout", required=True, help="dropout for the recurrent layers")
 arg_parser.add_argument("-lr", "--learning_rate", required=True, help="learning rate")
 arg_parser.add_argument("-ar", "--activation_recurrent", required=True, help="activation function for recurrent layers")
 arg_parser.add_argument("-ao", "--activation_output", required=True, help="activation function for output layers")
-arg_parser.add_argument("-lt", "--loss_type", required=True, help="type of the loss/error function")
 # get argument values
 args = vars(arg_parser.parse_args())
 tool_usage_path = args["tool_usage_file"]
 workflows_path = args["workflow_file"]
 cutoff_date = args["cutoff_date"]
@@ -132,11 +131,10 @@
 spatial_dropout = args["spatial_dropout"]
 recurrent_dropout = args["recurrent_dropout"]
 learning_rate = args["learning_rate"]
 activation_recurrent = args["activation_recurrent"]
 activation_output = args["activation_output"]
-loss_type = args["loss_type"]
 
 config = {
 'cutoff_date': cutoff_date,
 'maximum_path_length': maximum_path_length,
 'n_epochs': n_epochs,
@@ -150,12 +148,11 @@
 'dropout': dropout,
 'spatial_dropout': spatial_dropout,
 'recurrent_dropout': recurrent_dropout,
 'learning_rate': learning_rate,
 'activation_recurrent': activation_recurrent,
-'activation_output': activation_output,
-'loss_type': loss_type
+'activation_output': activation_output
 }
 
 # Extract and process workflows
 connections = extract_workflow_connections.ExtractWorkflowConnections()
 workflow_paths, compatible_next_tools = connections.read_tabular_file(workflows_path)
```
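The hunks above only remove the command-line plumbing for `--loss_type` (the argparse flag, the `args[...]` lookup, and the `config` entry); presumably the loss is now fixed elsewhere in the model-building code rather than supplied on the command line. Note also that `argparse` returns plain strings when no `type=` is given, so every hyperparameter stored in `config` (dropouts, learning rate, activations) is still a string at this point. Below is a minimal, self-contained sketch of that behaviour; it reuses only the flags visible in this comparison (the real `main.py` defines many more arguments) and the simulated command line is illustrative only.

```python
# Minimal sketch, not part of the commit: it mirrors a few of the argparse
# flags visible in the hunks above and shows that their values arrive as
# strings, so numeric hyperparameters must be cast before use.
import argparse

arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-sd", "--spatial_dropout", required=True, help="1d dropout used for embedding layer")
arg_parser.add_argument("-rd", "--recurrent_dropout", required=True, help="dropout for the recurrent layers")
arg_parser.add_argument("-lr", "--learning_rate", required=True, help="learning rate")

# Simulated command line for illustration only.
args = vars(arg_parser.parse_args(["-sd", "0.2", "-rd", "0.2", "-lr", "0.001"]))

config = {
    'spatial_dropout': args["spatial_dropout"],    # still the string "0.2"
    'recurrent_dropout': args["recurrent_dropout"],
    'learning_rate': args["learning_rate"],
}

# Cast at the point of use, e.g. before constructing an optimizer.
learning_rate = float(config['learning_rate'])
print(type(config['learning_rate']).__name__, learning_rate)  # str 0.001
```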