comparison create_tool_recommendation_model.xml @ 1:12764915e1c5 draft
"planemo upload for repository https://github.com/bgruening/galaxytools/tree/recommendation_training/tools/tool_recommendation_model commit edeb85d311990eabd65f3c4576fbeabc6d9165c9"
author | bgruening |
---|---|
date | Wed, 25 Sep 2019 06:42:40 -0400 |
parents | 9bf25dbe00ad |
children | 76251d1ccdcc |
0:9bf25dbe00ad | 1:12764915e1c5 |
---|---|
29 --spatial_dropout '$nn_parameters.spatial_dropout' | 29 --spatial_dropout '$nn_parameters.spatial_dropout' |
30 --recurrent_dropout '$nn_parameters.recurrent_dropout' | 30 --recurrent_dropout '$nn_parameters.recurrent_dropout' |
31 --learning_rate '$nn_parameters.learning_rate' | 31 --learning_rate '$nn_parameters.learning_rate' |
32 --activation_recurrent '$nn_parameters.activation_recurrent' | 32 --activation_recurrent '$nn_parameters.activation_recurrent' |
33 --activation_output '$nn_parameters.activation_output' | 33 --activation_output '$nn_parameters.activation_output' |
34 --loss_type '$nn_parameters.loss_type' | |
35 --output_model '$outfile_model' | 34 --output_model '$outfile_model' |
36 ]]> | 35 ]]> |
37 </command> | 36 </command> |
38 <inputs> | 37 <inputs> |
39 <param name="input_tabular_workflows" type="data" format="tabular" label="Dataset containing workflows" help="Please provide Galaxy workflows as a tabular file."/> | 38 <param name="input_tabular_workflows" type="data" format="tabular" label="Dataset containing workflows" help="Please provide Galaxy workflows as a tabular file."/> |
57 <param name="spatial_dropout" type="text" value="0.0,0.5" label="Dropout for the embedding layer" help="Provide a comma-separated range to sample the amount of dropout to be used after embedding layer. The minimum value should be 0.0 and the maximum value should be 1.0. Dropout is used to prevent or minimize overfitting in the embedding layer. An example: 0.0,0.5"/> | 56 <param name="spatial_dropout" type="text" value="0.0,0.5" label="Dropout for the embedding layer" help="Provide a comma-separated range to sample the amount of dropout to be used after embedding layer. The minimum value should be 0.0 and the maximum value should be 1.0. Dropout is used to prevent or minimize overfitting in the embedding layer. An example: 0.0,0.5"/> |
58 <param name="recurrent_dropout" type="text" value="0.0,0.5" label="Dropout for recurrent layers" help="Provide a comma-separated range to sample the amount of dropout to be used for the recurrent layers. The minimum value should be 0.0 and the maximum value should be 1.0. Dropout is used to prevent or minimize overfitting in the recurrent layers. An example: 0.0,0.5"/> | 57 <param name="recurrent_dropout" type="text" value="0.0,0.5" label="Dropout for recurrent layers" help="Provide a comma-separated range to sample the amount of dropout to be used for the recurrent layers. The minimum value should be 0.0 and the maximum value should be 1.0. Dropout is used to prevent or minimize overfitting in the recurrent layers. An example: 0.0,0.5"/> |
59 <param name="learning_rate" type="text" value="0.0001,0.1" label="Learning rate" help="Provide a range of positive real numbers to sample the learning rate. Learning rate defines the speed of neural network learning. A higher value will ensure fast learning and smaller value will ensure slower learning. An example: 0.0001,0.1"/> | 58 <param name="learning_rate" type="text" value="0.0001,0.1" label="Learning rate" help="Provide a range of positive real numbers to sample the learning rate. Learning rate defines the speed of neural network learning. A higher value will ensure fast learning and smaller value will ensure slower learning. An example: 0.0001,0.1"/> |
60 <param name="activation_recurrent" type="text" value="elu" label="Name of the activation function for recurrent layers" help="It is a mathematical function that transforms the input of recurrent layers to the following neural network layer."/> | 59 <param name="activation_recurrent" type="text" value="elu" label="Name of the activation function for recurrent layers" help="It is a mathematical function that transforms the input of recurrent layers to the following neural network layer."/> |
61 <param name="activation_output" type="text" value="sigmoid" label="Name of the activation function for output layer" help="It is a mathematical function that transforms the input of the last dense layer to the output of the neural network."/> | 60 <param name="activation_output" type="text" value="sigmoid" label="Name of the activation function for output layer" help="It is a mathematical function that transforms the input of the last dense layer to the output of the neural network."/> |
62 <param name="loss_type" type="text" value="binary_crossentropy" label="Name of the loss function" help="The loss/error function computes an error between the true and predicted output. This error is minimised during neural network learning to be as close to the true output as possible. Root Mean Square Propagation (RMSProp) is used as an optimiser to minimise error computed by this error function."/> | |
63 </section> | 61 </section> |
64 </inputs> | 62 </inputs> |
65 <outputs> | 63 <outputs> |
66 <data format="h5" name="outfile_model" label="Model to recommend tools in Galaxy"></data> | 64 <data format="h5" name="outfile_model" label="Model to recommend tools in Galaxy"></data> |
67 </outputs> | 65 </outputs> |
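Several of the parameters above accept a comma-separated "low,high" range (for example, value="0.0,0.5") that is sampled during hyperparameter optimisation. The following is a minimal Python sketch of how such a range string might be parsed and sampled; the helper name is hypothetical, since the training script's actual parsing code is not part of this excerpt.

```python
import random

def sample_from_range(range_str):
    """Hypothetical helper: parse a "low,high" string such as "0.0,0.5"
    and draw one candidate value uniformly from that interval."""
    low, high = (float(v) for v in range_str.split(","))
    return random.uniform(low, high)

# One candidate configuration for a hyperparameter-search trial.
spatial_dropout = sample_from_range("0.0,0.5")
recurrent_dropout = sample_from_range("0.0,0.5")
learning_rate = sample_from_range("0.0001,0.1")
```

In practice, a learning-rate range such as 0.0001,0.1 is often sampled on a logarithmic rather than a uniform scale, so that small values are explored as thoroughly as large ones.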
136 - "spatial_dropout": Similar to dropout, this is used to reduce overfitting in the embedding layer. This parameter should be optimised as well. | 134 - "spatial_dropout": Similar to dropout, this is used to reduce overfitting in the embedding layer. This parameter should be optimised as well. |
137 - "recurrent_dropout": Similar to dropout and spatial dropout, this is used to reduce overfitting in the recurrent layers (hidden). This parameter should be optimised as well. | 135 - "recurrent_dropout": Similar to dropout and spatial dropout, this is used to reduce overfitting in the recurrent layers (hidden). This parameter should be optimised as well. |
138 - "learning_rate": The learning rate specifies the speed of learning. A higher value ensures fast learning (the optimiser may diverge) and a lower value causes slow learning (may not reach the optimum). This parameter should be optimised as well. | 136 - "learning_rate": The learning rate specifies the speed of learning. A higher value ensures fast learning (the optimiser may diverge) and a lower value causes slow learning (may not reach the optimum). This parameter should be optimised as well. |
139 - "activation_recurrent": Activations are mathematical functions to transform input into output. This takes the name of an activation function from the list of Keras activations (https://keras.io/activations/) for recurrent layers. | 137 - "activation_recurrent": Activations are mathematical functions to transform input into output. This takes the name of an activation function from the list of Keras activations (https://keras.io/activations/) for recurrent layers. |
140 - "activation_output": This takes the activation for transforming the input of the last layer to the output of the neural network. It is also taken from Keras activations (https://keras.io/activations/). | 138 - "activation_output": This takes the activation for transforming the input of the last layer to the output of the neural network. It is also taken from Keras activations (https://keras.io/activations/). |
141 - "loss_type": This is also a mathematical function which computes the error between true and predicted outputs. An optimizer uses this loss function to compute error and minimize it. It is taken from the list of Keras optimisers (https://keras.io/optimizers/). | |
142 | 139 |
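To show how the hyperparameters above fit together, here is a minimal Keras sketch of a recurrent model wired with these parameters. The GRU architecture and the layer sizes are assumptions made for illustration; the actual network definition lives in the training script, not in this wrapper.

```python
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, SpatialDropout1D, GRU, Dense
from tensorflow.keras.optimizers import RMSprop

# Assumed placeholder sizes; the real values come from the workflow data.
vocab_size, embedding_size, units = 1000, 128, 64

model = Sequential([
    Embedding(vocab_size, embedding_size),
    # "spatial_dropout": dropout applied after the embedding layer.
    SpatialDropout1D(0.2),
    # "recurrent_dropout" and "activation_recurrent" act on the hidden layers.
    GRU(units, activation="elu", recurrent_dropout=0.2, return_sequences=True),
    GRU(units, activation="elu", recurrent_dropout=0.2),
    # "activation_output": applied by the last dense layer.
    Dense(vocab_size, activation="sigmoid"),
])

# RMSProp minimises the chosen loss at the sampled learning rate.
model.compile(loss="binary_crossentropy", optimizer=RMSprop(learning_rate=0.001))
```

Pairing a sigmoid output with binary cross-entropy treats the task as a multi-label problem: each candidate tool receives an independent probability of being a suitable next step.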
143 ----- | 140 ----- |
144 | 141 |
145 **Output file** | 142 **Output file** |
146 | 143 |
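The trained model is written as an HDF5 (h5) file. Assuming it is a standard Keras-format file, here is a minimal sketch of loading it back for inspection or inference; the file may also carry auxiliary lookup data, so treat this as illustrative only.

```python
from tensorflow.keras.models import load_model

# Assumes the h5 output is a standard Keras model file; filename is hypothetical.
model = load_model("tool_recommendation_model.h5")
model.summary()
```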