comparison create_tool_recommendation_model.xml @ 3:5b3c08710e47 draft

"planemo upload for repository https://github.com/bgruening/galaxytools/tree/recommendation_training/tools/tool_recommendation_model commit c635df659fe1835679438589ded43136b0e515c6"
author bgruening
date Sat, 09 May 2020 05:38:23 -0400
parents 76251d1ccdcc
children afec8c595124
2:76251d1ccdcc 3:5b3c08710e47
1 <tool id="create_tool_recommendation_model" name="Create a model to recommend tools" version="0.0.1"> 1 <tool id="create_tool_recommendation_model" name="Create a model to recommend tools" version="0.0.2">
2 <description>using deep learning</description> 2 <description>using deep learning</description>
3 <requirements> 3 <requirements>
4 <requirement type="package" version="3.6">python</requirement> 4 <requirement type="package" version="3.6">python</requirement>
5 <requirement type="package" version="1.13.1">tensorflow</requirement> 5 <requirement type="package" version="1.13.1">tensorflow</requirement>
6 <requirement type="package" version="2.3.0">keras</requirement> 6 <requirement type="package" version="2.3.0">keras</requirement>
19 --maximum_path_length '$data_parameters.input_maximum_path_length' 19 --maximum_path_length '$data_parameters.input_maximum_path_length'
20 --n_epochs '$training_parameters.n_epochs' 20 --n_epochs '$training_parameters.n_epochs'
21 --optimize_n_epochs '$training_parameters.optimize_n_epochs' 21 --optimize_n_epochs '$training_parameters.optimize_n_epochs'
22 --max_evals '$training_parameters.max_evals' 22 --max_evals '$training_parameters.max_evals'
23 --test_share '$training_parameters.test_share' 23 --test_share '$training_parameters.test_share'
24 --validation_share '$training_parameters.validation_share'
25 --batch_size '$nn_parameters.batch_size' 24 --batch_size '$nn_parameters.batch_size'
26 --units '$nn_parameters.units' 25 --units '$nn_parameters.units'
27 --embedding_size '$nn_parameters.embedding_size' 26 --embedding_size '$nn_parameters.embedding_size'
28 --dropout '$nn_parameters.dropout' 27 --dropout '$nn_parameters.dropout'
29 --spatial_dropout '$nn_parameters.spatial_dropout' 28 --spatial_dropout '$nn_parameters.spatial_dropout'
30 --recurrent_dropout '$nn_parameters.recurrent_dropout' 29 --recurrent_dropout '$nn_parameters.recurrent_dropout'
31 --learning_rate '$nn_parameters.learning_rate' 30 --learning_rate '$nn_parameters.learning_rate'
32 --activation_recurrent '$nn_parameters.activation_recurrent'
33 --activation_output '$nn_parameters.activation_output'
34 --output_model '$outfile_model' 31 --output_model '$outfile_model'
35 ]]> 32 ]]>
36 </command> 33 </command>
37 <inputs> 34 <inputs>
38 <param name="input_tabular_workflows" type="data" format="tabular" label="Dataset containing workflows" help="Please provide Galaxy workflows as a tabular file."/> 35 <param name="input_tabular_workflows" type="data" format="tabular" label="Dataset containing workflows" help="Please provide Galaxy workflows as a tabular file."/>
43 40
44 <param name="input_maximum_path_length" type="integer" value="25" label="Maximum number of tools in a tool sequence" help="Provide an integer between 1 and 25. A workflow is divided into unique paths and this number specifies the maximum number of tools a path can have. Paths longer than this number are ignored and are not included in the deep learning training."/> 41 <param name="input_maximum_path_length" type="integer" value="25" label="Maximum number of tools in a tool sequence" help="Provide an integer between 1 and 25. A workflow is divided into unique paths and this number specifies the maximum number of tools a path can have. Paths longer than this number are ignored and are not included in the deep learning training."/>
45 42
46 </section> 43 </section>
47 <section name="training_parameters" title="Training parameters" expanded="False"> 44 <section name="training_parameters" title="Training parameters" expanded="False">
48 <param name="max_evals" type="integer" value="50" label="Maximum number of evaluations of different configurations of parameters" help="Provide an integer. Different combinations of parameters are sampled and optimised to find the best one. This number specifies the number of different configurations sampled and tested."/> 45 <param name="max_evals" type="integer" value="20" label="Maximum number of evaluations of different configurations of parameters" help="Provide an integer. Different combinations of parameters are sampled and optimised to find the best one. This number specifies the number of different configurations sampled and tested."/>
49 46
50 <param name="optimize_n_epochs" type="integer" value="20" label="Number of training iterations to optimize the neural network parameters" help="Provide an integer. This number specifies the number of training iterations done for each sampled configuration while optimising the parameters."/> 47 <param name="optimize_n_epochs" type="integer" value="5" label="Number of training iterations to optimize the neural network parameters" help="Provide an integer. This number specifies the number of training iterations done for each sampled configuration while optimising the parameters."/>
51 48
52 <param name="n_epochs" type="integer" value="20" label="Number of training iterations" help="Provide an integer. This specifies the number of deep learning training iterations done after finding the best/optimised configuration of neural network parameters."/> 49 <param name="n_epochs" type="integer" value="10" label="Number of training iterations" help="Provide an integer. This specifies the number of deep learning training iterations done after finding the best/optimised configuration of neural network parameters."/>
53 50
54 <param name="test_share" type="float" value="0.0" label="Share of the test data" help="Provide a real number between 0.0 and 0.5. This share of the data is used to assess prediction accuracy on unseen data after the neural network has been trained on an optimised configuration of parameters. It should be set to 0.0 when training a model to be deployed to production."/> 51 <param name="test_share" type="float" value="0.2" label="Share of the test data" help="Provide a real number between 0.0 and 0.5. This share of the data is used to assess prediction accuracy on unseen data after the neural network has been trained on an optimised configuration of parameters. It should be set to 0.0 when training a model to be deployed to production."/>
55
56 <param name="validation_share" type="float" value="0.2" label="Share of the validation data" help="Provide a real number between 0.0 and 0.5. This share of the data is used to validate each step of learning while optimising the configurations of parameters."/>
57 52
58 </section> 53 </section>
59 <section name="nn_parameters" title="Neural network parameters" expanded="False"> 54 <section name="nn_parameters" title="Neural network parameters" expanded="False">
60 <param name="batch_size" type="text" value="1,512" label="Training batch size" help="Provide a comma-separated range to sample the batch size from. The neural network is trained using batch learning: the training data is divided into equal-sized batches and, in each epoch (one training iteration), the network is trained on every batch in turn. An example: 1,512." /> 55 <param name="batch_size" type="text" value="32,256" label="Training batch size" help="Provide a comma-separated range to sample the batch size from. The neural network is trained using batch learning: the training data is divided into equal-sized batches and, in each epoch (one training iteration), the network is trained on every batch in turn. An example: 32,256." />
61 56
62 <param name="units" type="text" value="1,512" label="Number of hidden recurrent units" help="Provide a comma-separated range to sample the number of hidden recurrent units. A higher value gives a stronger model (which may overfit on smaller data) and a smaller value gives a weaker model (which may underfit on larger data). An example: 1,512."/> 57 <param name="units" type="text" value="32,512" label="Number of hidden recurrent units" help="Provide a comma-separated range to sample the number of hidden recurrent units. A higher value gives a stronger model (which may overfit on smaller data) and a smaller value gives a weaker model (which may underfit on larger data). An example: 32,512."/>
63 58
64 <param name="embedding_size" type="text" value="1,512" label="Embedding vector size" help="Provide a comma-separated range to sample the embedding size for tools. A fixed-size vector is learned for each tool; this number specifies that size. An example: 1,512."/> 59 <param name="embedding_size" type="text" value="32,512" label="Embedding vector size" help="Provide a comma-separated range to sample the embedding size for tools. A fixed-size vector is learned for each tool; this number specifies that size. An example: 32,512."/>
65 60
66 <param name="dropout" type="text" value="0.0,0.5" label="Dropout between neural network layers" help="Provide a comma-separated range to sample the amount of dropout to be used after neural network layers. The minimum value should be 0.0 and the maximum value should be 1.0. Dropout is used to prevent or minimize overfitting after each neural network layer. An example: 0.0,0.5"/> 61 <param name="dropout" type="text" value="0.0,0.5" label="Dropout between neural network layers" help="Provide a comma-separated range to sample the amount of dropout to be used after neural network layers. The minimum value should be 0.0 and the maximum value should be 1.0. Dropout is used to prevent or minimize overfitting after each neural network layer. An example: 0.0,0.5"/>
67 62
68 <param name="spatial_dropout" type="text" value="0.0,0.5" label="Dropout for the embedding layer" help="Provide a comma-separated range to sample the amount of dropout to be used after the embedding layer. The minimum value should be 0.0 and the maximum value should be 1.0. Dropout is used to prevent or minimize overfitting in the embedding layer. An example: 0.0,0.5"/> 63 <param name="spatial_dropout" type="text" value="0.0,0.5" label="Dropout for the embedding layer" help="Provide a comma-separated range to sample the amount of dropout to be used after the embedding layer. The minimum value should be 0.0 and the maximum value should be 1.0. Dropout is used to prevent or minimize overfitting in the embedding layer. An example: 0.0,0.5"/>
69 64
70 <param name="recurrent_dropout" type="text" value="0.0,0.5" label="Dropout for recurrent layers" help="Provide a comma-separated range to sample the amount of dropout to be used for the recurrent layers. The minimum value should be 0.0 and the maximum value should be 1.0. Dropout is used to prevent or minimize overfitting in the recurrent layers. An example: 0.0,0.5"/> 65 <param name="recurrent_dropout" type="text" value="0.0,0.5" label="Dropout for recurrent layers" help="Provide a comma-separated range to sample the amount of dropout to be used for the recurrent layers. The minimum value should be 0.0 and the maximum value should be 1.0. Dropout is used to prevent or minimize overfitting in the recurrent layers. An example: 0.0,0.5"/>
71 66
72 <param name="learning_rate" type="text" value="0.0001,0.1" label="Learning rate" help="Provide a range of positive real numbers to sample the learning rate from. The learning rate defines the speed of neural network learning: a higher value gives faster learning and a smaller value slower learning. An example: 0.0001,0.1"/> 67 <param name="learning_rate" type="text" value="0.0001,0.1" label="Learning rate" help="Provide a range of positive real numbers to sample the learning rate from. The learning rate defines the speed of neural network learning: a higher value gives faster learning and a smaller value slower learning. An example: 0.0001,0.1"/>
73 <param name="activation_recurrent" type="text" value="elu" label="Name of the activation function for recurrent layers" help="It is a mathematical function that transforms the input of recurrent layers to the following neural network layer."/>
74
75 <param name="activation_output" type="text" value="sigmoid" label="Name of the activation function for output layer" help="It is a mathematical function that transforms the input of the last dense layer to the output of the neural network."/>
76 </section> 68 </section>
77 </inputs> 69 </inputs>
78 <outputs> 70 <outputs>
79 <data format="h5" name="outfile_model" label="Model to recommend tools in Galaxy"></data> 71 <data format="h5" name="outfile_model" label="Model to recommend tools in Galaxy"></data>
80 </outputs> 72 </outputs>
81 <tests> 73 <tests>
82 <test> 74 <test>
83 <param name="input_tabular_workflows" value="test_workflows" ftype="tabular"/> 75 <param name="input_tabular_workflows" value="test_workflows" ftype="tabular"/>
84 <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/> 76 <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/>
85 <param name="max_evals" value="1"/> 77 <param name="max_evals" value="1"/>
78 <param name="batch_size" value="1,2"/>
86 <param name="optimize_n_epochs" value="1"/> 79 <param name="optimize_n_epochs" value="1"/>
87 <param name="n_epochs" value="1"/> 80 <param name="n_epochs" value="1"/>
88 <param name="test_share" value="0.1"/> 81 <param name="test_share" value="0.2"/>
89 <output name="outfile_model"> 82 <output name="outfile_model">
90 <assert_contents> 83 <assert_contents>
91 <has_h5_keys keys="best_parameters,class_weights,compatible_tools,data_dictionary,model_config,weight_0,weight_1,weight_2,weight_3,weight_4,weight_5,weight_6,weight_7,weight_8"/> 84 <has_h5_keys keys="best_parameters,class_weights,compatible_tools,data_dictionary,model_config,weight_0,weight_1,weight_2,weight_3,weight_4,weight_5,weight_6,weight_7,weight_8"/>
92 </assert_contents> 85 </assert_contents>
93 </output> 86 </output>
94 </test> 87 </test>
95 <test> 88 <test>
96 <param name="input_tabular_workflows" value="test_workflows" ftype="tabular"/> 89 <param name="input_tabular_workflows" value="test_workflows" ftype="tabular"/>
97 <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/> 90 <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/>
98 <param name="max_evals" value="1"/> 91 <param name="max_evals" value="1"/>
92 <param name="batch_size" value="1,2"/>
99 <param name="optimize_n_epochs" value="1"/> 93 <param name="optimize_n_epochs" value="1"/>
100 <param name="n_epochs" value="1"/> 94 <param name="n_epochs" value="1"/>
101 <param name="test_share" value="0.0"/>
102 <output name="outfile_model"> 95 <output name="outfile_model">
103 <assert_contents> 96 <assert_contents>
104 <has_h5_keys keys="best_parameters,class_weights,compatible_tools,data_dictionary,model_config,weight_0,weight_1,weight_2,weight_3,weight_4,weight_5,weight_6,weight_7,weight_8"/> 97 <has_h5_keys keys="best_parameters,class_weights,compatible_tools,data_dictionary,model_config,weight_0,weight_1,weight_2,weight_3,weight_4,weight_5,weight_6,weight_7,weight_8"/>
105 </assert_contents> 98 </assert_contents>
106 </output> 99 </output>
107 </test> 100 </test>
108 <test expect_failure="true"> 101 <test expect_failure="true">
109 <param name="input_tabular_workflows" value="test_workflows" ftype="tabular"/> 102 <param name="input_tabular_workflows" value="test_workflows" ftype="tabular"/>
110 <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/> 103 <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/>
111 <param name="validation_share" value="0.0"/> 104 <param name="test_share" value="0.0"/>
112 </test> 105 </test>
113 <test expect_failure="true"> 106 <test expect_failure="true">
114 <param name="input_tabular_workflows" value="test_workflows" ftype="tabular"/> 107 <param name="input_tabular_workflows" value="test_workflows" ftype="tabular"/>
115 <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/> 108 <param name="input_tabular_tool_usage" value="test_tool_usage" ftype="tabular"/>
116 <param name="batch_size" value="1"/> 109 <param name="batch_size" value="1"/>
160 153
161 There are two input files: 154 There are two input files:
162 155
163 1. The first file ("dataset containing workflows") contains tool connections for workflows in a tabular format. Each row records one tool-to-tool connection in a workflow, as shown below: 156 1. The first file ("dataset containing workflows") contains tool connections for workflows in a tabular format. Each row records one tool-to-tool connection in a workflow, as shown below:
164 157
165 ========== =================== ========= ================= ============= ========== ============ ============== 158 ========== ================ ========== ============= ============= =========== ============= ============== ============== =========== =============
166 **wf_id** **wf_updated** **in_id** **in_tool** **in_tool_v** **out_id** **out_tool** **out_tool_v** 159 **wf_id** **wf_updated** **in_id** **in_tool** **in_tool_v** **out_id** **out_tool** **out_tool_v** **published** **deleted** **has_error**
167 ---------- ------------------- --------- ----------------- ------------- ---------- ------------ -------------- 160 ---------- ---------------- ---------- ------------- ------------- ----------- ------------- -------------- -------------- ----------- -------------
168 3 2013-02-07 16:48:00 7 Remove beginning1 1.0.0 5 Grep1 1.0.1 161 3 2013-02-07 7 Cut1 1.0.0 5 Grep1 1.0.1 f t f
169 ========== =================== ========= ================= ============= ========== ============ ============== 162 ========== ================ ========== ============= ============= =========== ============= ============== ============== =========== =============
170 163
171 The first column (wf_id) is the workflow id, the second (wf_updated) is the last-updated timestamp, the third (in_id) is the id of the tool that forms the input of the tool connection, the fourth (in_tool) is the name of the input tool, the fifth (in_tool_v) is the version of the input tool, the sixth (out_id) is the id of the output tool of the tool connection, the seventh (out_tool) is the name of the output tool and the last (out_tool_v) is the version of the output tool. The tool connections (rows) of each workflow are used to recreate the workflow (a directed acyclic graph), and the unique tool sequences of each workflow are extracted. These tool sequences are then used to learn higher-order dependencies with a recurrent neural network to recommend tools. 164 The first column (wf_id) is the workflow id, the second (wf_updated) is the last-updated timestamp, the third (in_id) is the id of the tool that forms the input of the tool connection, the fourth (in_tool) is the name of the input tool, the fifth (in_tool_v) is the version of the input tool, the sixth (out_id) is the id of the output tool of the tool connection, the seventh (out_tool) is the name of the output tool and the last (out_tool_v) is the version of the output tool. The tool connections (rows) of each workflow are used to recreate the workflow (a directed acyclic graph), and the unique tool sequences of each workflow are extracted. These tool sequences are then used to learn higher-order dependencies with a recurrent neural network to recommend tools. The last three columns (published, deleted, has_error) record whether a workflow is published, deleted or contains errors; together they help determine whether a workflow is of good quality.
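To make the path extraction concrete, here is a minimal Python sketch (illustrative only, not the tool's actual implementation; it assumes a header-less, tab-separated file with the column order described above and acyclic workflows)::

    import csv
    from collections import defaultdict

    def extract_tool_sequences(connections_file, max_path_length=25):
        # Rebuild each workflow's directed graph from its tool connections.
        graphs = defaultdict(lambda: defaultdict(list))
        with open(connections_file) as handle:
            for row in csv.reader(handle, delimiter="\t"):
                wf_id, in_tool, out_tool = row[0], row[3], row[6]
                graphs[wf_id][in_tool].append(out_tool)
        # Enumerate the unique root-to-leaf tool paths of each workflow.
        sequences = set()
        for graph in graphs.values():
            children = {c for kids in graph.values() for c in kids}
            roots = set(graph) - children
            stack = [(root,) for root in roots]
            while stack:
                path = stack.pop()
                if len(path) > max_path_length:
                    continue  # paths longer than the maximum are ignored
                kids = graph.get(path[-1], [])
                if kids:
                    stack.extend(path + (kid,) for kid in kids)
                else:
                    sequences.add(path)
        return sequences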
172 165
173 2. The second file ("dataset containing usage frequencies of tools") is also a tabular file containing the usage frequencies of tools for a period of time. It has 3 columns: 166 2. The second file ("dataset containing usage frequencies of tools") is also a tabular file containing the usage frequencies of tools for a period of time. It has 3 columns:
174 167
175 ============================================================================================ ========== === 168 ============================================================================================ ========== ===
176 upload1 2019-03-01 176 169 upload1 2019-03-01 176
177 toolshed.g2.bx.psu.edu/repos/devteam/fastqc/fastqc/0.72 2019-03-01 97 170 toolshed.g2.bx.psu.edu/repos/devteam/fastqc/fastqc/0.72 2019-03-01 97
194 2. Training parameters 187 2. Training parameters
195 - "max_evals": The hyperparameters of the neural network are tuned using a Bayesian optimisation approach and multiple configurations are sampled from different ranges of parameters. The number specified in this parameter is the number of hyperparameter configurations evaluated during optimisation. The higher the number, the longer the tool runs. 188 - "max_evals": The hyperparameters of the neural network are tuned using a Bayesian optimisation approach and multiple configurations are sampled from different ranges of parameters. The number specified in this parameter is the number of hyperparameter configurations evaluated during optimisation. The higher the number, the longer the tool runs.
196 - "optimize_n_epochs": This number specifies how many training iterations the neural network executes to evaluate each sampled configuration. 189 - "optimize_n_epochs": This number specifies how many training iterations the neural network executes to evaluate each sampled configuration.
197 - "n_epochs": Once the best configuration of hyperparameters has been found, the neural network takes this configuration and runs for "n_epochs" iterations, minimising the error to produce the final model. 190 - "n_epochs": Once the best configuration of hyperparameters has been found, the neural network takes this configuration and runs for "n_epochs" iterations, minimising the error to produce the final model.
198 - "test_share": It specifies the size of the test set. For example, if it is 0.5, then the test set is half of the entire data available. It should not be set to more than 0.5. This set is used to evaluate prediction precision on unseen data (a split sketch follows this list). 191 - "test_share": It specifies the size of the test set. For example, if it is 0.5, then the test set is half of the entire data available. It should not be set to more than 0.5. This set is used to evaluate prediction precision on unseen data (a split sketch follows this list).
199 - "validation_share": It specifies the size of the validation set. For example, if it is 0.5, then the validation set is half of the entire data available. It should not be set to more than 0.5. This set is used for computing error while training on the best configuration.
200 192
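As referenced in the "test_share" item above, here is a minimal sketch of how such shares partition the data (illustrative only; the tool's actual splitting logic may differ, and the newer revision drops the separate validation share)::

    def split_sizes(n_samples, test_share=0.2, validation_share=0.2):
        # Carve the samples into train/validation/test partitions by share.
        n_test = int(n_samples * test_share)
        n_validation = int(n_samples * validation_share)
        n_train = n_samples - n_test - n_validation
        return n_train, n_validation, n_test

    print(split_sizes(1000))  # -> (600, 200, 200)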
201 3. Neural network parameters: 193 3. Neural network parameters:
202 - "batch_size": The neural network is trained using batch learning: the training data is divided into equal batches and, for each epoch (a training iteration), the network is trained on each batch in turn. A value that is too high or too low can destabilise training, so this parameter should be optimised. 194 - "batch_size": The neural network is trained using batch learning: the training data is divided into equal batches and, for each epoch (a training iteration), the network is trained on each batch in turn. A value that is too high or too low can destabilise training, so this parameter should be optimised.
203 - "units": This number is the number of hidden recurrent units. A higher number means stronger learning (may lead to overfitting) and a lower number means weaker learning (may lead to underfitting). Therefore, this number should be optimised. 195 - "units": This number is the number of hidden recurrent units. A higher number means stronger learning (may lead to overfitting) and a lower number means weaker learning (may lead to underfitting). Therefore, this number should be optimised.
204 - "embedding_size": For each tool, a fixed-size vector is learned and this fixed size is known as the embedding size. This size remains the same for all tools. A lower number may underfit and a higher number may overfit. This parameter should be optimised as well. 196 - "embedding_size": For each tool, a fixed-size vector is learned and this fixed size is known as the embedding size. This size remains the same for all tools. A lower number may underfit and a higher number may overfit. This parameter should be optimised as well.
205 - "dropout": A neural network tends to overfit (especially when it is stronger). Therefore, to avoid or minimize overfitting, dropout is used. The fraction specified by dropout is the fraction of units "deleted" randomly from the network to impose randomness which helps in avoiding overfitting. This parameter should be optimised as well. 197 - "dropout": A neural network tends to overfit (especially when it is stronger). Therefore, to avoid or minimize overfitting, dropout is used. The fraction specified by dropout is the fraction of units "deleted" randomly from the network to impose randomness which helps in avoiding overfitting. This parameter should be optimised as well.
206 - "spatial_dropout": Similar to dropout, this is used to reduce overfitting in the embedding layer. This parameter should be optimised as well. 198 - "spatial_dropout": Similar to dropout, this is used to reduce overfitting in the embedding layer. This parameter should be optimised as well.
207 - "recurrent_dropout": Similar to dropout and spatial dropout, this is used to reduce overfitting in the recurrent layers (hidden). This parameter should be optimised as well. 199 - "recurrent_dropout": Similar to dropout and spatial dropout, this is used to reduce overfitting in the recurrent layers (hidden). This parameter should be optimised as well.
208 - "learning_rate": The learning rate specifies the speed of learning. A higher value gives faster learning (the optimiser may diverge) and a lower value slower learning (it may not reach the optimum). This parameter should be optimised as well (a network sketch follows this list). 200 - "learning_rate": The learning rate specifies the speed of learning. A higher value gives faster learning (the optimiser may diverge) and a lower value slower learning (it may not reach the optimum). This parameter should be optimised as well (a network sketch follows this list).
209 - "activation_recurrent": Activations are mathematical functions to transform input into output. This takes the name of an activation function from the list of Keras activations (https://keras.io/activations/) for recurrent layers.
210 - "activation_output": This takes the activation for transforming the input of the last layer to the output of the neural network. It is also taken from Keras activations (https://keras.io/activations/).
211 201
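To show how these parameters fit together, here is a minimal Keras sketch of such a recurrent network. It is an illustration under assumptions, not the tool's exact architecture: the vocabulary size, layer count, GRU choice and mid-range parameter values are all made up for the example::

    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Embedding, GRU, SpatialDropout1D
    from keras.optimizers import RMSprop

    n_tools = 500   # example vocabulary size (number of distinct tools)
    max_len = 25    # maximum tool-sequence length

    model = Sequential([
        # "embedding_size": a fixed-size vector is learned per tool
        Embedding(n_tools + 1, 128, input_length=max_len, mask_zero=True),
        SpatialDropout1D(0.2),                      # "spatial_dropout"
        # "units" recurrent units with "activation_recurrent"
        # and "recurrent_dropout"
        GRU(128, activation="elu", recurrent_dropout=0.2,
            return_sequences=True),
        Dropout(0.2),                               # "dropout"
        GRU(128, activation="elu", recurrent_dropout=0.2),
        Dropout(0.2),
        Dense(n_tools + 1, activation="sigmoid"),   # "activation_output"
    ])
    model.compile(loss="binary_crossentropy",
                  optimizer=RMSprop(learning_rate=0.001))  # "learning_rate"

With a sigmoid output, several candidate next tools can score highly at the same time, which suits multi-label recommendation.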
212 ----- 202 -----
213 203
204
214 **Output file** 205 **Output file**
215 206
216 The output file (model) is an HDF5 file (http://docs.h5py.org/en/latest/high/file.html) containing multiple attributes such as a dictionary of tools, the neural network configuration and weights for each layer, the weights of all tools and so on. After the tool has finished executing, the model file can be downloaded and placed at "/galaxy/database/" inside a Galaxy instance codebase. To see the recommended tools (and enable the UI integrations) in Galaxy, the following changes should be made to the "galaxy.yml" file: 207 The output file (model) is an HDF5 file (http://docs.h5py.org/en/latest/high/file.html) containing multiple attributes such as a dictionary of tools, the neural network configuration and weights for each layer, the weights of all tools and so on. After the tool has finished executing, the model file can be downloaded and placed at "/galaxy/database/" inside a Galaxy instance codebase. To see the recommended tools (and enable the UI integrations) in Galaxy, the following changes should be made to the "galaxy.yml" file:
217 208
218 - Enable and then set the property "enable_tool_recommendation" to "true". 209 - Enable and then set the property "enable_tool_recommendation" to "true".
219 - Enable and then set the property "model_path" to "database/<<model_file_name>>".
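For orientation, the corresponding snippet in "galaxy.yml" might look like the following (the top-level section name is an assumption about the Galaxy config layout, and the model file name placeholder is kept as in the text above)::

    galaxy:
      enable_tool_recommendation: true
      model_path: database/<<model_file_name>>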
220 210
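To inspect what was saved, here is a minimal h5py sketch (h5py is assumed to be installed and the file name is a placeholder; the key names match the test assertions above)::

    import h5py

    # Print the top-level keys stored in the trained model file.
    with h5py.File("tool_recommendation_model.hdf5", "r") as model_file:
        for key in model_file.keys():
            print(key)  # best_parameters, class_weights, model_config, ...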
221 ]]> 211 ]]>
222 </help> 212 </help>
223 <citations> 213 <citations>
224 <citation type="bibtex"> 214 <citation type="bibtex">
225 @ARTICLE{anuprulez_galaxytools, 215 @ARTICLE{anuprulez_galaxytools,
226 Author = {Anup Kumar and Björn Grüning}, 216 Author = {Anup Kumar and Björn Grüning},
227 keywords = {bioinformatics, recommendation system, deep learning}, 217 keywords = {bioinformatics, recommendation system, deep learning},
228 title = {{Tool recommendation system for Galaxy workflows}}, 218 title = {{Tool recommendation system for Galaxy}},
229 url = {https://github.com/bgruening/galaxytools} 219 url = {https://github.com/bgruening/galaxytools}
230 } 220 }
231 </citation> 221 </citation>
232 </citations> 222 </citations>
233 </tool> 223 </tool>