comparison immuneml_train_repert.xml @ 0:629e7e403e19 draft

"planemo upload commit 2fed2858d4044a3897a93a5604223d1d183ceac0-dirty"
author immuneml
date Thu, 01 Jul 2021 11:36:43 +0000
parents
children ed3932e6d616
1 <tool id="novice_immuneml_interface" name="Train immune repertoire classifiers (simplified interface)" version="@VERSION@.0">
2 <description></description>
3 <macros>
4 <import>prod_macros.xml</import>
5 </macros>
6 <expand macro="requirements" />
7 <command><![CDATA[
8
9 #if $iml_input
10 cp -r ${iml_input.extra_files_path}/result/* . &&
11 (mv repertoires/* . &>/dev/null || :) &&
12 rm -rf repertoires &&
13 #end if
14
15 python '$__tool_directory__/build_yaml_from_arguments_wrapper.py' --output_path $specs.files_path
16 #if $labels
17 --labels "$labels"
18 #end if
19 #if $ml_methods
20 #set methods_splitted = str($ml_methods).replace(",", " ")
21 --ml_methods $methods_splitted
22 #end if
23 #if $training_percentage
24 --training_percentage $training_percentage
25 #end if
26 #if $split_count
27 --split_count $split_count
28 #end if
29 #if $sequence_cond.sequence_type
30 --sequence_type $sequence_cond.sequence_type
31 #end if
32 #if $sequence_cond.sequence_type == "subsequence"
33 --position_type $sequence_cond.position_type
34 --gap_type $sequence_cond.gap_cond.gap_type
35 #if $sequence_cond.gap_cond.gap_type == "ungapped"
36 --k $sequence_cond.gap_cond.k
37 #end if
38 #if $sequence_cond.gap_cond.gap_type == "gapped"
39 --k_left $sequence_cond.gap_cond.k_left
40 --k_right $sequence_cond.gap_cond.k_right
41 --min_gap $sequence_cond.gap_cond.min_gap
42 --max_gap $sequence_cond.gap_cond.max_gap
43 #end if
44 #end if
45 #if $reads
46 --reads $reads
47 #end if
48
49 && cp ${specs.files_path}/specs.yaml yaml_copy &&
50
51 immune-ml ./yaml_copy ${html_outfile.files_path} --tool GalaxyTrainMLModel
52
53 && mv ${html_outfile.files_path}/index.html ${html_outfile}
54 && mv ${specs.files_path}/specs.yaml ${specs}
55 && mv ${html_outfile.files_path}/immuneML_output.zip $archive
56 && mv ${html_outfile.files_path}/exported_models/*.zip ${optimal_model}
57 ]]>
58 </command>
59 <inputs>
60 <param name="iml_input" type="data" format="iml_dataset" label="Input dataset (immune repertoires)" help="Here you can select an immuneML dataset containing a repertoire dataset, as produced by the ‘Create dataset’ tool. Please make sure your dataset contains enough repertoires; we recommend using at least 50. The minimum number of repertoires needed to run this tool successfully is 14 (for example: 7 diseased and 7 healthy). More repertoires are needed if your dataset is imbalanced (many more diseased than healthy repertoires, or vice versa), or if you decrease the percentage of data that is used for training."/>
61 <param type="text" name="labels" optional="false" label="Which property (“label”) of the repertoires would you like to predict?" help="The repertoire property to predict could, for example, be a disease status. This property must be present as a label in the repertoire metadata."/>
62
63 <conditional name="sequence_cond">
64 <param type="select" name="sequence_type" label="I assume that the true class of a repertoire (for example: disease status) can be determined based on the presence of..." display="radio" help="See 'Encoding' in the tool description.">
65 <option value="subsequence">Similar (but not identical) CDR3 sequences, or identical subsequences</option>
66 <option value="complete">Complete and identical receptor sequences</option>
67 </param>
68 <when value="subsequence">
69 <param type="boolean" name="position_type" label="If the same CDR3 subsequence occurs at different positions in two receptors, is this expected to be the same signal?"
70 truevalue="invariant" falsevalue="positional" checked="true"/>
71 <conditional name="gap_cond">
72 <param type="select" name="gap_type" label="The signal is expected to correspond to:" display="radio">
73 <option value="ungapped">Contiguous subsequences of amino acids</option>
74 <option value="gapped">Subsequences of amino acids separated by a gap</option>
75 </param>
76 <when value="ungapped">
77 <param type="integer" name="k" label="Given a contiguous subsequence of amino acids containing a signal, the expected length of this subsequence is:" value="3" min="0"/>
78 </when>
79 <when value="gapped">
80 <param type="integer" name="k_left" label="Given a gapped signal, the sequence length before the gap is:" value="2" min="0"/>
81 <param type="integer" name="k_right" label="And the sequence length after the gap is:" value="2" min="0"/>
82 <param type="integer" name="min_gap" label="While the minimal gap length is:" value="0" min="0"/>
83 <param type="integer" name="max_gap" label="And the maximal gap length is:" value="5" min="0"/>
84 </when>
85 </conditional>
86 </when>
87 </conditional>
88 <param type="select" name="reads" label="I assume that" display="radio" help="If only the presence/absence of a clonotype matters, the read frequency (‘count’) information is ignored. Otherwise, the importance of a sequence or subsequence is scaled by its read frequency, and large clonotypes will have more influence on the ML model and its results.">
89 <option value="unique">Only the presence/absence of a clone matters</option>
90 <option value="all">The frequency of a clone matters</option>
91 </param>
92
93 <param type="select" name="ml_methods" label="Which ML methods would you like to include?" help="For each ML method, the optimal hyperparameter settings are determined, and the performance of the methods is compared."
94 display="checkboxes" multiple="true">
95 <option value="RandomForestClassifier">Random forest</option>
96 <option value="SimpleLogisticRegression">Logistic regression</option>
97 <option value="SVM">Support Vector Machine</option>
98 <option value="KNN">K-nearest neighbors</option>
99 </param>
100
101 <param type="integer" name="training_percentage" label="Percentage of data that is used for training + validation (the remainder is used for testing):" value="70" min="50" max="90" help="This part of the data is used for training the classifier, i.e., learning the relevant patterns in the data and determining the optimal hyperparameter settings for the classifier. The remaining data is used to test the performance of the classifier. There is no fixed rule for the optimal percentage of training data, but typically a value between 60 and 80% is chosen."/>
102 <param type="integer" name="split_count" label="Number of times to repeat the training process with different random splits of data:" value="5" min="0" help="This is the number of times the data is randomly split into training + validation and test sets. The more often the experiment is repeated, the better the performance of the ML models can be estimated, but the longer the analysis will take to complete."/>
103
104 </inputs>
105 <outputs>
106 <data format="txt" name="specs" label="repertoire_classification.yaml"/>
107 <data format="zip" name="optimal_model" label="optimal_ml_settings.zip"/>
108 <data format="zip" name="archive" label="Archive: repertoire classification"/>
109 <data format="html" name="html_outfile" label="Summary: repertoire classification"/>
110 </outputs>
111
112
113 <help><![CDATA[
114 The purpose of this tool is to train machine learning (ML) models to predict a characteristic per immune repertoire, such as
115 a disease status. One or more ML models are trained to classify repertoires based on the information within the sets of CDR3 sequences. Finally, the performance
116 of the different methods is compared.
117 Alternatively, if you want to predict a property per immune receptor, such as antigen specificity, check out the
118 `Train immune receptor classifiers (simplified interface) <https://galaxy.immuneml.uio.no/root?tool_id=immuneml_train_classifiers>`_ tool instead.
119
120 The full documentation can be found `here <https://docs.immuneml.uio.no/galaxy/galaxy_simple_repertoires.html>`_.
121
122 **Basic terminology**
123
124 In the context of ML, the characteristics to predict per repertoire are called **labels** and the values that these labels can take on are **classes**.
125 One could thus have a label named ‘CMV_status’ with possible classes ‘positive’ and ‘negative’. The labels and classes must be present in the metadata
126 file, in columns whose headers are the label names and whose values are the classes.
127
128 .. image:: https://docs.immuneml.uio.no/_images/metadata_repertoire_classification.png
129 :height: 150
130
131 |
132
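For illustration only, a metadata table with such a label column could be written out as in the following minimal Python sketch (the file names and the columns other than the ‘CMV_status’ label are hypothetical placeholders, not a prescribed format)::

    import pandas as pd

    # One row per repertoire file; the 'CMV_status' column is the label,
    # and its values ('positive'/'negative') are the classes.
    metadata = pd.DataFrame({
        "filename": ["subject1.tsv", "subject2.tsv", "subject3.tsv"],
        "CMV_status": ["positive", "negative", "positive"],
    })
    metadata.to_csv("metadata.csv", index=False)
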
133 When training an ML model, the goal is for the model to learn **signals** within the data which discriminate between the different classes. An ML model
134 that predicts classes is also referred to as a **classifier**. A signal can have a variety of definitions, including the presence of specific receptors,
135 groups of similar receptors, or short CDR3 subsequences in an immune repertoire. Our assumptions about what makes up a ‘signal’ determine how we
136 should represent our data to the ML model. This representation is called **encoding**. In this tool, the encoding is automatically chosen based on
137 the user's assumptions about the dataset.
138
139
140 .. image:: https://docs.immuneml.uio.no/_images/repertoire_classification_overview.png
141 :height: 500
142
143 |
144 |
145
146 **An overview of the components of the immuneML repertoire classification tool.**
147 immuneML reads in repertoire data with labels (+ and -), encodes the
148 data, trains user-specified ML models and summarizes the performance statistics per ML method.
149 Encoding: different forms of encoding are shown: full sequence encoding, and position-dependent and position-invariant subsequence encoding.
150 The disease-associated sequences or subsequences are highlighted with color. The different colors represent independent elements of the disease signal.
151 Each color represents one (sub)sequence, and position-dependent subsequences can only have the same color when they occur in the same position,
152 although different colors (i.e., different nucleotide or amino acid sequences) may occur in the same position.
153 Training: the training and validation data is used to train ML models and find the optimal hyperparameters through 5-fold cross-validation.
154 The test set is left out and is used to obtain a fair estimate of the model performance.
155
156 **Encoding**
157
158 The simplest encoding represents an immune repertoire based on the full CDR3 sequences that it contains. This means the ML models will learn
159 which CDR3 sequences are more often present in repertoires of the ‘positive’ or ‘negative’ class. It also means that two similar (non-identical) CDR3 sequences
160 are treated as independent pieces of information; if a particular sequence often occurs in diseased repertoires, then finding a similar sequence in a
161 new repertoire provides no evidence that this repertoire is also diseased.
162
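As a rough illustration of this idea (not immuneML's actual implementation), a presence/absence encoding over full CDR3 sequences could be sketched as follows; the repertoire names and sequences are made up::

    # Hypothetical repertoires: each is a set of CDR3 amino acid sequences.
    repertoires = {
        "subject1": {"CASSLGTDTQYF", "CASSIRSSYEQYF"},
        "subject2": {"CASSLGTDTQYF", "CASRPDRGYEQYF"},
    }

    # The feature space is the union of all full sequences seen in the data.
    vocabulary = sorted(set().union(*repertoires.values()))

    # Each repertoire becomes a 0/1 vector: does it contain that exact sequence?
    encoded = {name: [int(seq in reps) for seq in vocabulary]
               for name, reps in repertoires.items()}
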
163 Other encoding variants are based on shorter subsequences (e.g., 3 – 5 amino acids long, also referred to as k-mers) in the CDR3 regions of an immune repertoire. With this
164 encoding, the CDR3 regions are divided into overlapping subsequences and the (disease) signal may be characterized by the presence or absence of
165 certain sequence motifs in the CDR3 regions. Here, two similar CDR3 sequences are no longer independent, because they contain many identical subsequences.
166 A graphical representation of how a CDR3 sequence can be divided into k-mers, and how these k-mers can relate to specific positions in a 3D immune receptor
167 (here: antibody) is shown in this figure:
168
169 .. image:: https://docs.immuneml.uio.no/_images/3mer_to_3d.png
170 :height: 250
171
172 |
173
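To make this concrete, overlapping 3-mers could be extracted from a single CDR3 sequence roughly as follows (an illustrative sketch, not immuneML's k-mer encoder; the example sequence is arbitrary)::

    def kmers(cdr3, k=3):
        # All overlapping subsequences of length k in the CDR3.
        return [cdr3[i:i + k] for i in range(len(cdr3) - k + 1)]

    print(kmers("CASSLGTDTQYF"))
    # ['CAS', 'ASS', 'SSL', 'SLG', 'LGT', 'GTD', 'TDT', 'DTQ', 'TQY', 'QYF']
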
174 The subsequences may be position-dependent or position-invariant. Position-invariant means that if a subsequence, e.g., ‘EDNA’, occurs in different positions
175 in the CDR3, it will still be considered the same signal. This is not the case for position-dependent subsequences: if ‘EDNA’ often occurs at the
176 beginning of the CDR3 in diseased repertoires, then finding ‘EDNA’ at the end of a CDR3 in a new repertoire will be considered unrelated. Positions
177 are determined based on the IMGT numbering scheme.
178
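Continuing the sketch above, position-dependent subsequences can be thought of as the same k-mers paired with a position; here the start index within the CDR3 is used as a simple stand-in for the actual IMGT position::

    def positional_kmers(cdr3, k=4):
        # Pair each subsequence with where it starts, so 'EDNA' near the start
        # and 'EDNA' near the end of a CDR3 count as different features.
        return [(i, cdr3[i:i + k]) for i in range(len(cdr3) - k + 1)]

    print(positional_kmers("CEDNAYF"))
    # [(0, 'CEDN'), (1, 'EDNA'), (2, 'DNAY'), (3, 'NAYF')]
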
179 Finally, it is possible to introduce gaps in the encoding of subsequences (not shown in the Figure). In this case, a motif is defined by two
180 subsequences separated by a region of varying nucleotide or amino acid length. Thus, the subsequences ‘EDNA’, ‘EDGNA’ and ‘EDGAGAGNA’ may all be
181 considered to be part of the same motif: ‘ED’ followed by ‘NA’ with a gap of 0 – 5 amino acids in between.
182
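A gapped motif such as ‘ED’ followed by ‘NA’ with a gap of 0 – 5 amino acids could be matched roughly like this (again only an illustrative sketch, not immuneML's gapped k-mer encoding)::

    import re

    def matches_gapped_motif(cdr3, left="ED", right="NA", min_gap=0, max_gap=5):
        # Allow between min_gap and max_gap arbitrary residues in the gap.
        pattern = f"{left}.{{{min_gap},{max_gap}}}{right}"   # here: ED.{0,5}NA
        return re.search(pattern, cdr3) is not None

    for seq in ["EDNA", "EDGNA", "EDGAGAGNA", "EDGAGAGAGNA"]:
        print(seq, matches_gapped_motif(seq))
    # EDNA, EDGNA and EDGAGAGNA match; EDGAGAGAGNA does not (gap of 7).
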
183 Note that in any case, the (sub)sequences that are associated with the ‘positive’ class may still be present in the ‘negative’ class, albeit at a lower rate.
184
185
186
187 **Training a machine learning model**
188
189 Training an ML model means optimizing the **parameters** for the model with the goal of predicting the correct class of an (unseen) immune repertoire.
190 Different ML methods require different training procedures. In addition to the model parameters, there are the **hyperparameters**, which
191 do not directly determine the predictions of a model but instead control the learning process (for example: the learning speed).
192
193 The immune repertoires are divided into sets with different purposes: the training and validation sets are used for finding the optimal parameters
194 and hyperparameters respectively. The test set is held out, and is only used to estimate the performance of a trained model.
195
196 In this tool, a range of plausible hyperparameters has been predefined for each ML method. The optimal hyperparameters are found by splitting the
197 training/validation data into 5 equal portions, where 4 portions are used to train the ML model (with different hyperparameters) and the remaining
198 portion is used to validate the performance of these hyperparameter settings. This is repeated 5 times such that each portion has been used for
199 validation once. With the best hyperparameters found in the 5 repetitions, a final model is trained using all 5 portions of the data. This procedure
200 is also referred to as 5-fold cross-validation. Note that this 5-fold cross-validation is separate from the number of times the splitting into
201 training + validation and testing sets is done (see the overview figure).
202
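The procedure described above roughly corresponds to the following scikit-learn pattern (a simplified sketch on a generic, already encoded feature matrix; immuneML's actual hyperparameter search is configured through its YAML specification, and the hyperparameter grid shown here is made up)::

    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import GridSearchCV, train_test_split

    # Placeholder encoded data: rows = repertoires, columns = encoded features.
    X, y = make_classification(n_samples=100, n_features=50, random_state=0)

    split_count, training_percentage = 5, 0.7
    for split in range(split_count):
        # Outer split into training + validation and test sets.
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, train_size=training_percentage, random_state=split)

        # Inner 5-fold cross-validation over a predefined hyperparameter grid,
        # refitting the best settings on all training + validation data.
        search = GridSearchCV(LogisticRegression(max_iter=1000),
                              param_grid={"C": [0.01, 0.1, 1, 10]}, cv=5)
        search.fit(X_train, y_train)

        # The held-out test set is only used to estimate performance.
        print(f"split {split}: test accuracy = {search.score(X_test, y_test):.2f}")
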
203 Finally, the whole process is repeated one or more times with different randomly selected repertoires in the test set, to see how robust the performance
204 of the ML methods is. The number of times to repeat this splitting into training + validation and test sets is determined in the last question.
205
206
207 **Tool output**
208
209 This Galaxy tool will produce the following history elements:
210
211 - Summary: repertoire classification: an HTML page that allows you to browse through all results, including prediction accuracies on
212 the various data splits and plots showing the performance of classifiers and learned parameters.
213
214 - Archive: repertoire classification: a .zip file containing the complete output folder as it was produced by immuneML. This folder
215 contains the output of the TrainMLModel instruction, including all trained models, their predictions, and report results.
216 Furthermore, the folder contains the complete YAML specification file for the immuneML run, the HTML output and a log file.
217
218 - optimal_ml_settings.zip: a .zip file containing the raw files for the optimal trained ML settings (ML model, encoding).
219 This .zip file can subsequently be used as an input when `applying previously trained ML models to a new AIRR dataset in Galaxy <https://docs.immuneml.uio.no/galaxy/galaxy_apply_ml_models.html>`_.
220
221 - repertoire_classification.yaml: the YAML specification file that was used by immuneML internally to run the analysis. This file can be
222 downloaded, altered, and run again by immuneML using the `Train machine learning models <https://galaxy.immuneml.uio.no/root?tool_id=immuneml_train_ml_model>`_ Galaxy tool.
223
224 **More analysis options**
225
226 A limited selection of immuneML options is available through this tool. If you wish to have full control of the analysis, consider using
227 the `Train machine learning models <https://galaxy.immuneml.uio.no/root?tool_id=immuneml_train_ml_model>`_ Galaxy tool.
228 That tool provides additional encodings and machine learning methods to choose from, as well as
229 data preprocessing options and settings for hyperparameter optimization. Its YAML-based interface requires more independence and knowledge about
230 machine learning from the user.
231
232 ]]>
233 </help>
234
235 </tool>