Mercurial > repos > bgruening > svm_classifier
comparison svm.xml @ 0:9a9396e5d153 draft
planemo upload for repository https://github.com/bgruening/galaxytools/tools/sklearn commit 0e582cf1f3134c777cce3aa57d71b80ed95e6ba9
author | bgruening |
---|---|
date | Fri, 16 Feb 2018 09:16:30 -0500 |
parents | |
children | 78c664cc1841 |
comparison
equal
deleted
inserted
replaced
-1:000000000000 | 0:9a9396e5d153 |
---|---|
<tool id="svm_classifier" name="Support vector machines (SVMs)" version="@VERSION@">
    <description>for classification</description>
    <macros>
        <import>main_macros.xml</import>
        <!-- macro name="class_weight" argument="class_weight"-->
    </macros>
    <expand macro="python_requirements"/>
    <expand macro="macro_stdio"/>
    <version_command>echo "@VERSION@"</version_command>
    <command><![CDATA[
    python "$svc_script" '$inputs'
]]>
    </command>
    <configfiles>
        <inputs name="inputs"/>
        <configfile name="svc_script">
<![CDATA[
import sys
import json
import numpy as np
import sklearn.svm
import pandas
import pickle

# Galaxy serializes all tool parameters to a JSON file whose path is argv[1].
input_json_path = sys.argv[1]
params = json.load(open(input_json_path, "r"))

#if $selected_tasks.selected_task == "load":

## "load" task: restore a previously fitted model and predict on new data.
## NOTE(security): pickle.load on a user-supplied model file can execute
## arbitrary code; acceptable only because Galaxy controls the inputs.
classifier_object = pickle.load(open("$infile_model", 'rb'))

data = pandas.read_csv("$selected_tasks.infile_data", sep='\t', header=0, index_col=None, parse_dates=True, encoding=None, tupleize_cols=False )
prediction = classifier_object.predict(data)
prediction_df = pandas.DataFrame(prediction)
## Append the prediction as an extra column next to the input features.
res = pandas.concat([data, prediction_df], axis=1)
res.to_csv(path_or_buf = "$outfile_predict", sep="\t", index=False)

#else:

## "train" task: last column of the training table is the label,
## all preceding columns are features.
data_train = pandas.read_csv("$selected_tasks.infile_train", sep='\t', header=0, index_col=None, parse_dates=True, encoding=None, tupleize_cols=False )

## Positional selection: .iloc replaces the deprecated .ix indexer
## (the slice here was always positional, so behavior is unchanged).
data = data_train.iloc[:, 0:len(data_train.columns)-1]
labels = np.array(data_train[data_train.columns[len(data_train.columns)-1]])

options = params["selected_tasks"]["selected_algorithms"]["options"]
selected_algorithm = params["selected_tasks"]["selected_algorithms"]["selected_algorithm"]

## json.load yields unicode; older sklearn requires a plain str kernel name.
## LinearSVC has no "kernel" option, hence the exclusion.
if not(selected_algorithm=="LinearSVC"):
    if options["kernel"]:
        options["kernel"] = str(options["kernel"])

## Instantiate the chosen SVM class (SVC / NuSVC / LinearSVC) by name and fit.
my_class = getattr(sklearn.svm, selected_algorithm)
classifier_object = my_class(**options)
classifier_object.fit(data,labels)

## Pickle must be written in binary mode ('wb') to match the 'rb' used when
## loading above; text mode ('w+') corrupts binary pickle protocols.
pickle.dump(classifier_object,open("$outfile_fit", 'wb'))

#end if

]]>
        </configfile>
    </configfiles>
    <inputs>
        <expand macro="train_loadConditional" model="zip">
            <param name="selected_algorithm" type="select" label="Classifier type">
                <option value="SVC">C-Support Vector Classification</option>
                <option value="NuSVC">Nu-Support Vector Classification</option>
                <option value="LinearSVC">Linear Support Vector Classification</option>
            </param>
            <when value="SVC">
                <expand macro="svc_advanced_options">
                    <expand macro="C"/>
                </expand>
            </when>
            <when value="NuSVC">
                <expand macro="svc_advanced_options">
                    <param argument="nu" type="float" optional="true" value="0.5" label="Nu control parameter" help="Controls the number of support vectors. Should be in the interval (0, 1]. "/>
                </expand>
            </when>
            <when value="LinearSVC">
                <section name="options" title="Advanced Options" expanded="False">
                    <expand macro="C"/>
                    <expand macro="tol" default_value="0.001" help_text="Tolerance for stopping criterion. "/>
                    <expand macro="random_state" help_text="Integer number. The seed of the pseudo random number generator to use when shuffling the data for probability estimation. A fixed seed allows reproducible results."/>
                    <!--expand macro="class_weight"/-->
                    <param argument="max_iter" type="integer" optional="true" value="1000" label="Maximum number of iterations" help="The maximum number of iterations to be run."/>
                    <param argument="loss" type="select" label="Loss function" help="Specifies the loss function. ''squared_hinge'' is the square of the hinge loss.">
                        <option value="squared_hinge" selected="true">Squared hinge</option>
                        <option value="hinge">Hinge</option>
                    </param>
                    <param argument="penalty" type="select" label="Penalization norm" help=" ">
                        <option value="l1" >l1</option>
                        <option value="l2" selected="true">l2</option>
                    </param>
                    <!-- NOTE(review): falsevalue="boolflase" looks like a typo for
                         "boolfalse"; left unchanged because the consuming macro
                         contract is defined in main_macros.xml, not visible here —
                         confirm before renaming. -->
                    <param argument="dual" type="boolean" optional="true" truevalue="booltrue" falsevalue="boolflase" checked="true" label="Use the shrinking heuristic" help="Select the algorithm to either solve the dual or primal optimization problem. Prefer dual=False when n_samples > n_features."/>
                    <param argument="multi_class" type="select" label="Multi-class strategy" help="Determines the multi-class strategy if y contains more than two classes.">
                        <option value="ovr" selected="true">ovr</option>
                        <option value="crammer_singer" >crammer_singer</option>
                    </param>
                    <param argument="fit_intercept" type="boolean" optional="true" truevalue="booltrue" falsevalue="boolflase" checked="true" label="Calculate the intercept for this model" help="If set to false, data is expected to be already centered."/>
                    <param argument="intercept_scaling" type="float" optional="true" value="1" label="Add synthetic feature to the instance vector" help=" "/>
                </section>
            </when>
        </expand>
    </inputs>

    <expand macro="output"/>

    <tests>
        <test>
            <param name="infile_train" value="train_set.tabular" ftype="tabular"/>
            <param name="selected_task" value="train"/>
            <param name="selected_algorithm" value="SVC"/>
            <param name="random_state" value="5"/>
            <output name="outfile_fit" file="svc_model01.txt"/>
        </test>
        <test>
            <param name="infile_train" value="train_set.tabular" ftype="tabular"/>
            <param name="selected_task" value="train"/>
            <param name="selected_algorithm" value="NuSVC"/>
            <param name="random_state" value="5"/>
            <output name="outfile_fit" file="svc_model02.txt"/>
        </test>
        <test>
            <param name="infile_train" value="train_set.tabular" ftype="tabular"/>
            <param name="selected_task" value="train"/>
            <param name="selected_algorithm" value="LinearSVC"/>
            <param name="random_state" value="5"/>
            <output name="outfile_fit" file="svc_model03.txt"/>
        </test>
        <test>
            <param name="infile_model" value="svc_model01.txt" ftype="txt"/>
            <param name="infile_data" value="test_set.tabular" ftype="tabular"/>
            <param name="selected_task" value="load"/>
            <output name="outfile_predict" file="svc_prediction_result01.tabular"/>
        </test>
        <test>
            <param name="infile_model" value="svc_model02.txt" ftype="txt"/>
            <param name="infile_data" value="test_set.tabular" ftype="tabular"/>
            <param name="selected_task" value="load"/>
            <output name="outfile_predict" file="svc_prediction_result02.tabular"/>
        </test>
        <test>
            <param name="infile_model" value="svc_model03.txt" ftype="txt"/>
            <param name="infile_data" value="test_set.tabular" ftype="tabular"/>
            <param name="selected_task" value="load"/>
            <output name="outfile_predict" file="svc_prediction_result03.tabular"/>
        </test>
    </tests>
    <help><![CDATA[
**What it does**
This module implements the Support Vector Machine (SVM) classification algorithms.
Support vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.

**The advantages of support vector machines are:**

1- Effective in high dimensional spaces.

2- Still effective in cases where number of dimensions is greater than the number of samples.

3- Uses a subset of training points in the decision function (called support vectors), so it is also memory efficient.

4- Versatile: different Kernel functions can be specified for the decision function. Common kernels are provided, but it is also possible to specify custom kernels.

**The disadvantages of support vector machines include:**

1- If the number of features is much greater than the number of samples, the method is likely to give poor performances.

2- SVMs do not directly provide probability estimates, these are calculated using an expensive five-fold cross-validation

For more information check http://scikit-learn.org/stable/modules/svm.html

]]>
    </help>
    <expand macro="sklearn_citation"/>
</tool>