# replace with shebang for biocontainer
# see https://github.com/fubar2/toolfactory
#
# copyright ross lazarus (ross stop lazarus at gmail stop com) May 2012
#
# all rights reserved
# Licensed under the LGPL
# suggestions for improvement and bug fixes welcome at
# https://github.com/fubar2/toolfactory
#
# July 2020: BCC was fun and I feel like Rip van Winkle after 5 years.
# Decided to
# 1. Fix the ToolFactory so it works - done for the simplest case
# 2. Fix planemo so the ToolFactory function works
# 3. Rewrite bits using galaxyxml functions where that makes sense - done
#
# Removed all the old complications, including making the new tool use this same script.
# galaxyxml now generates the tool xml: https://github.com/hexylena/galaxyxml
# No support for automatic HTML file creation from arbitrary outputs.
# The essential problem is to create two command lines - one for the tool xml and a
# different one to run the executable with the supplied test data and settings.
# It is simpler to write the tool, then run it with planemo and soak up the test outputs.
# Well, well: sh run_tests.sh --id rgtf2 --report_file tool_tests_tool_conf.html functional.test_toolbox
# does the needful. Use GALAXY_TEST_SAVE /foo to save outputs - only the tar.gz, not the rest, sadly.
# GALAXY_TEST_NO_CLEANUP GALAXY_TEST_TMP_DIR=wherever
# planemo test --engine docker_galaxy --test_data ./test-data/ --docker_extra_volume ./test-data rgToolFactory2.xml
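# Illustrative sketch (not from the original header): for a hypothetical one-input,
# one-output filter script, the two command lines above differ only in whether concrete
# paths or Cheetah placeholders are substituted, e.g.
#   run for test outputs:  python /tmp/tool_script.py < test-data/input1_sample > outfile
#   tool xml <command>:    python $runme < $input1 > $outfile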

import argparse
import copy
import datetime
import grp
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
import time


from bioblend import ConnectionError
from bioblend import toolshed

import docker

import galaxyxml.tool as gxt
import galaxyxml.tool.parameters as gxtp

import lxml

import yaml

myversion = "V2.1 July 2020"
verbose = True
debug = True
toolFactoryURL = "https://github.com/fubar2/toolfactory"
ourdelim = "~~~"
ALOT = 10000000  # srsly. command or test overrides use read() so just in case
STDIOXML = """<stdio>
<exit_code range="100:" level="debug" description="shite happens" />
</stdio>"""

# --input_files="$intab.input_files~~~$intab.input_CL~~~$intab.input_formats\
# ~~~$intab.input_label~~~$intab.input_help"
IPATHPOS = 0
ICLPOS = 1
IFMTPOS = 2
ILABPOS = 3
IHELPOS = 4
IOCLPOS = 5

# --output_files "$otab.history_name~~~$otab.history_format~~~$otab.history_CL~~~$otab.history_test"
ONAMEPOS = 0
OFMTPOS = 1
OCLPOS = 2
OTESTPOS = 3
OOCLPOS = 4


# --additional_parameters="$i.param_name~~~$i.param_value~~~
# $i.param_label~~~$i.param_help~~~$i.param_type~~~$i.CL~~~$i.param_CLoverride"
ANAMEPOS = 0
AVALPOS = 1
ALABPOS = 2
AHELPPOS = 3
ATYPEPOS = 4
ACLPOS = 5
AOVERPOS = 6
AOCLPOS = 7
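# Illustrative example (not part of the original source): a single --input_files value
# such as "/tmp/in.tsv~~~infile~~~tabular~~~My input~~~Help text" splits on ourdelim
# into [path, CL name, format, label, help], addressed by IPATHPOS..IHELPOS above;
# cleanuppar() later appends a copy of the original CL token at IOCLPOS.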


foo = len(lxml.__version__)
# reference lxml so flake8 does not flag the import as unused
FAKEEXE = "~~~REMOVE~~~ME~~~"
# need this until a PR/version bump to fix galaxyxml prepending the exe even
# with override.


def timenow():
    """return current time as a string"""
    return time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(time.time()))


def quote_non_numeric(s):
    """return a prequoted string for non-numerics
    useful for perl and Rscript parameter passing?
    """
    try:
        _ = float(s)
        return s
    except ValueError:
        return '"%s"' % s


html_escape_table = {"&": "&amp;", ">": "&gt;", "<": "&lt;", "#": "&#35;", "$": "&#36;"}
cheetah_escape_table = {"$": "\\$", "#": "\\#"}


def html_escape(text):
    """Produce entities within text."""
    return "".join([html_escape_table.get(c, c) for c in text])


def cheetah_escape(text):
    """Produce entities within text."""
    return "".join([cheetah_escape_table.get(c, c) for c in text])


def html_unescape(text):
    """Revert entities within text. Multiple character targets so use replace"""
    t = text.replace("&amp;", "&")
    t = t.replace("&gt;", ">")
    t = t.replace("&lt;", "<")
    t = t.replace("\\$", "$")
    t = t.replace("&#36;", "$")
    t = t.replace("&#35;", "#")
    return t
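# Illustrative round trip (assumed, not in the original):
# cheetah_escape("echo $x # comment") -> "echo \$x \# comment", and
# html_unescape(html_escape("a & b")) -> "a & b".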


def parse_citations(citations_text):
    """Split citations_text into (citation_type, citation) tuples for galaxyxml"""
    citations = [c for c in citations_text.split("**ENTRY**") if c.strip()]
    citation_tuples = []
    for citation in citations:
        if citation.startswith("doi"):
            citation_tuples.append(("doi", citation[len("doi") :].strip()))
        else:
            citation_tuples.append(("bibtex", citation[len("bibtex") :].strip()))
    return citation_tuples
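# Illustrative input (assumed, not in the original): a citations_text such as
#   "doi 10.1093/bioinformatics/bts573**ENTRY**bibtex @article{...}"
# yields [("doi", "10.1093/bioinformatics/bts573"), ("bibtex", "@article{...}")].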


class ScriptRunner:
    """Wrapper for an arbitrary script
    uses galaxyxml
    """

    def __init__(self, args=None):
        """
        prepare command line cl for running the tool here
        and prepare elements needed for galaxyxml tool generation
        """
        self.ourcwd = os.getcwd()
        self.ourenv = copy.deepcopy(os.environ)
        self.infiles = [x.split(ourdelim) for x in args.input_files]
        self.outfiles = [x.split(ourdelim) for x in args.output_files]
        self.addpar = [x.split(ourdelim) for x in args.additional_parameters]
        self.args = args
        self.cleanuppar()
        self.lastclredirect = None
        self.lastxclredirect = None
        self.cl = []
        self.xmlcl = []
        self.is_positional = self.args.parampass == "positional"
        if self.args.sysexe:
            self.executeme = self.args.sysexe
        else:
            if self.args.packages:
                self.executeme = self.args.packages.split(",")[0].split(":")[0]
            else:
                self.executeme = None
        aCL = self.cl.append
        aXCL = self.xmlcl.append
        assert args.parampass in [
            "0",
            "argparse",
            "positional",
        ], 'args.parampass must be "0", "positional" or "argparse"'
        self.tool_name = re.sub("[^a-zA-Z0-9_]+", "", args.tool_name)
        self.tool_id = self.tool_name
        self.newtool = gxt.Tool(
            self.tool_name,
            self.tool_id,
            self.args.tool_version,
            self.args.tool_desc,
            FAKEEXE,
        )
        self.newtarpath = "toolfactory_%s.tgz" % self.tool_name
        self.tooloutdir = "./tfout"
        self.repdir = "./TF_run_report_tempdir"
        self.testdir = os.path.join(self.tooloutdir, "test-data")
        if not os.path.exists(self.tooloutdir):
            os.mkdir(self.tooloutdir)
        if not os.path.exists(self.testdir):
            os.mkdir(self.testdir)  # make tests directory
        if not os.path.exists(self.repdir):
            os.mkdir(self.repdir)
        self.tinputs = gxtp.Inputs()
        self.toutputs = gxtp.Outputs()
        self.testparam = []
        if self.args.script_path:
            self.prepScript()
        if self.args.command_override:
            scos = open(self.args.command_override, "r").readlines()
            self.command_override = [x.rstrip() for x in scos]
        else:
            self.command_override = None
        if self.args.test_override:
            stos = open(self.args.test_override, "r").readlines()
            self.test_override = [x.rstrip() for x in stos]
        else:
            self.test_override = None
        if self.args.cl_prefix:  # DIY CL start
            clp = self.args.cl_prefix.split(" ")
            for c in clp:
                aCL(c)
                aXCL(c)
        else:
            if self.args.script_path:
                aCL(self.executeme)
                aCL(self.sfile)
                aXCL(self.executeme)
                aXCL("$runme")
            else:
                aCL(self.executeme)  # this little CL will just run
                aXCL(self.executeme)
        self.elog = os.path.join(self.repdir, "%s_error_log.txt" % self.tool_name)
        self.tlog = os.path.join(self.repdir, "%s_runner_log.txt" % self.tool_name)

        if self.args.parampass == "0":
            self.clsimple()
        else:
            clsuffix = []
            xclsuffix = []
            for i, p in enumerate(self.infiles):
                if p[IOCLPOS] == "STDIN":
                    appendme = [
                        p[IOCLPOS],
                        p[ICLPOS],
                        p[IPATHPOS],
                        "< %s" % p[IPATHPOS],
                    ]
                    xappendme = [
                        p[IOCLPOS],
                        p[ICLPOS],
                        p[IPATHPOS],
                        "< $%s" % p[ICLPOS],
                    ]
                else:
                    appendme = [p[IOCLPOS], p[ICLPOS], p[IPATHPOS], ""]
                    xappendme = [p[IOCLPOS], p[ICLPOS], "$%s" % p[ICLPOS], ""]
                clsuffix.append(appendme)
                xclsuffix.append(xappendme)
            for i, p in enumerate(self.outfiles):
                if p[OCLPOS] == "STDOUT":
                    self.lastclredirect = [">", p[ONAMEPOS]]
                    self.lastxclredirect = [">", "$%s" % p[OCLPOS]]
                else:
                    clsuffix.append([p[OOCLPOS], p[OCLPOS], p[ONAMEPOS], ""])
                    xclsuffix.append([p[OOCLPOS], p[OCLPOS], "$%s" % p[ONAMEPOS], ""])
            for p in self.addpar:
                clsuffix.append([p[AOCLPOS], p[ACLPOS], p[AVALPOS], p[AOVERPOS]])
                xclsuffix.append(
                    [p[AOCLPOS], p[ACLPOS], '"$%s"' % p[ANAMEPOS], p[AOVERPOS]]
                )
            clsuffix.sort()
            xclsuffix.sort()
            self.xclsuffix = xclsuffix
            self.clsuffix = clsuffix
            if self.args.parampass == "positional":
                self.clpositional()
            else:
                self.clargparse()

    def prepScript(self):
        rx = open(self.args.script_path, "r").readlines()
        rx = [x.rstrip() for x in rx]
        rxcheck = [x.strip() for x in rx if x.strip() > ""]
        assert len(rxcheck) > 0, "Supplied script is empty. Cannot run"
        self.script = "\n".join(rx)
        fhandle, self.sfile = tempfile.mkstemp(
            prefix=self.tool_name, suffix="_%s" % (self.executeme)
        )
        tscript = open(self.sfile, "w")
        tscript.write(self.script)
        tscript.close()
        self.escapedScript = [cheetah_escape(x) for x in rx]
        self.spacedScript = [f" {x}" for x in rx]
        art = "%s.%s" % (self.tool_name, self.executeme)
        artifact = open(art, "wb")
        artifact.write(bytes("\n".join(self.escapedScript), "utf8"))
        artifact.close()

    def cleanuppar(self):
        """positional parameters are complicated by their numeric ordinal"""
        for i, p in enumerate(self.infiles):
            infp = copy.copy(p)
            if self.args.parampass == "positional":
                assert infp[
                    ICLPOS
                ].isdigit(), "Positional parameters must be ordinal integers - got %s for %s" % (
                    infp[ICLPOS],
                    infp[ILABPOS],
                )
            icl = infp[ICLPOS]
            infp.append(icl)
            if infp[ICLPOS].isdigit() or self.args.parampass == "0":
                scl = "input%d" % (i + 1)
                infp[ICLPOS] = scl
            self.infiles[i] = infp
        for i, p in enumerate(self.outfiles):
            if self.args.parampass == "positional" and p[OCLPOS] != "STDOUT":
                assert p[
                    OCLPOS
                ].isdigit(), "Positional parameters must be ordinal integers - got %s for %s" % (
                    p[OCLPOS],
                    p[ONAMEPOS],
                )
            p.append(p[OCLPOS])  # keep copy
            if p[OCLPOS].isdigit() or p[OCLPOS] == "STDOUT":
                scl = p[ONAMEPOS]
                p[OCLPOS] = scl
            self.outfiles[i] = p
        for i, p in enumerate(self.addpar):
            if self.args.parampass == "positional":
                assert p[
                    ACLPOS
                ].isdigit(), "Positional parameters must be ordinal integers - got %s for %s" % (
                    p[ACLPOS],
                    p[ANAMEPOS],
                )
            p.append(p[ACLPOS])
            if p[ACLPOS].isdigit():
                scl = "input%s" % p[ACLPOS]
                p[ACLPOS] = scl
            self.addpar[i] = p

    def clsimple(self):
        """no parameters - uses < and > for i/o"""
        aCL = self.cl.append
        aXCL = self.xmlcl.append

        if len(self.infiles) > 0:
            aCL("<")
            aCL(self.infiles[0][IPATHPOS])
            aXCL("<")
            aXCL("$%s" % self.infiles[0][ICLPOS])
        if len(self.outfiles) > 0:
            aCL(">")
            aCL(self.outfiles[0][OCLPOS])
            aXCL(">")
            aXCL("$%s" % self.outfiles[0][ONAMEPOS])

    def clpositional(self):
        # inputs in order then params
        aCL = self.cl.append
        for (o_v, k, v, koverride) in self.clsuffix:
            if " " in v:
                aCL("%s" % v)
            else:
                aCL(v)
        aXCL = self.xmlcl.append
        for (o_v, k, v, koverride) in self.xclsuffix:
            aXCL(v)
        if self.lastxclredirect:
            aXCL(self.lastxclredirect[0])
            aXCL(self.lastxclredirect[1])

    def clargparse(self):
        """argparse style"""
        aCL = self.cl.append
        aXCL = self.xmlcl.append
        # inputs then params in argparse named form
        for (o_v, k, v, koverride) in self.xclsuffix:
            if koverride > "":
                k = koverride
            elif len(k.strip()) == 1:
                k = "-%s" % k
            else:
                k = "--%s" % k
            aXCL(k)
            aXCL(v)
        for (o_v, k, v, koverride) in self.clsuffix:
            if koverride > "":
                k = koverride
            elif len(k.strip()) == 1:
                k = "-%s" % k
            else:
                k = "--%s" % k
            aCL(k)
            aCL(v)
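        # Illustrative result (assumed, not from the original source): with
        # parampass="argparse", an output on CL name "outtab" with history name
        # "tab_out" and a text parameter "sep" end up as
        #   self.xmlcl -> [..., "--outtab", "$tab_out", "--sep", '"$sep"']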

    def getNdash(self, newname):
        if self.is_positional:
            ndash = 0
        else:
            ndash = 2
            if len(newname) < 2:
                ndash = 1
        return ndash

    def doXMLparam(self):
        """flake8 made me do this..."""
        # --output_files "$otab.history_name~~~$otab.history_format~~~$otab.history_CL~~~$otab.history_test"
        for p in self.outfiles:
            newname, newfmt, newcl, test, oldcl = p
            test = test.strip()
            ndash = self.getNdash(newcl)
            aparm = gxtp.OutputData(
                name=newname, format=newfmt, num_dashes=ndash, label=newcl
            )
            aparm.positional = self.is_positional
            if self.is_positional:
                if oldcl == "STDOUT":
                    aparm.positional = 9999999
                    aparm.command_line_override = "> $%s" % newname
                else:
                    aparm.positional = int(oldcl)
                    aparm.command_line_override = "$%s" % newname
            self.toutputs.append(aparm)
            tp = None
            if test.strip() > "":
                if test.startswith("diff"):
                    c = "diff"
                    ld = 0
                    tparts = test.split(":")
                    if len(tparts) > 1 and tparts[1].isdigit():
                        ld = int(tparts[1])
                    tp = gxtp.TestOutput(
                        name=newcl,
                        value="%s_sample" % newcl,
                        format=newfmt,
                        compare=c,
                        lines_diff=ld,
                    )
                elif test.startswith("sim_size"):
                    c = "sim_size"
                    delta = None
                    delta_frac = None
                    tn = test.split(":")[1].strip() if ":" in test else ""
                    if tn > "":
                        if "." in tn:
                            delta_frac = min(1.0, float(tn))
                        else:
                            delta = int(tn)
                    tp = gxtp.TestOutput(
                        name=newcl,
                        value="%s_sample" % newcl,
                        format=newfmt,
                        compare=c,
                        delta=delta,
                        delta_frac=delta_frac,
                    )
            if tp is not None:
                self.testparam.append(tp)
        for p in self.infiles:
            newname = p[ICLPOS]
            newfmt = p[IFMTPOS]
            ndash = self.getNdash(newname)
            if not len(p[ILABPOS]) > 0:
                alab = p[ICLPOS]
            else:
                alab = p[ILABPOS]
            aninput = gxtp.DataParam(
                newname,
                optional=False,
                label=alab,
                help=p[IHELPOS],
                format=newfmt,
                multiple=False,
                num_dashes=ndash,
            )
            aninput.positional = self.is_positional
            self.tinputs.append(aninput)
            tparm = gxtp.TestParam(name=newname, value="%s_sample" % newname)
            self.testparam.append(tparm)
        for p in self.addpar:
            newname, newval, newlabel, newhelp, newtype, newcl, override, oldcl = p
            if not len(newlabel) > 0:
                newlabel = newname
            ndash = self.getNdash(newname)
            if newtype == "text":
                aparm = gxtp.TextParam(
                    newname,
                    label=newlabel,
                    help=newhelp,
                    value=newval,
                    num_dashes=ndash,
                )
            elif newtype == "integer":
                aparm = gxtp.IntegerParam(
                    newname,
                    label=newname,
                    help=newhelp,
                    value=newval,
                    num_dashes=ndash,
                )
            elif newtype == "float":
                aparm = gxtp.FloatParam(
                    newname,
                    label=newname,
                    help=newhelp,
                    value=newval,
                    num_dashes=ndash,
                )
            else:
                raise ValueError(
                    'Unrecognised parameter type "%s" for additional parameter %s in makeXML'
                    % (newtype, newname)
                )
            aparm.positional = self.is_positional
            if self.is_positional:
                aparm.positional = int(oldcl)
            self.tinputs.append(aparm)
            tparm = gxtp.TestParam(newname, value=newval)
            self.testparam.append(tparm)

    def doNoXMLparam(self):
        """filter style package - stdin to stdout"""
        if len(self.infiles) > 0:
            alab = self.infiles[0][ILABPOS]
            if len(alab) == 0:
                alab = self.infiles[0][ICLPOS]
            max1s = (
                "Maximum one input if parampass is 0 but multiple input files supplied - %s"
                % str(self.infiles)
            )
            assert len(self.infiles) == 1, max1s
            newname = self.infiles[0][ICLPOS]
            aninput = gxtp.DataParam(
                newname,
                optional=False,
                label=alab,
                help=self.infiles[0][IHELPOS],
                format=self.infiles[0][IFMTPOS],
                multiple=False,
                num_dashes=0,
            )
            aninput.command_line_override = "< $%s" % newname
            aninput.positional = self.is_positional
            self.tinputs.append(aninput)
            tp = gxtp.TestParam(name=newname, value="%s_sample" % newname)
            self.testparam.append(tp)
        if len(self.outfiles) > 0:
            newname = self.outfiles[0][OCLPOS]
            newfmt = self.outfiles[0][OFMTPOS]
            anout = gxtp.OutputData(newname, format=newfmt, num_dashes=0)
            anout.command_line_override = "> $%s" % newname
            anout.positional = self.is_positional
            self.toutputs.append(anout)
            tp = gxtp.TestOutput(
                name=newname, value="%s_sample" % newname, format=newfmt
            )
            self.testparam.append(tp)

    def makeXML(self):
        """
        Create a Galaxy xml tool wrapper for the new script
        Uses galaxyxml
        Hmmm. How to get the command line into correct order...
        """
        if self.command_override:
            self.newtool.command_override = self.command_override  # config file
        else:
            self.newtool.command_override = self.xmlcl
        if self.args.help_text:
            helptext = open(self.args.help_text, "r").readlines()
            safertext = "\n".join([cheetah_escape(x) for x in helptext])
            if self.args.script_path:
                scr = [x for x in self.spacedScript if x.strip() > ""]
                scr.insert(0, "\n------\n\nScript::\n")
                if len(scr) > 300:
                    scr = (
                        scr[:100]
                        + [">300 lines - stuff deleted", "......"]
                        + scr[-100:]
                    )
                scr.append("\n")
                safertext = safertext + "\n".join(scr)
            self.newtool.help = safertext
        else:
            self.newtool.help = (
                "Please ask the tool author (%s) for help "
                "as none was supplied at tool generation\n" % (self.args.user_email)
            )
        self.newtool.version_command = None  # do not want
        requirements = gxtp.Requirements()
        if self.args.packages:
            for d in self.args.packages.split(","):
                if ":" in d:
                    packg, ver = d.split(":")
                else:
                    packg = d
                    ver = ""
                requirements.append(
                    gxtp.Requirement("package", packg.strip(), ver.strip())
                )
            self.newtool.requirements = requirements
        if self.args.parampass == "0":
            self.doNoXMLparam()
        else:
            self.doXMLparam()
        self.newtool.outputs = self.toutputs
        self.newtool.inputs = self.tinputs
        if self.args.script_path:
            configfiles = gxtp.Configfiles()
            configfiles.append(
                gxtp.Configfile(name="runme", text="\n".join(self.escapedScript))
            )
            self.newtool.configfiles = configfiles
        tests = gxtp.Tests()
        test_a = gxtp.Test()
        for tp in self.testparam:
            test_a.append(tp)
        tests.append(test_a)
        self.newtool.tests = tests
        self.newtool.add_comment(
            "Created by %s at %s using the Galaxy Tool Factory."
            % (self.args.user_email, timenow())
        )
        self.newtool.add_comment("Source in git at: %s" % (toolFactoryURL))
        self.newtool.add_comment(
            "Cite: Creating re-usable tools from scripts doi:10.1093/bioinformatics/bts573"
        )
        exml0 = self.newtool.export()
        exml = exml0.replace(FAKEEXE, "")  # temporary work around until PR accepted
        if (
            self.test_override
        ):  # cannot do this inside galaxyxml as it expects lxml objects for tests
            part1 = exml.split("<tests>")[0]
            part2 = exml.split("</tests>")[1]
            fixed = "%s\n%s\n%s" % (part1, "\n".join(self.test_override), part2)
            exml = fixed
        exml = exml.replace('range="1:"', 'range="1000:"')
        xf = open("%s.xml" % self.tool_name, "w")
        xf.write(exml)
        xf.write("\n")
        xf.close()
        # ready for the tarball

    def run(self):
        """
        generate test outputs by running a command line
        won't work if command or test override in play - planemo is the
        easiest way to generate test outputs for that case so is
        automagically selected
        """
        scl = " ".join(self.cl)
        err = None
        if self.args.parampass != "0":
            if os.path.exists(self.elog):
                ste = open(self.elog, "a")
            else:
                ste = open(self.elog, "w")
            if self.lastclredirect:
                sto = open(self.lastclredirect[1], "wb")  # is name of an output file
            else:
                if os.path.exists(self.tlog):
                    sto = open(self.tlog, "a")
                else:
                    sto = open(self.tlog, "w")
                sto.write(
                    "## Executing Toolfactory generated command line = %s\n" % scl
                )
                sto.flush()
            subp = subprocess.run(
                self.cl, env=self.ourenv, shell=False, stdout=sto, stderr=ste
            )
            sto.close()
            ste.close()
            retval = subp.returncode
        else:  # work around special case - stdin and write to stdout
            if len(self.infiles) > 0:
                sti = open(self.infiles[0][IPATHPOS], "rb")
            else:
                sti = sys.stdin
            if len(self.outfiles) > 0:
                sto = open(self.outfiles[0][ONAMEPOS], "wb")
            else:
                sto = sys.stdout
            subp = subprocess.run(
                self.cl, env=self.ourenv, shell=False, stdout=sto, stdin=sti
            )
            # log the command line to the runner log, not the (possibly binary) output stream
            rlog = open(self.tlog, "a")
            rlog.write("## Executing Toolfactory generated command line = %s\n" % scl)
            rlog.close()
            retval = subp.returncode
            sto.close()
            sti.close()
        if os.path.isfile(self.tlog) and os.stat(self.tlog).st_size == 0:
            os.unlink(self.tlog)
        if os.path.isfile(self.elog) and os.stat(self.elog).st_size == 0:
            os.unlink(self.elog)
        if retval != 0 and err:  # problem
            sys.stderr.write(err)
        logging.debug("run done")
        return retval

    def copy_to_container(self, src, dest, container):
        """Recreate the src directory tree at dest - full path included"""
        idir = os.getcwd()
        workdir = os.path.dirname(src)
        os.chdir(workdir)
        _, tfname = tempfile.mkstemp(suffix=".tar")
        tar = tarfile.open(tfname, mode="w")
        srcb = os.path.basename(src)
        tar.add(srcb)
        tar.close()
        data = open(tfname, "rb").read()
        container.put_archive(dest, data)
        os.unlink(tfname)
        os.chdir(idir)

    def copy_from_container(self, src, dest, container):
        """recreate the src directory tree at dest using docker sdk"""
        os.makedirs(dest, exist_ok=True)
        _, tfname = tempfile.mkstemp(suffix=".tar")
        tf = open(tfname, "wb")
        bits, stat = container.get_archive(src)
        for chunk in bits:
            tf.write(chunk)
        tf.close()
        tar = tarfile.open(tfname, "r")
        tar.extractall(dest)
        tar.close()
        os.unlink(tfname)

    def planemo_biodocker_test(self):
        """planemo currently leaks dependencies if used in the same container and gets unhappy after a
        first successful run. https://github.com/galaxyproject/planemo/issues/1078#issuecomment-731476930

        Docker biocontainer has planemo with caches filled to save repeated downloads
        """

        def prun(container, tout, cl, user="biodocker"):
            rlog = container.exec_run(cl, user=user)
            slogl = str(rlog).split("\\n")
            slog = "\n".join(slogl)
            tout.write(f"## got rlog {slog} from {cl}\n")

        if os.path.exists(self.tlog):
            tout = open(self.tlog, "a")
        else:
            tout = open(self.tlog, "w")
        planemoimage = "quay.io/fubar2/planemo-biocontainer"
        xreal = "%s.xml" % self.tool_name
        repname = f"{self.tool_name}_planemo_test_report.html"
        ptestrep_path = os.path.join(self.repdir, repname)
        tool_name = self.tool_name
        client = docker.from_env()
        tvol = client.volumes.create()
        tvolname = tvol.name
        destdir = "/toolfactory/ptest"
        imrep = os.path.join(destdir, repname)
        # need to keep the container running so sleep a while - we stop and destroy it when we are done
        container = client.containers.run(
            planemoimage,
            "sleep 30m",
            detach=True,
            user="biodocker",
            volumes={f"{tvolname}": {"bind": "/toolfactory", "mode": "rw"}},
        )
        cl = f"mkdir -p {destdir}"
        prun(container, tout, cl, user="root")
        cl = f"rm -rf {destdir}/*"
        prun(container, tout, cl, user="root")
        ptestpath = os.path.join(destdir, "tfout", xreal)
        self.copy_to_container(self.tooloutdir, destdir, container)
        cl = "chown -R biodocker /toolfactory"
        prun(container, tout, cl, user="root")
        rlog = container.exec_run(f"ls -la {destdir}")
        ptestcl = f"planemo test --update_test_data --no_cleanup --test_data {destdir}/tfout/test-data --galaxy_root /home/biodocker/galaxy-central {ptestpath}"
        try:
            rlog = container.exec_run(ptestcl)
        except Exception:
            e = sys.exc_info()[0]
            tout.write(f"#### error: {e} from {ptestcl}\n")
        # fails - used to generate test outputs
        cl = f"planemo test --test_output {imrep} --no_cleanup --test_data {destdir}/tfout/test-data --galaxy_root /home/biodocker/galaxy-central {ptestpath}"
        try:
            prun(container, tout, cl)
        except Exception:
            pass
        testouts = tempfile.mkdtemp(suffix=None, prefix="tftemp", dir=".")
        self.copy_from_container(destdir, testouts, container)
        src = os.path.join(testouts, "ptest")
        if os.path.isdir(src):
            shutil.copytree(src, ".", dirs_exist_ok=True)
            src = repname
            if os.path.isfile(repname):
                shutil.copyfile(src, ptestrep_path)
        else:
            tout.write(f"No output from run to shutil.copytree in {src}\n")
        tout.close()
        container.stop()
        container.remove()
        tvol.remove()
        # shutil.rmtree(testouts)

    def shedLoad(self):
        """
        {'deleted': False,
        'description': 'Tools for manipulating data',
        'id': '175812cd7caaf439',
        'model_class': 'Category',
        'name': 'Text Manipulation',
        'url': '/api/categories/175812cd7caaf439'}]
        """
        if os.path.exists(self.tlog):
            sto = open(self.tlog, "a")
        else:
            sto = open(self.tlog, "w")

        ts = toolshed.ToolShedInstance(
            url=self.args.toolshed_url, key=self.args.toolshed_api_key, verify=False
        )
        repos = ts.repositories.get_repositories()
        rnames = [x.get("name", "?") for x in repos]
        rids = [x.get("id", "?") for x in repos]
        tfcat = "ToolFactory generated tools"
        if self.tool_name not in rnames:
            tscat = ts.categories.get_categories()
            cnames = [x.get("name", "?").strip() for x in tscat]
            cids = [x.get("id", "?") for x in tscat]
            catID = None
            if tfcat.strip() in cnames:
                ci = cnames.index(tfcat)
                catID = cids[ci]
            res = ts.repositories.create_repository(
                name=self.args.tool_name,
                synopsis="Synopsis:%s" % self.args.tool_desc,
                description=self.args.tool_desc,
                type="unrestricted",
                remote_repository_url=self.args.toolshed_url,
                homepage_url=None,
                category_ids=catID,
            )
            tid = res.get("id", None)
            sto.write(
                f"#####create_repository {self.args.tool_name} tid={tid} res={res}\n"
            )
        else:
            i = rnames.index(self.tool_name)
            tid = rids[i]
        try:
            res = ts.repositories.update_repository(
                id=tid, tar_ball_path=self.newtarpath, commit_message=None
            )
            sto.write(f"#####update res tid={tid} res={res}\n")
        except ConnectionError:
            sto.write(
                "Is the toolshed running and the API key correct? Bioblend shed upload failed\n"
            )
        sto.close()

    def eph_galaxy_load(self):
        """load the new tool from the local toolshed after planemo uploads it"""
        if os.path.exists(self.tlog):
            tout = open(self.tlog, "a")
        else:
            tout = open(self.tlog, "w")
        cll = [
            "shed-tools",
            "install",
            "-g",
            self.args.galaxy_url,
            "--latest",
            "-a",
            self.args.galaxy_api_key,
            "--name",
            self.tool_name,
            "--owner",
            "fubar",
            "--toolshed",
            self.args.toolshed_url,
            "--section_label",
            "ToolFactory",
        ]
        tout.write("running\n%s\n" % " ".join(cll))
        subp = subprocess.run(
            cll, env=self.ourenv, cwd=self.ourcwd, shell=False, stderr=tout, stdout=tout
        )
        tout.write(
            "installed %s - got retcode %d\n" % (self.tool_name, subp.returncode)
        )
        tout.close()
        return subp.returncode

    def planemo_shedLoad(self):
        """
        planemo shed_create --shed_target testtoolshed
        planemo shed_init --name=<name>
        --owner=<shed_username>
        --description=<short description>
        [--remote_repository_url=<URL to .shed.yml on github>]
        [--homepage_url=<Homepage for tool.>]
        [--long_description=<long description>]
        [--category=<category name>]*

        planemo shed_update --check_diff --shed_target testtoolshed
        """
        if os.path.exists(self.tlog):
            tout = open(self.tlog, "a")
        else:
            tout = open(self.tlog, "w")
        ts = toolshed.ToolShedInstance(
            url=self.args.toolshed_url, key=self.args.toolshed_api_key, verify=False
        )
        repos = ts.repositories.get_repositories()
        rnames = [x.get("name", "?") for x in repos]
        rids = [x.get("id", "?") for x in repos]
        # cat = "ToolFactory generated tools"
        if self.tool_name not in rnames:
            cll = [
                "planemo",
                "shed_create",
                "--shed_target",
                "local",
                "--owner",
                "fubar",
                "--name",
                self.tool_name,
                "--shed_key",
                self.args.toolshed_api_key,
            ]
            subp = None
            try:
                subp = subprocess.run(
                    cll,
                    env=self.ourenv,
                    shell=False,
                    cwd=self.tooloutdir,
                    stdout=tout,
                    stderr=tout,
                )
            except Exception:
                pass
            if subp is None or subp.returncode != 0:
                tout.write("Repository %s exists\n" % self.tool_name)
            else:
                tout.write("initiated %s\n" % self.tool_name)
        cll = [
            "planemo",
            "shed_upload",
            "--shed_target",
            "local",
            "--owner",
            "fubar",
            "--name",
            self.tool_name,
            "--shed_key",
            self.args.toolshed_api_key,
            "--tar",
            self.newtarpath,
        ]
        subp = subprocess.run(
            cll, env=self.ourenv, cwd=self.ourcwd, shell=False, stdout=tout, stderr=tout
        )
        tout.write("Ran %s got %d\n" % (" ".join(cll), subp.returncode))
        tout.close()
        return subp.returncode

    def eph_test(self, genoutputs=True):
        """problem getting jobid - ephemeris upload is the job before the one we want - but depends on how many inputs"""
        if os.path.exists(self.tlog):
            tout = open(self.tlog, "a")
        else:
            tout = open(self.tlog, "w")
        cll = [
            "shed-tools",
            "test",
            "-g",
            self.args.galaxy_url,
            "-a",
            self.args.galaxy_api_key,
            "--name",
            self.tool_name,
            "--owner",
            "fubar",
        ]
        if genoutputs:
            dummy, tfile = tempfile.mkstemp()
            subp = subprocess.run(
                cll,
                env=self.ourenv,
                cwd=self.ourcwd,
                shell=False,
                stderr=dummy,
                stdout=dummy,
            )

            with open("tool_test_output.json", "rb") as f:
                s = json.loads(f.read())
            print("read %s" % s)
            cl = s["tests"][0]["data"]["job"]["command_line"].split()
            n = cl.index("--script_path")
            jobdir = cl[n + 1]
            jobdir = jobdir.replace('"', "")
            jobdir = jobdir.split("/configs")[0]
            print("jobdir=%s" % jobdir)

            # e.g. "/home/ross/galthrow/database/jobs_directory/000/649/configs/tmptfxu51gs"
            src = os.path.join(jobdir, "working", self.newtarpath)
            if os.path.exists(src):
                dest = os.path.join(self.testdir, self.newtarpath)
                shutil.copyfile(src, dest)
            else:
                tout.write(
                    "No toolshed archive found after first ephemeris test - not a good sign"
                )
            ephouts = os.path.join(jobdir, "working", "tfout", "test-data")
            with os.scandir(ephouts) as outs:
                for entry in outs:
                    if not entry.is_file():
                        continue
                    dest = os.path.join(self.tooloutdir, entry.name)
                    src = os.path.join(ephouts, entry.name)
                    shutil.copyfile(src, dest)
        else:
            subp = subprocess.run(
                cll,
                env=self.ourenv,
                cwd=self.ourcwd,
                shell=False,
                stderr=tout,
                stdout=tout,
            )
        tout.write("eph_test Ran %s got %d" % (" ".join(cll), subp.returncode))
        tout.close()
        return subp.returncode

    def planemo_test_biocontainer(self, genoutputs=True):
        """planemo is a requirement so is available for testing but testing in a biocontainer
        requires some fiddling to use the hacked galaxy-central .venv

        Planemo runs:
        python ./scripts/functional_tests.py -v --with-nosehtml --html-report-file
        /export/galaxy-central/database/job_working_directory/000/17/working/TF_run_report_tempdir/tacrev_planemo_test_report.html
        --with-xunit --xunit-file /tmp/tmpt90p7f9h/xunit.xml --with-structureddata
        --structured-data-file
        /export/galaxy-central/database/job_working_directory/000/17/working/tfout/tool_test_output.json functional.test_toolbox

        for the planemo-biocontainer,
        planemo test --conda_dependency_resolution --skip_venv --galaxy_root /galthrow/ rgToolFactory2.xml
        """
        xreal = "%s.xml" % self.tool_name
        tool_test_path = os.path.join(
            self.repdir, f"{self.tool_name}_planemo_test_report.html"
        )
        if os.path.exists(self.tlog):
            tout = open(self.tlog, "a")
        else:
            tout = open(self.tlog, "w")
        if genoutputs:
            dummy, tfile = tempfile.mkstemp()
            cll = [
                ".",
                os.path.join(self.args.galaxy_root, ".venv", "bin", "activate"),
                "&&",
                "planemo",
                "test",
                "--test_data",
                self.testdir,
                "--test_output",
                tool_test_path,
                "--skip_venv",
                "--galaxy_root",
                self.args.galaxy_root,
                "--update_test_data",
                xreal,
            ]
            subp = subprocess.run(
                cll,
                env=self.ourenv,
                shell=False,
                cwd=self.tooloutdir,
                stderr=dummy,
                stdout=dummy,
            )

        else:
            cll = [
                ".",
                os.path.join(self.args.galaxy_root, ".venv", "bin", "activate"),
                "&&",
                "planemo",
                "test",
                "--test_data",
                self.testdir,
                "--test_output",
                tool_test_path,
                "--skip_venv",
                "--galaxy_root",
                self.args.galaxy_root,
                xreal,
            ]
            subp = subprocess.run(
                cll,
                env=self.ourenv,
                shell=False,
                cwd=self.tooloutdir,
                stderr=tout,
                stdout=tout,
            )
        tout.close()
        return subp.returncode

    def writeShedyml(self):
        """for planemo"""
        yuser = self.args.user_email.split("@")[0]
        yfname = os.path.join(self.tooloutdir, ".shed.yml")
        yamlf = open(yfname, "w")
        odict = {
            "name": self.tool_name,
            "owner": yuser,
            "type": "unrestricted",
            "description": self.args.tool_desc,
            "synopsis": self.args.tool_desc,
            "category": "TF Generated Tools",
        }
        yaml.dump(odict, yamlf, allow_unicode=True)
        yamlf.close()

    def makeTool(self):
        """write xmls and input samples into place"""
        self.makeXML()
        if self.args.script_path:
            stname = os.path.join(self.tooloutdir, "%s" % (self.sfile))
            if not os.path.exists(stname):
                shutil.copyfile(self.sfile, stname)
        xreal = "%s.xml" % self.tool_name
        xout = os.path.join(self.tooloutdir, xreal)
        shutil.copyfile(xreal, xout)
        for p in self.infiles:
            pth = p[IPATHPOS]
            dest = os.path.join(self.testdir, "%s_sample" % p[ICLPOS])
            shutil.copyfile(pth, dest)

    def makeToolTar(self):
        """move outputs into test-data and prepare the tarball"""
        excludeme = "_planemo_test_report.html"

        def exclude_function(tarinfo):
            filename = tarinfo.name
            return None if filename.endswith(excludeme) else tarinfo

        for p in self.outfiles:
            oname = p[ONAMEPOS]
            tdest = os.path.join(self.testdir, "%s_sample" % oname)
            if not os.path.isfile(tdest):
                src = os.path.join(self.testdir, oname)
                if os.path.isfile(src):
                    shutil.copyfile(src, tdest)
                    dest = os.path.join(self.repdir, "%s.sample" % (oname))
                    shutil.copyfile(src, dest)
                else:
                    print(
                        "### problem - output file %s not found in testdir %s"
                        % (tdest, self.testdir)
                    )
        tf = tarfile.open(self.newtarpath, "w:gz")
        tf.add(name=self.tooloutdir, arcname=self.tool_name, filter=exclude_function)
        tf.close()
        shutil.copyfile(self.newtarpath, self.args.new_tool)

    def moveRunOutputs(self):
        """need to move planemo or run outputs into toolfactory collection"""
        with os.scandir(self.tooloutdir) as outs:
            for entry in outs:
                if not entry.is_file():
                    continue
                if "." in entry.name:
                    nayme, ext = os.path.splitext(entry.name)
                    if ext in [".yml", ".xml", ".json", ".yaml"]:
                        ext = f"{ext}.txt"
                else:
                    ext = ".txt"
                ofn = "%s%s" % (entry.name.replace(".", "_"), ext)
                dest = os.path.join(self.repdir, ofn)
                src = os.path.join(self.tooloutdir, entry.name)
                shutil.copyfile(src, dest)
        with os.scandir(self.testdir) as outs:
            for entry in outs:
                if (
                    (not entry.is_file())
                    or entry.name.endswith("_sample")
                    or entry.name.endswith("_planemo_test_report.html")
                ):
                    continue
                if "." in entry.name:
                    nayme, ext = os.path.splitext(entry.name)
                else:
                    ext = ".txt"
                newname = f"{entry.name}{ext}"
                dest = os.path.join(self.repdir, newname)
                src = os.path.join(self.testdir, entry.name)
                shutil.copyfile(src, dest)


def main():
    """
    This is a Galaxy wrapper. It expects to be called by a special purpose tool.xml as:
    <command interpreter="python">rgBaseScriptWrapper.py --script_path "$scriptPath"
    --tool_name "foo" --interpreter "Rscript"
    </command>
    """
    parser = argparse.ArgumentParser()
    a = parser.add_argument
    a("--script_path", default=None)
    a("--history_test", default=None)
    a("--cl_prefix", default=None)
    a("--sysexe", default=None)
    a("--packages", default=None)
    a("--tool_name", default="newtool")
    a("--tool_dir", default=None)
    a("--input_files", default=[], action="append")
    a("--output_files", default=[], action="append")
    a("--user_email", default="Unknown")
    a("--bad_user", default=None)
    a("--make_Tool", default="runonly")
    a("--help_text", default=None)
    a("--tool_desc", default=None)
    a("--tool_version", default=None)
    a("--citations", default=None)
    a("--command_override", default=None)
    a("--test_override", default=None)
    a("--additional_parameters", action="append", default=[])
    a("--edit_additional_parameters", action="store_true", default=False)
    a("--parampass", default="positional")
    a("--tfout", default="./tfout")
    a("--new_tool", default="new_tool")
    a("--galaxy_url", default="http://localhost:8080")
    a("--toolshed_url", default="http://localhost:9009")
    # make sure this is identical to tool_sheds_conf.xml - localhost != 127.0.0.1 so validation fails
    a("--toolshed_api_key", default="fakekey")
    a("--galaxy_api_key", default="fakekey")
    a("--galaxy_root", default="/galaxy-central")
    a("--galaxy_venv", default="/galaxy_venv")
    args = parser.parse_args()
    assert not args.bad_user, (
        'UNAUTHORISED: %s is NOT authorized to use this tool until a Galaxy admin adds %s to "admin_users" in the Galaxy configuration file'
        % (args.bad_user, args.bad_user)
    )
    assert args.tool_name, "## Tool Factory expects a tool name - eg --tool_name=DESeq"
    assert (
        args.sysexe or args.packages
    ), "## Tool Factory wrapper expects an interpreter or an executable package"
    args.input_files = [x.replace('"', "").replace("'", "") for x in args.input_files]
    # remove quotes we need to deal with spaces in CL params
    for i, x in enumerate(args.additional_parameters):
        args.additional_parameters[i] = args.additional_parameters[i].replace('"', "")
    r = ScriptRunner(args)
    r.writeShedyml()
    r.makeTool()
    if args.make_Tool == "generate":
        retcode = r.run()  # for testing toolfactory itself
        r.moveRunOutputs()
        r.makeToolTar()
    else:
        r.planemo_biodocker_test()  # test to make outputs and then test
        r.moveRunOutputs()
        r.makeToolTar()
        if args.make_Tool == "gentestinstall":
            r.shedLoad()
            r.eph_galaxy_load()


if __name__ == "__main__":
    main()