# replace with shebang for biocontainer
# see https://github.com/fubar2/toolfactory
#
# copyright ross lazarus (ross stop lazarus at gmail stop com) May 2012
#
# all rights reserved
# Licensed under the LGPL
# suggestions for improvement and bug fixes welcome at
# https://github.com/fubar2/toolfactory
#
# July 2020: BCC was fun and I feel like rip van winkle after 5 years.
# Decided to
# 1. Fix the toolfactory so it works - done for simplest case
# 2. Fix planemo so the toolfactory function works
# 3. Rewrite bits using galaxyxml functions where that makes sense - done
#
# removed all the old complications including making the new tool use this same script
# galaxyxml now generates the tool xml https://github.com/hexylena/galaxyxml
# No support for automatic HTML file creation from arbitrary outputs
# essential problem is to create two command lines - one for the tool xml and a different
# one to run the executable with the supplied test data and settings
# Be simpler to write the tool, then run it with planemo and soak up the test outputs.
# well well. sh run_tests.sh --id rgtf2 --report_file tool_tests_tool_conf.html functional.test_toolbox
# does the needful. Use GALAXY_TEST_SAVE /foo to save outputs - only the tar.gz - not the rest sadly
# GALAXY_TEST_NO_CLEANUP GALAXY_TEST_TMP_DIR=wherever
# planemo test --engine docker_galaxy --test_data ./test-data/ --docker_extra_volume ./test-data rgToolFactory2.xml
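#
# As an illustrative sketch only (hypothetical tool, script and file names), the two
# command lines this script must build differ only in whether concrete test paths or
# Galaxy template variables appear:
#   run/test CL : python /tmp/mytool_python < in.tab > outfile.tab
#   tool XML CL : python $runme < $input1 > $outfile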

import argparse
import copy
import datetime
import grp
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
import time


from bioblend import ConnectionError
from bioblend import toolshed

import docker

import galaxyxml.tool as gxt
import galaxyxml.tool.parameters as gxtp

import lxml

import yaml

myversion = "V2.1 July 2020"
verbose = True
debug = True
toolFactoryURL = "https://github.com/fubar2/toolfactory"
ourdelim = "~~~"
ALOT = 10000000  # srsly. command or test overrides use read() so just in case
STDIOXML = """<stdio>
<exit_code range="100:" level="debug" description="shite happens" />
</stdio>"""

# --input_files="$input_files~~~$CL~~~$input_formats~~~$input_label
# ~~~$input_help"
IPATHPOS = 0
ICLPOS = 1
IFMTPOS = 2
ILABPOS = 3
IHELPOS = 4
IOCLPOS = 5
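# As an illustrative example (hypothetical values), one --input_files field such as
#   "mydata.tab~~~infile~~~tabular~~~Input table~~~Tab delimited file to read"
# splits on ourdelim into [path, CL name, format, label, help]; cleanuppar()
# later appends the original CL token at IOCLPOS.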

# --output_files "$otab.history_name~~~$otab.history_format~~~$otab.CL~~~otab.history_test
ONAMEPOS = 0
OFMTPOS = 1
OCLPOS = 2
OTESTPOS = 3
OOCLPOS = 4


# --additional_parameters="$i.param_name~~~$i.param_value~~~
# $i.param_label~~~$i.param_help~~~$i.param_type~~~$i.CL~~~i$.param_CLoverride"
ANAMEPOS = 0
AVALPOS = 1
ALABPOS = 2
AHELPPOS = 3
ATYPEPOS = 4
ACLPOS = 5
AOVERPOS = 6
AOCLPOS = 7
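# Illustrative (hypothetical) --additional_parameters field:
#   "maxlen~~~100~~~Maximum length~~~Longest read kept~~~integer~~~maxlen~~~"
# i.e. [name, value, label, help, type, CL name, CL override]; cleanuppar()
# appends the original CL token at AOCLPOS.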


foo = len(lxml.__version__)
# use the lxml import so flake8 does not flag it as unused
FAKEEXE = "~~~REMOVE~~~ME~~~"
# need this until a PR/version bump to fix galaxyxml prepending the exe even
# with override.


def timenow():
    """return current time as a string"""
    return time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(time.time()))


def quote_non_numeric(s):
    """return a prequoted string for non-numerics
    useful for perl and Rscript parameter passing?
    """
    try:
        _ = float(s)
        return s
    except ValueError:
        return '"%s"' % s
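# Illustrative behaviour (hypothetical values):
#   quote_non_numeric("3.14")  -> "3.14" (parses as a float, returned unchanged)
#   quote_non_numeric("hello") -> '"hello"' (pre-quoted for Rscript/perl CLs)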


html_escape_table = {"&": "&amp;", ">": "&gt;", "<": "&lt;", "$": r"\$"}


def html_escape(text):
    """Produce entities within text."""
    return "".join(html_escape_table.get(c, c) for c in text)


def html_unescape(text):
    """Revert entities within text. Multiple character targets so use replace"""
    t = text.replace("&amp;", "&")
    t = t.replace("&gt;", ">")
    t = t.replace("&lt;", "<")
    t = t.replace("\\$", "$")
    return t


def parse_citations(citations_text):
    """Split the packed citations text into (type, citation) tuples"""
    citations = [c for c in citations_text.split("**ENTRY**") if c.strip()]
    citation_tuples = []
    for citation in citations:
        if citation.startswith("doi"):
            citation_tuples.append(("doi", citation[len("doi") :].strip()))
        else:
            citation_tuples.append(("bibtex", citation[len("bibtex") :].strip()))
    return citation_tuples
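# Illustrative (hypothetical) citations_text, as packed by the ToolFactory form:
#   "doi10.1093/bioinformatics/bts573**ENTRY**bibtex@article{...}"
# -> [("doi", "10.1093/bioinformatics/bts573"), ("bibtex", "@article{...}")]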


class ScriptRunner:
    """Wrapper for an arbitrary script
    uses galaxyxml

    """

    def __init__(self, args=None):
        """
        prepare command line cl for running the tool here
        and prepare elements needed for galaxyxml tool generation
        """
        self.ourcwd = os.getcwd()
        self.ourenv = copy.deepcopy(os.environ)
        self.infiles = [x.split(ourdelim) for x in args.input_files]
        self.outfiles = [x.split(ourdelim) for x in args.output_files]
        self.addpar = [x.split(ourdelim) for x in args.additional_parameters]
        self.args = args
        self.cleanuppar()
        self.lastclredirect = None
        self.lastxclredirect = None
        self.cl = []
        self.xmlcl = []
        self.is_positional = self.args.parampass == "positional"
        if self.args.sysexe:
            self.executeme = self.args.sysexe
        else:
            if self.args.packages:
                self.executeme = self.args.packages.split(",")[0].split(":")[0]
            else:
                self.executeme = None
        aCL = self.cl.append
        aXCL = self.xmlcl.append
        assert args.parampass in [
            "0",
            "argparse",
            "positional",
        ], 'args.parampass must be "0","positional" or "argparse"'
        self.tool_name = re.sub("[^a-zA-Z0-9_]+", "", args.tool_name)
        self.tool_id = self.tool_name
        self.newtool = gxt.Tool(
            self.tool_name,
            self.tool_id,
            self.args.tool_version,
            self.args.tool_desc,
            FAKEEXE,
        )
        self.newtarpath = "toolfactory_%s.tgz" % self.tool_name
        self.tooloutdir = "./tfout"
        self.repdir = "./TF_run_report_tempdir"
        self.testdir = os.path.join(self.tooloutdir, "test-data")
        if not os.path.exists(self.tooloutdir):
            os.mkdir(self.tooloutdir)
        if not os.path.exists(self.testdir):
            os.mkdir(self.testdir)  # make tests directory
        if not os.path.exists(self.repdir):
            os.mkdir(self.repdir)
        self.tinputs = gxtp.Inputs()
        self.toutputs = gxtp.Outputs()
        self.testparam = []
        if self.args.script_path:
            self.prepScript()
        if self.args.command_override:
            scos = open(self.args.command_override, "r").readlines()
            self.command_override = [x.rstrip() for x in scos]
        else:
            self.command_override = None
        if self.args.test_override:
            stos = open(self.args.test_override, "r").readlines()
            self.test_override = [x.rstrip() for x in stos]
        else:
            self.test_override = None
        if self.args.cl_prefix:  # DIY CL start
            clp = self.args.cl_prefix.split(" ")
            for c in clp:
                aCL(c)
                aXCL(c)
        else:
            if self.args.script_path:
                aCL(self.executeme)
                aCL(self.sfile)
                aXCL(self.executeme)
                aXCL("$runme")
            else:
                aCL(self.executeme)  # this little CL will just run
                aXCL(self.executeme)
        self.elog = os.path.join(self.repdir, "%s_error_log.txt" % self.tool_name)
        self.tlog = os.path.join(self.repdir, "%s_runner_log.txt" % self.tool_name)

        if self.args.parampass == "0":
            self.clsimple()
        else:
            clsuffix = []
            xclsuffix = []
            for i, p in enumerate(self.infiles):
                if p[IOCLPOS] == "STDIN":
                    appendme = [
                        p[IOCLPOS],
                        p[ICLPOS],
                        p[IPATHPOS],
                        "< %s" % p[IPATHPOS],
                    ]
                    xappendme = [
                        p[IOCLPOS],
                        p[ICLPOS],
                        p[IPATHPOS],
                        "< $%s" % p[ICLPOS],
                    ]
                else:
                    appendme = [p[IOCLPOS], p[ICLPOS], p[IPATHPOS], ""]
                    xappendme = [p[IOCLPOS], p[ICLPOS], "$%s" % p[ICLPOS], ""]
                clsuffix.append(appendme)
                xclsuffix.append(xappendme)
            for i, p in enumerate(self.outfiles):
                if p[OOCLPOS] == "STDOUT":
                    self.lastclredirect = [">", p[ONAMEPOS]]
                    self.lastxclredirect = [">", "$%s" % p[OCLPOS]]
                else:
                    clsuffix.append([p[OOCLPOS], p[OCLPOS], p[ONAMEPOS], ""])
                    xclsuffix.append([p[OOCLPOS], p[OCLPOS], "$%s" % p[ONAMEPOS], ""])
            for p in self.addpar:
                clsuffix.append([p[AOCLPOS], p[ACLPOS], p[AVALPOS], p[AOVERPOS]])
                xclsuffix.append(
                    [p[AOCLPOS], p[ACLPOS], '"$%s"' % p[ANAMEPOS], p[AOVERPOS]]
                )
            clsuffix.sort()
            xclsuffix.sort()
            self.xclsuffix = xclsuffix
            self.clsuffix = clsuffix
            if self.args.parampass == "positional":
                self.clpositional()
            else:
                self.clargparse()

    def prepScript(self):
        rx = open(self.args.script_path, "r").readlines()
        rx = [x.rstrip() for x in rx]
        rxcheck = [x.strip() for x in rx if x.strip() > ""]
        assert len(rxcheck) > 0, "Supplied script is empty. Cannot run"
        self.script = "\n".join(rx)
        fhandle, self.sfile = tempfile.mkstemp(
            prefix=self.tool_name, suffix="_%s" % (self.executeme)
        )
        tscript = open(self.sfile, "w")
        tscript.write(self.script)
        tscript.close()
        self.indentedScript = " %s" % "\n".join([" %s" % html_escape(x) for x in rx])
        self.escapedScript = "%s" % "\n".join([" %s" % html_escape(x) for x in rx])
        art = "%s.%s" % (self.tool_name, self.executeme)
        artifact = open(art, "wb")
        artifact.write(bytes(self.script, "utf8"))
        artifact.close()

    def cleanuppar(self):
        """positional parameters are complicated by their numeric ordinal"""
        for i, p in enumerate(self.infiles):
            if self.args.parampass == "positional":
                assert p[
                    ICLPOS
                ].isdigit(), "Positional parameters must be ordinal integers - got %s for %s" % (
                    p[ICLPOS],
                    p[ILABPOS],
                )
            p.append(p[ICLPOS])
            if p[ICLPOS].isdigit() or self.args.parampass == "0":
                scl = "input%d" % (i + 1)
                p[ICLPOS] = scl
            self.infiles[i] = p
        for i, p in enumerate(
            self.outfiles
        ):  # trying to automagically gather using extensions
            if self.args.parampass == "positional" and p[OCLPOS] != "STDOUT":
                assert p[
                    OCLPOS
                ].isdigit(), "Positional parameters must be ordinal integers - got %s for %s" % (
                    p[OCLPOS],
                    p[ONAMEPOS],
                )
            p.append(p[OCLPOS])
            if p[OCLPOS].isdigit() or p[OCLPOS] == "STDOUT":
                scl = p[ONAMEPOS]
                p[OCLPOS] = scl
            self.outfiles[i] = p
        for i, p in enumerate(self.addpar):
            if self.args.parampass == "positional":
                assert p[
                    ACLPOS
                ].isdigit(), "Positional parameters must be ordinal integers - got %s for %s" % (
                    p[ACLPOS],
                    p[ANAMEPOS],
                )
            p.append(p[ACLPOS])
            if p[ACLPOS].isdigit():
                scl = "input%s" % p[ACLPOS]
                p[ACLPOS] = scl
            self.addpar[i] = p

    def clsimple(self):
        """no parameters - uses < and > for i/o"""
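        # Illustrative sketch (hypothetical names): together with the prefix added in
        # __init__, a script tool ends up with a run CL like
        #   ['python', '/tmp/mytool_python', '<', 'in.tab', '>', 'outfile']
        # and a tool XML CL like
        #   ['python', '$runme', '<', '$input1', '>', '$outfile']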
        aCL = self.cl.append
        aXCL = self.xmlcl.append

        if len(self.infiles) > 0:
            aCL("<")
            aCL(self.infiles[0][IPATHPOS])
            aXCL("<")
            aXCL("$%s" % self.infiles[0][ICLPOS])
        if len(self.outfiles) > 0:
            aCL(">")
            aCL(self.outfiles[0][OCLPOS])
            aXCL(">")
            aXCL("$%s" % self.outfiles[0][ONAMEPOS])

    def clpositional(self):
        # inputs in order then params
        aCL = self.cl.append
        for (o_v, k, v, koverride) in self.clsuffix:
            if " " in v:
                aCL("%s" % v)
            else:
                aCL(v)
        aXCL = self.xmlcl.append
        for (o_v, k, v, koverride) in self.xclsuffix:
            aXCL(v)
        if self.lastxclredirect:
            aXCL(self.lastxclredirect[0])
            aXCL(self.lastxclredirect[1])

    def clargparse(self):
        """argparse style"""
        aCL = self.cl.append
        aXCL = self.xmlcl.append
        # inputs then params in argparse named form
        for (o_v, k, v, koverride) in self.xclsuffix:
            if koverride > "":
                k = koverride
            elif len(k.strip()) == 1:
                k = "-%s" % k
            else:
                k = "--%s" % k
            aXCL(k)
            aXCL(v)
        for (o_v, k, v, koverride) in self.clsuffix:
            if koverride > "":
                k = koverride
            elif len(k.strip()) == 1:
                k = "-%s" % k
            else:
                k = "--%s" % k
            aCL(k)
            aCL(v)

    def getNdash(self, newname):
        if self.is_positional:
            ndash = 0
        else:
            ndash = 2
            if len(newname) < 2:
                ndash = 1
        return ndash
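    # Illustrative (hypothetical) getNdash results: positional style -> 0 dashes;
    # a one-letter name like "o" -> 1 ("-o"); a longer name like "outfile" -> 2 ("--outfile").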

    def doXMLparam(self):
        """flake8 made me do this..."""
        for p in self.outfiles:
            newname, newfmt, newcl, test, oldcl = p
            ndash = self.getNdash(newcl)
            aparm = gxtp.OutputData(newcl, format=newfmt, num_dashes=ndash)
            aparm.positional = self.is_positional
            if self.is_positional:
                if oldcl == "STDOUT":
                    aparm.positional = 9999999
                    aparm.command_line_override = "> $%s" % newcl
                else:
                    aparm.positional = int(oldcl)
                    aparm.command_line_override = "$%s" % newcl
            self.toutputs.append(aparm)
            usetest = None
            ld = None
            if test > "":
                if test.startswith("diff"):
                    usetest = "diff"
                    if ":" in test and test.split(":")[1].isdigit():
                        ld = int(test.split(":")[1])
                else:
                    usetest = test
            tp = gxtp.TestOutput(
                name=newcl,
                value="%s_sample" % newcl,
                format=newfmt,
                compare=usetest,
                lines_diff=ld,
                delta=None,
            )
            self.testparam.append(tp)
        for p in self.infiles:
            newname = p[ICLPOS]
            newfmt = p[IFMTPOS]
            ndash = self.getNdash(newname)
            if not len(p[ILABPOS]) > 0:
                alab = p[ICLPOS]
            else:
                alab = p[ILABPOS]
            aninput = gxtp.DataParam(
                newname,
                optional=False,
                label=alab,
                help=p[IHELPOS],
                format=newfmt,
                multiple=False,
                num_dashes=ndash,
            )
            aninput.positional = self.is_positional
            self.tinputs.append(aninput)
            tparm = gxtp.TestParam(name=newname, value="%s_sample" % newname)
            self.testparam.append(tparm)
        for p in self.addpar:
            newname, newval, newlabel, newhelp, newtype, newcl, override, oldcl = p
            if not len(newlabel) > 0:
                newlabel = newname
            ndash = self.getNdash(newname)
            if newtype == "text":
                aparm = gxtp.TextParam(
                    newname,
                    label=newlabel,
                    help=newhelp,
                    value=newval,
                    num_dashes=ndash,
                )
            elif newtype == "integer":
                aparm = gxtp.IntegerParam(
                    newname,
                    label=newname,
                    help=newhelp,
                    value=newval,
                    num_dashes=ndash,
                )
            elif newtype == "float":
                aparm = gxtp.FloatParam(
                    newname,
                    label=newname,
                    help=newhelp,
                    value=newval,
                    num_dashes=ndash,
                )
            else:
                raise ValueError(
                    'Unrecognised parameter type "%s" for '
                    "additional parameter %s in makeXML" % (newtype, newname)
                )
            aparm.positional = self.is_positional
            if self.is_positional:
                aparm.positional = int(oldcl)
            self.tinputs.append(aparm)
            tparm = gxtp.TestParam(newname, value=newval)
            self.testparam.append(tparm)

    def doNoXMLparam(self):
        """filter style package - stdin to stdout"""
        if len(self.infiles) > 0:
            alab = self.infiles[0][ILABPOS]
            if len(alab) == 0:
                alab = self.infiles[0][ICLPOS]
            max1s = (
                "Maximum one input if parampass is 0 but multiple input files supplied - %s"
                % str(self.infiles)
            )
            assert len(self.infiles) == 1, max1s
            newname = self.infiles[0][ICLPOS]
            aninput = gxtp.DataParam(
                newname,
                optional=False,
                label=alab,
                help=self.infiles[0][IHELPOS],
                format=self.infiles[0][IFMTPOS],
                multiple=False,
                num_dashes=0,
            )
            aninput.command_line_override = "< $%s" % newname
            aninput.positional = self.is_positional
            self.tinputs.append(aninput)
            tp = gxtp.TestParam(name=newname, value="%s_sample" % newname)
            self.testparam.append(tp)
        if len(self.outfiles) > 0:
            newname = self.outfiles[0][OCLPOS]
            newfmt = self.outfiles[0][OFMTPOS]
            anout = gxtp.OutputData(newname, format=newfmt, num_dashes=0)
            anout.command_line_override = "> $%s" % newname
            anout.positional = self.is_positional
            self.toutputs.append(anout)
            tp = gxtp.TestOutput(
                name=newname, value="%s_sample" % newname, format=newfmt
            )
            self.testparam.append(tp)

    def makeXML(self):
        """
        Create a Galaxy xml tool wrapper for the new script
        Uses galaxyhtml
        Hmmm. How to get the command line into correct order...
        """
        if self.command_override:
            self.newtool.command_override = self.command_override  # config file
        else:
            self.newtool.command_override = self.xmlcl
        if self.args.help_text:
            helptext = open(self.args.help_text, "r").readlines()
            safertext = [html_escape(x) for x in helptext]
            if False and self.args.script_path:
                scrp = self.script.split("\n")
                scrpt = [" %s" % x for x in scrp]  # try to stop templating
                scrpt.insert(0, "```\n")
                if len(scrpt) > 300:
                    safertext = (
                        safertext
                        + scrpt[:100]
                        + [">300 lines - stuff deleted", "......"]
                        + scrpt[-100:]
                    )
                else:
                    safertext = safertext + scrpt
                safertext.append("\n```")
            self.newtool.help = "\n".join([x for x in safertext])
        else:
            self.newtool.help = (
                "Please ask the tool author (%s) for help "
                "as none was supplied at tool generation\n" % (self.args.user_email)
            )
        self.newtool.version_command = None  # do not want
        requirements = gxtp.Requirements()
        if self.args.packages:
            for d in self.args.packages.split(","):
                if ":" in d:
                    packg, ver = d.split(":")
                else:
                    packg = d
                    ver = ""
                requirements.append(
                    gxtp.Requirement("package", packg.strip(), ver.strip())
                )
            self.newtool.requirements = requirements
        if self.args.parampass == "0":
            self.doNoXMLparam()
        else:
            self.doXMLparam()
        self.newtool.outputs = self.toutputs
        self.newtool.inputs = self.tinputs
        if self.args.script_path:
            configfiles = gxtp.Configfiles()
            configfiles.append(gxtp.Configfile(name="runme", text=self.script))
            self.newtool.configfiles = configfiles
        tests = gxtp.Tests()
        test_a = gxtp.Test()
        for tp in self.testparam:
            test_a.append(tp)
        tests.append(test_a)
        self.newtool.tests = tests
        self.newtool.add_comment(
            "Created by %s at %s using the Galaxy Tool Factory."
            % (self.args.user_email, timenow())
        )
        self.newtool.add_comment("Source in git at: %s" % (toolFactoryURL))
        self.newtool.add_comment(
            "Cite: Creating re-usable tools from scripts doi: "
            "10.1093/bioinformatics/bts573"
        )
        exml0 = self.newtool.export()
        exml = exml0.replace(FAKEEXE, "")  # temporary work around until PR accepted
        if (
            self.test_override
        ):  # cannot do this inside galaxyxml as it expects lxml objects for tests
            part1 = exml.split("<tests>")[0]
            part2 = exml.split("</tests>")[1]
            fixed = "%s\n%s\n%s" % (part1, self.test_override, part2)
            exml = fixed
        exml = exml.replace('range="1:"', 'range="1000:"')
        xf = open("%s.xml" % self.tool_name, "w")
        xf.write(exml)
        xf.write("\n")
        xf.close()
        # ready for the tarball

    def run(self):
        """
        generate test outputs by running a command line
        won't work if command or test override in play - planemo is the
        easiest way to generate test outputs for that case so is
        automagically selected
        """
        scl = " ".join(self.cl)
        err = None
        if self.args.parampass != "0":
            if os.path.exists(self.elog):
                ste = open(self.elog, "a")
            else:
                ste = open(self.elog, "w")
            if self.lastclredirect:
                sto = open(self.lastclredirect[1], "wb")  # is name of an output file
            else:
                if os.path.exists(self.tlog):
                    sto = open(self.tlog, "a")
                else:
                    sto = open(self.tlog, "w")
                sto.write(
                    "## Executing Toolfactory generated command line = %s\n" % scl
                )
                sto.flush()
            subp = subprocess.run(
                self.cl, env=self.ourenv, shell=False, stdout=sto, stderr=ste
            )
            sto.close()
            ste.close()
            retval = subp.returncode
        else:  # work around special case - stdin and write to stdout
            if len(self.infiles) > 0:
                sti = open(self.infiles[0][IPATHPOS], "rb")
            else:
                sti = sys.stdin
            if len(self.outfiles) > 0:
                sto = open(self.outfiles[0][ONAMEPOS], "wb")
            else:
                sto = sys.stdout
            subp = subprocess.run(
                self.cl, env=self.ourenv, shell=False, stdout=sto, stdin=sti
            )
            logging.debug("## Executing Toolfactory generated command line = %s", scl)
            retval = subp.returncode
            sto.close()
            sti.close()
        if os.path.isfile(self.tlog) and os.stat(self.tlog).st_size == 0:
            os.unlink(self.tlog)
        if os.path.isfile(self.elog) and os.stat(self.elog).st_size == 0:
            os.unlink(self.elog)
        if retval != 0 and err:  # problem
            sys.stderr.write(err)
        logging.debug("run done")
        return retval

    def copy_to_container(self, src, dest, container):
        """Recreate the src directory tree at dest - full path included"""
        idir = os.getcwd()
        workdir = os.path.dirname(src)
        os.chdir(workdir)
        _, tfname = tempfile.mkstemp(suffix=".tar")
        tar = tarfile.open(tfname, mode="w")
        srcb = os.path.basename(src)
        tar.add(srcb)
        tar.close()
        data = open(tfname, "rb").read()
        container.put_archive(dest, data)
        os.unlink(tfname)
        os.chdir(idir)
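    # Illustrative (hypothetical) use: copy_to_container("./tfout", "/toolfactory/ptest", container)
    # tars ./tfout locally, then unpacks it inside the container as /toolfactory/ptest/tfout.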

    def copy_from_container(self, src, dest, container):
        """recreate the src directory tree at dest using docker sdk"""
        os.makedirs(dest, exist_ok=True)
        _, tfname = tempfile.mkstemp(suffix=".tar")
        tf = open(tfname, "wb")
        bits, stat = container.get_archive(src)
        for chunk in bits:
            tf.write(chunk)
        tf.close()
        tar = tarfile.open(tfname, "r")
        tar.extractall(dest)
        tar.close()
        os.unlink(tfname)

    def planemo_biodocker_test(self):
        """planemo currently leaks dependencies if used in the same container and gets unhappy after a
        first successful run. https://github.com/galaxyproject/planemo/issues/1078#issuecomment-731476930

        Docker biocontainer has planemo with caches filled to save repeated downloads
        """

        def prun(container, tout, cl, user="biodocker"):
            rlog = container.exec_run(cl, user=user)
            slogl = str(rlog).split("\\n")
            slog = "\n".join(slogl)
            tout.write(f"## got rlog {slog} from {cl}\n")

        dgroup = grp.getgrnam("docker")[2]
        if os.path.exists(self.tlog):
            tout = open(self.tlog, "a")
        else:
            tout = open(self.tlog, "w")
        planemoimage = "quay.io/fubar2/planemo-biocontainer"
        xreal = "%s.xml" % self.tool_name
        repname = f"{self.tool_name}_planemo_test_report.html"
        ptestrep_path = os.path.join(self.repdir, repname)
        tool_name = self.tool_name
        client = docker.from_env()
        tvol = client.volumes.create()
        tvolname = tvol.name
        destdir = "/toolfactory/ptest"
        imrep = os.path.join(destdir, repname)
        # container = client.containers.run(planemoimage, 'sleep 10000m', detach=True, user="biodocker",
        container = client.containers.run(
            planemoimage,
            detach=True,
            user="biodocker",
            network="host",
            volumes={f"{tvolname}": {"bind": "/toolfactory", "mode": "rw"}},
        )
        cl = f"groupmod -g {dgroup} docker"
        prun(container, tout, cl, user="root")
        cl = f"mkdir -p {destdir}"
        prun(container, tout, cl, user="root")
        cl = f"rm -rf {destdir}/*"
        prun(container, tout, cl, user="root")
        ptestpath = os.path.join(destdir, "tfout", xreal)
        self.copy_to_container(self.tooloutdir, destdir, container)
        cl = "chmod -R a+rwx /toolfactory"
        prun(container, tout, cl, user="root")
        rlog = container.exec_run(f"ls -la {destdir}")
        ptestcl = f"planemo test --update_test_data --no_cleanup --test_data {destdir}/tfout/test-data --galaxy_root /home/biodocker/galaxy-central {ptestpath}"
        try:
            rlog = container.exec_run(ptestcl)
        except Exception as e:
            tout.write(f"#### error: {e} from {ptestcl}\n")
        # fails - used to generate test outputs
        cl = f"planemo test --test_output {imrep} --no_cleanup --test_data {destdir}/tfout/test-data --galaxy_root /home/biodocker/galaxy-central {ptestpath}"
        try:
            prun(container, tout, cl)
        except Exception:
            pass
        testouts = tempfile.mkdtemp(suffix=None, prefix="tftemp", dir=".")
        self.copy_from_container(destdir, testouts, container)
        src = os.path.join(testouts, "ptest")
        if os.path.isdir(src):
            shutil.copytree(src, ".", dirs_exist_ok=True)
            src = repname
            if os.path.isfile(repname):
                shutil.copyfile(src, ptestrep_path)
        else:
            tout.write(f"No output from run to shutil.copytree in {src}\n")
        tout.close()
        container.stop()
        container.remove()
        tvol.remove()
        # shutil.rmtree(testouts)

    def shedLoad(self):
        """
        {'deleted': False,
        'description': 'Tools for manipulating data',
        'id': '175812cd7caaf439',
        'model_class': 'Category',
        'name': 'Text Manipulation',
        'url': '/api/categories/175812cd7caaf439'}]
        """
        if os.path.exists(self.tlog):
            sto = open(self.tlog, "a")
        else:
            sto = open(self.tlog, "w")

        ts = toolshed.ToolShedInstance(
            url=self.args.toolshed_url, key=self.args.toolshed_api_key, verify=False
        )
        repos = ts.repositories.get_repositories()
        rnames = [x.get("name", "?") for x in repos]
        rids = [x.get("id", "?") for x in repos]
        sto.write(f"############names={rnames} rids={rids}\n")
        sto.write(f"############repositories={repos}\n")
        tfcat = "ToolFactory generated tools"
        if self.tool_name not in rnames:
            tscat = ts.categories.get_categories()
            cnames = [x.get("name", "?").strip() for x in tscat]
            cids = [x.get("id", "?") for x in tscat]
            catID = None
            if tfcat.strip() in cnames:
                ci = cnames.index(tfcat)
                catID = cids[ci]
            res = ts.repositories.create_repository(
                name=self.args.tool_name,
                synopsis="Synopsis:%s" % self.args.tool_desc,
                description=self.args.tool_desc,
                type="unrestricted",
                remote_repository_url=self.args.toolshed_url,
                homepage_url=None,
                category_ids=catID,
            )
            tid = res.get("id", None)
            sto.write(f"##########create res={res}\n")
        else:
            i = rnames.index(self.tool_name)
            tid = rids[i]
        try:
            res = ts.repositories.update_repository(
                id=tid, tar_ball_path=self.newtarpath, commit_message=None
            )
            sto.write(f"#####update res={res}\n")
        except ConnectionError:
            sto.write(
                "Probably no change to repository - bioblend shed upload failed\n"
            )
        sto.close()

    def eph_galaxy_load(self):
        """load the new tool from the local toolshed after planemo uploads it"""
        if os.path.exists(self.tlog):
            tout = open(self.tlog, "a")
        else:
            tout = open(self.tlog, "w")
        cll = [
            "shed-tools",
            "install",
            "-g",
            self.args.galaxy_url,
            "--latest",
            "-a",
            self.args.galaxy_api_key,
            "--name",
            self.tool_name,
            "--owner",
            "fubar",
            "--toolshed",
            self.args.toolshed_url,
            "--section_label",
            "ToolFactory",
        ]
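        # Illustrative equivalent shell command (hypothetical key, default URLs):
        #   shed-tools install -g http://localhost:8080 --latest -a <api_key> \
        #     --name mytool --owner fubar --toolshed http://localhost:9009 --section_label ToolFactory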
        tout.write("running\n%s\n" % " ".join(cll))
        subp = subprocess.run(
            cll, env=self.ourenv, cwd=self.ourcwd, shell=False, stderr=tout, stdout=tout
        )
        tout.write(
            "installed %s - got retcode %d\n" % (self.tool_name, subp.returncode)
        )
        tout.close()
        return subp.returncode

    def planemo_shedLoad(self):
        """
        planemo shed_create --shed_target testtoolshed
        planemo shed_init --name=<name>
        --owner=<shed_username>
        --description=<short description>
        [--remote_repository_url=<URL to .shed.yml on github>]
        [--homepage_url=<Homepage for tool.>]
        [--long_description=<long description>]
        [--category=<category name>]*

        planemo shed_update --check_diff --shed_target testtoolshed
        """
        if os.path.exists(self.tlog):
            tout = open(self.tlog, "a")
        else:
            tout = open(self.tlog, "w")
        ts = toolshed.ToolShedInstance(
            url=self.args.toolshed_url, key=self.args.toolshed_api_key, verify=False
        )
        repos = ts.repositories.get_repositories()
        rnames = [x.get("name", "?") for x in repos]
        rids = [x.get("id", "?") for x in repos]
        # cat = "ToolFactory generated tools"
        if self.tool_name not in rnames:
            cll = [
                "planemo",
                "shed_create",
                "--shed_target",
                "local",
                "--owner",
                "fubar",
                "--name",
                self.tool_name,
                "--shed_key",
                self.args.toolshed_api_key,
            ]
            subp = None
            try:
                subp = subprocess.run(
                    cll,
                    env=self.ourenv,
                    shell=False,
                    cwd=self.tooloutdir,
                    stdout=tout,
                    stderr=tout,
                )
            except Exception:
                pass
            if subp is None or subp.returncode != 0:
                tout.write("Repository %s exists\n" % self.tool_name)
            else:
                tout.write("initiated %s\n" % self.tool_name)
        cll = [
            "planemo",
            "shed_upload",
            "--shed_target",
            "local",
            "--owner",
            "fubar",
            "--name",
            self.tool_name,
            "--shed_key",
            self.args.toolshed_api_key,
            "--tar",
            self.newtarpath,
        ]
        subp = subprocess.run(
            cll, env=self.ourenv, cwd=self.ourcwd, shell=False, stdout=tout, stderr=tout
        )
        tout.write("Ran %s got %d\n" % (" ".join(cll), subp.returncode))
        tout.close()
        return subp.returncode

    def eph_test(self, genoutputs=True):
        """problem getting jobid - ephemeris upload is the job before the one we want - but depends on how many inputs"""
        if os.path.exists(self.tlog):
            tout = open(self.tlog, "a")
        else:
            tout = open(self.tlog, "w")
        cll = [
            "shed-tools",
            "test",
            "-g",
            self.args.galaxy_url,
            "-a",
            self.args.galaxy_api_key,
            "--name",
            self.tool_name,
            "--owner",
            "fubar",
        ]
        if genoutputs:
            dummy, tfile = tempfile.mkstemp()
            subp = subprocess.run(
                cll, env=self.ourenv, cwd=self.ourcwd, shell=False, stderr=dummy, stdout=dummy
            )

            with open("tool_test_output.json", "rb") as f:
                s = json.loads(f.read())
            print("read %s" % s)
            cl = s["tests"][0]["data"]["job"]["command_line"].split()
            n = cl.index("--script_path")
            jobdir = cl[n + 1]
            jobdir = jobdir.replace('"', "")
            jobdir = jobdir.split("/configs")[0]
            print("jobdir=%s" % jobdir)

            # "/home/ross/galthrow/database/jobs_directory/000/649/configs/tmptfxu51gs\"
            src = os.path.join(jobdir, "working", self.newtarpath)
            if os.path.exists(src):
                dest = os.path.join(self.testdir, self.newtarpath)
                shutil.copyfile(src, dest)
            else:
                tout.write("No toolshed archive found after first ephemeris test - not a good sign")
            ephouts = os.path.join(jobdir, "working", "tfout", "test-data")
            with os.scandir(ephouts) as outs:
                for entry in outs:
                    if not entry.is_file():
                        continue
                    dest = os.path.join(self.tooloutdir, entry.name)
                    src = os.path.join(ephouts, entry.name)
                    shutil.copyfile(src, dest)
        else:
            subp = subprocess.run(
                cll, env=self.ourenv, cwd=self.ourcwd, shell=False, stderr=tout, stdout=tout
            )
            tout.write("eph_test Ran %s got %d" % (" ".join(cll), subp.returncode))
        tout.close()
        return subp.returncode

    def planemo_test_biocontainer(self, genoutputs=True):
        """planemo is a requirement so is available for testing but testing in a biocontainer
        requires some fiddling to use the hacked galaxy-central .venv

        Planemo runs:
        python ./scripts/functional_tests.py -v --with-nosehtml --html-report-file
        /export/galaxy-central/database/job_working_directory/000/17/working/TF_run_report_tempdir/tacrev_planemo_test_report.html
        --with-xunit --xunit-file /tmp/tmpt90p7f9h/xunit.xml --with-structureddata
        --structured-data-file
        /export/galaxy-central/database/job_working_directory/000/17/working/tfout/tool_test_output.json functional.test_toolbox

        for the planemo-biocontainer,
        planemo test --conda_dependency_resolution --skip_venv --galaxy_root /galthrow/ rgToolFactory2.xml
        """
        xreal = "%s.xml" % self.tool_name
        tool_test_path = os.path.join(
            self.repdir, f"{self.tool_name}_planemo_test_report.html"
        )
        if os.path.exists(self.tlog):
            tout = open(self.tlog, "a")
        else:
            tout = open(self.tlog, "w")
        if genoutputs:
            dummy, tfile = tempfile.mkstemp()
            cll = [
                ".", os.path.join(self.args.galaxy_root, ".venv", "bin", "activate"), "&&",
                "planemo",
                "test",
                "--test_data", self.testdir,
                "--test_output", tool_test_path,
                "--skip_venv",
                "--galaxy_root",
                self.args.galaxy_root,
                "--update_test_data",
                xreal,
            ]
            subp = subprocess.run(
                cll,
                env=self.ourenv,
                shell=False,
                cwd=self.tooloutdir,
                stderr=dummy,
                stdout=dummy,
            )

        else:
            cll = [
                ".", os.path.join(self.args.galaxy_root, ".venv", "bin", "activate"), "&&",
                "planemo",
                "test",
                "--test_data", self.testdir,
                "--test_output", tool_test_path,
                "--skip_venv",
                "--galaxy_root",
                self.args.galaxy_root,
                xreal,
            ]
            subp = subprocess.run(
                cll, env=self.ourenv, shell=False, cwd=self.tooloutdir, stderr=tout, stdout=tout
            )
        tout.close()
        return subp.returncode

    def writeShedyml(self):
        """for planemo"""
        yuser = self.args.user_email.split("@")[0]
        yfname = os.path.join(self.tooloutdir, ".shed.yml")
        yamlf = open(yfname, "w")
        odict = {
            "name": self.tool_name,
            "owner": yuser,
            "type": "unrestricted",
            "description": self.args.tool_desc,
            "synopsis": self.args.tool_desc,
            "category": "TF Generated Tools",
        }
        yaml.dump(odict, yamlf, allow_unicode=True)
        yamlf.close()
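    # Illustrative .shed.yml produced for a hypothetical tool "mytool" owned by "ross":
    #   name: mytool
    #   owner: ross
    #   type: unrestricted
    #   description: ...
    #   synopsis: ...
    #   category: TF Generated Tools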

    def makeTool(self):
        """write xmls and input samples into place"""
        self.makeXML()
        if self.args.script_path:
            stname = os.path.join(self.tooloutdir, "%s" % (self.sfile))
            if not os.path.exists(stname):
                shutil.copyfile(self.sfile, stname)
        xreal = "%s.xml" % self.tool_name
        xout = os.path.join(self.tooloutdir, xreal)
        shutil.copyfile(xreal, xout)
        for p in self.infiles:
            pth = p[IPATHPOS]
            dest = os.path.join(self.testdir, "%s_sample" % p[ICLPOS])
            shutil.copyfile(pth, dest)

    def makeToolTar(self):
        """move outputs into test-data and prepare the tarball"""
        excludeme = "_planemo_test_report.html"

        def exclude_function(tarinfo):
            filename = tarinfo.name
            return None if filename.endswith(excludeme) else tarinfo

        for p in self.outfiles:
            oname = p[ONAMEPOS]
            tdest = os.path.join(self.testdir, "%s_sample" % oname)
            if not os.path.isfile(tdest):
                src = os.path.join(self.testdir, oname)
                if os.path.isfile(src):
                    shutil.copyfile(src, tdest)
                    dest = os.path.join(self.repdir, "%s.sample" % (oname))
                    shutil.copyfile(src, dest)
                else:
                    print(
                        "### problem - output file %s not found in testdir %s"
                        % (tdest, self.testdir)
                    )
        tf = tarfile.open(self.newtarpath, "w:gz")
        tf.add(name=self.tooloutdir, arcname=self.tool_name, filter=exclude_function)
        tf.close()
        shutil.copyfile(self.newtarpath, self.args.new_tool)

    def moveRunOutputs(self):
        """need to move planemo or run outputs into toolfactory collection"""
        with os.scandir(self.tooloutdir) as outs:
            for entry in outs:
                if not entry.is_file():
                    continue
                if "." in entry.name:
                    nayme, ext = os.path.splitext(entry.name)
                    if ext in [".yml", ".xml", ".json", ".yaml"]:
                        ext = f"{ext}.txt"
                else:
                    ext = ".txt"
                ofn = "%s%s" % (entry.name.replace(".", "_"), ext)
                dest = os.path.join(self.repdir, ofn)
                src = os.path.join(self.tooloutdir, entry.name)
                shutil.copyfile(src, dest)
        with os.scandir(self.testdir) as outs:
            for entry in outs:
                if (not entry.is_file()) or entry.name.endswith("_sample") or entry.name.endswith("_planemo_test_report.html"):
                    continue
                if "." in entry.name:
                    nayme, ext = os.path.splitext(entry.name)
                else:
                    ext = ".txt"
                newname = f"{entry.name}{ext}"
                dest = os.path.join(self.repdir, newname)
                src = os.path.join(self.testdir, entry.name)
                shutil.copyfile(src, dest)


def main():
    """
    This is a Galaxy wrapper. It expects to be called by a special purpose tool.xml as:
    <command interpreter="python">rgBaseScriptWrapper.py --script_path "$scriptPath"
    --tool_name "foo" --interpreter "Rscript"
    </command>
    """
    parser = argparse.ArgumentParser()
    a = parser.add_argument
    a("--script_path", default=None)
    a("--history_test", default=None)
    a("--cl_prefix", default=None)
    a("--sysexe", default=None)
    a("--packages", default=None)
    a("--tool_name", default="newtool")
    a("--tool_dir", default=None)
    a("--input_files", default=[], action="append")
    a("--output_files", default=[], action="append")
    a("--user_email", default="Unknown")
    a("--bad_user", default=None)
    a("--make_Tool", default="runonly")
    a("--help_text", default=None)
    a("--tool_desc", default=None)
    a("--tool_version", default=None)
    a("--citations", default=None)
    a("--command_override", default=None)
    a("--test_override", default=None)
    a("--additional_parameters", action="append", default=[])
    a("--edit_additional_parameters", action="store_true", default=False)
    a("--parampass", default="positional")
    a("--tfout", default="./tfout")
    a("--new_tool", default="new_tool")
    a("--galaxy_url", default="http://localhost:8080")
    a("--toolshed_url", default="http://localhost:9009")
    # make sure this is identical to tool_sheds_conf.xml localhost != 127.0.0.1 so validation fails
    a("--toolshed_api_key", default="fakekey")
    a("--galaxy_api_key", default="fakekey")
    a("--galaxy_root", default="/galaxy-central")
    a("--galaxy_venv", default="/galaxy_venv")
    args = parser.parse_args()
    assert not args.bad_user, (
        'UNAUTHORISED: %s is NOT authorized to use this tool until Galaxy admin adds %s to "admin_users" in the Galaxy configuration file'
        % (args.bad_user, args.bad_user)
    )
    assert args.tool_name, "## Tool Factory expects a tool name - eg --tool_name=DESeq"
    assert (
        args.sysexe or args.packages
    ), "## Tool Factory wrapper expects an interpreter or an executable package"
    args.input_files = [x.replace('"', "").replace("'", "") for x in args.input_files]
    # remove quotes we need to deal with spaces in CL params
    for i, x in enumerate(args.additional_parameters):
        args.additional_parameters[i] = args.additional_parameters[i].replace('"', "")
    r = ScriptRunner(args)
    r.writeShedyml()
    r.makeTool()
    if args.make_Tool == "generate":
        retcode = r.run()  # for testing toolfactory itself
        r.moveRunOutputs()
        r.makeToolTar()
    else:
        r.planemo_biodocker_test()  # test to make outputs and then test
        r.moveRunOutputs()
        r.makeToolTar()
        if args.make_Tool == "gentestinstall":
            r.shedLoad()
            r.eph_galaxy_load()


if __name__ == "__main__":
    main()