# replace with shebang for biocontainer
# see https://github.com/fubar2/toolfactory
#
# copyright ross lazarus (ross stop lazarus at gmail stop com) May 2012
#
# all rights reserved
# Licensed under the LGPL
# suggestions for improvement and bug fixes welcome at
# https://github.com/fubar2/toolfactory
#
# July 2020: BCC was fun and I feel like rip van winkle after 5 years.
# Decided to
# 1. Fix the toolfactory so it works - done for simplest case
# 2. Fix planemo so the toolfactory function works
# 3. Rewrite bits using galaxyxml functions where that makes sense - done
#
# removed all the old complications including making the new tool use this same script
# galaxyxml now generates the tool xml https://github.com/hexylena/galaxyxml
# No support for automatic HTML file creation from arbitrary outputs
# essential problem is to create two command lines - one for the tool xml and a different
# one to run the executable with the supplied test data and settings
# Be simpler to write the tool, then run it with planemo and soak up the test outputs.
# well well. sh run_tests.sh --id rgtf2 --report_file tool_tests_tool_conf.html functional.test_toolbox
# does the needful. Use GALAXY_TEST_SAVE /foo to save outputs - only the tar.gz - not the rest sadly
# GALAXY_TEST_NO_CLEANUP GALAXY_TEST_TMP_DIR=wherever
# planemo test --engine docker_galaxy --test_data ./test-data/ --docker_extra_volume ./test-data rgToolFactory2.xml

import argparse
import copy
import datetime
import grp
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
import time

from bioblend import ConnectionError
from bioblend import toolshed

import docker

import galaxyxml.tool as gxt
import galaxyxml.tool.parameters as gxtp

import lxml

import yaml

myversion = "V2.1 July 2020"
verbose = True
debug = True
toolFactoryURL = "https://github.com/fubar2/toolfactory"
ourdelim = "~~~"
ALOT = 10000000  # srsly. command or test overrides use read() so just in case
STDIOXML = """<stdio>
<exit_code range="100:" level="debug" description="shite happens" />
</stdio>"""

# --input_files="$input_files~~~$CL~~~$input_formats~~~$input_label
# ~~~$input_help"
IPATHPOS = 0
ICLPOS = 1
IFMTPOS = 2
ILABPOS = 3
IHELPOS = 4
IOCLPOS = 5

# --output_files="$otab.history_name~~~$otab.history_format~~~$otab.CL~~~$otab.history_test"
ONAMEPOS = 0
OFMTPOS = 1
OCLPOS = 2
OTESTPOS = 3
OOCLPOS = 4

# --additional_parameters="$i.param_name~~~$i.param_value~~~
# $i.param_label~~~$i.param_help~~~$i.param_type~~~$i.CL~~~$i.param_CLoverride"
ANAMEPOS = 0
AVALPOS = 1
ALABPOS = 2
AHELPPOS = 3
ATYPEPOS = 4
ACLPOS = 5
AOVERPOS = 6
AOCLPOS = 7

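# Worked illustration (hypothetical values, not part of the tool interface): each
# repeat on the ToolFactory form arrives as one ~~~-delimited string, so splitting
# on ourdelim gives a list indexed by the *POS constants above, e.g.
#   "outtab~~~tabular~~~2~~~diff:0".split(ourdelim)
#   -> ["outtab", "tabular", "2", "diff:0"]
# so p[ONAMEPOS] == "outtab" and p[OTESTPOS] == "diff:0". cleanuppar() later appends
# a copy of the original CL field, which is what IOCLPOS/OOCLPOS/AOCLPOS index.
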
foo = len(lxml.__version__)
# lxml is imported only because galaxyxml needs it at runtime; referencing it
# here keeps flake8 from flagging an unused import.
FAKEEXE = "~~~REMOVE~~~ME~~~"
# need this until a PR/version bump to fix galaxyxml prepending the exe even
# with override.


def timenow():
    """return current time as a string"""
    return time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(time.time()))


def quote_non_numeric(s):
    """return a prequoted string for non-numerics
    useful for perl and Rscript parameter passing?
    """
    try:
        _ = float(s)
        return s
    except ValueError:
        return '"%s"' % s

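# Example (illustrative only): quote_non_numeric("3.14") returns "3.14" unchanged,
# while quote_non_numeric("two words") returns '"two words"', so free-text values
# survive interpolation into Rscript or perl command lines.
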

html_escape_table = {"&": "&amp;", ">": "&gt;", "<": "&lt;", "$": r"\$"}


def html_escape(text):
    """Produce entities within text."""
    return "".join(html_escape_table.get(c, c) for c in text)


def html_unescape(text):
    """Revert entities within text. Multiple character targets so use replace"""
    t = text.replace("&amp;", "&")
    t = t.replace("&gt;", ">")
    t = t.replace("&lt;", "<")
    t = t.replace("\\$", "$")
    return t


def parse_citations(citations_text):
    """Split the citations parameter into (citation_type, citation) tuples."""
    citations = [c for c in citations_text.split("**ENTRY**") if c.strip()]
    citation_tuples = []
    for citation in citations:
        if citation.startswith("doi"):
            citation_tuples.append(("doi", citation[len("doi") :].strip()))
        else:
            citation_tuples.append(("bibtex", citation[len("bibtex") :].strip()))
    return citation_tuples

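# Example (illustrative only) of the citations text parse_citations expects:
#   "doi10.1093/bioinformatics/bts573**ENTRY**bibtex@article{key, ...}"
#   -> [("doi", "10.1093/bioinformatics/bts573"),
#       ("bibtex", "@article{key, ...}")]
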

class ScriptRunner:
    """Wrapper for an arbitrary script
    uses galaxyxml

    """

    def __init__(self, args=None):
        """
        prepare command line cl for running the tool here
        and prepare elements needed for galaxyxml tool generation
        """
        self.ourcwd = os.getcwd()
        self.ourenv = copy.deepcopy(os.environ)
        self.infiles = [x.split(ourdelim) for x in args.input_files]
        self.outfiles = [x.split(ourdelim) for x in args.output_files]
        self.addpar = [x.split(ourdelim) for x in args.additional_parameters]
        self.args = args
        self.cleanuppar()
        self.lastclredirect = None
        self.lastxclredirect = None
        self.cl = []
        self.xmlcl = []
        self.is_positional = self.args.parampass == "positional"
        if self.args.sysexe:
            self.executeme = self.args.sysexe
        else:
            if self.args.packages:
                self.executeme = self.args.packages.split(",")[0].split(":")[0]
            else:
                self.executeme = None
        aCL = self.cl.append
        aXCL = self.xmlcl.append
        assert args.parampass in [
            "0",
            "argparse",
            "positional",
        ], 'args.parampass must be "0","positional" or "argparse"'
        self.tool_name = re.sub("[^a-zA-Z0-9_]+", "", args.tool_name)
        self.tool_id = self.tool_name
        self.newtool = gxt.Tool(
            self.tool_name,
            self.tool_id,
            self.args.tool_version,
            self.args.tool_desc,
            FAKEEXE,
        )
        self.newtarpath = "toolfactory_%s.tgz" % self.tool_name
        self.tooloutdir = "./tfout"
        self.repdir = "./TF_run_report_tempdir"
        self.testdir = os.path.join(self.tooloutdir, "test-data")
        if not os.path.exists(self.tooloutdir):
            os.mkdir(self.tooloutdir)
        if not os.path.exists(self.testdir):
            os.mkdir(self.testdir)  # make tests directory
        if not os.path.exists(self.repdir):
            os.mkdir(self.repdir)
        self.tinputs = gxtp.Inputs()
        self.toutputs = gxtp.Outputs()
        self.testparam = []
        if self.args.script_path:
            self.prepScript()
        if self.args.command_override:
            scos = open(self.args.command_override, "r").readlines()
            self.command_override = [x.rstrip() for x in scos]
        else:
            self.command_override = None
        if self.args.test_override:
            stos = open(self.args.test_override, "r").readlines()
            self.test_override = [x.rstrip() for x in stos]
        else:
            self.test_override = None
        if self.args.cl_prefix:  # DIY CL start
            clp = self.args.cl_prefix.split(" ")
            for c in clp:
                aCL(c)
                aXCL(c)
        else:
            if self.args.script_path:
                aCL(self.executeme)
                aCL(self.sfile)
                aXCL(self.executeme)
                aXCL("$runme")
            else:
                aCL(self.executeme)  # this little CL will just run
                aXCL(self.executeme)
        self.elog = os.path.join(self.repdir, "%s_error_log.txt" % self.tool_name)
        self.tlog = os.path.join(self.repdir, "%s_runner_log.txt" % self.tool_name)

        if self.args.parampass == "0":
            self.clsimple()
        else:
            clsuffix = []
            xclsuffix = []
            for i, p in enumerate(self.infiles):
                if p[IOCLPOS] == "STDIN":
                    appendme = [
                        p[IOCLPOS],
                        p[ICLPOS],
                        p[IPATHPOS],
                        "< %s" % p[IPATHPOS],
                    ]
                    xappendme = [
                        p[IOCLPOS],
                        p[ICLPOS],
                        p[IPATHPOS],
                        "< $%s" % p[ICLPOS],
                    ]
                else:
                    appendme = [p[IOCLPOS], p[ICLPOS], p[IPATHPOS], ""]
                    xappendme = [p[IOCLPOS], p[ICLPOS], "$%s" % p[ICLPOS], ""]
                clsuffix.append(appendme)
                xclsuffix.append(xappendme)
            for i, p in enumerate(self.outfiles):
                if p[OOCLPOS] == "STDOUT":
                    self.lastclredirect = [">", p[ONAMEPOS]]
                    self.lastxclredirect = [">", "$%s" % p[OCLPOS]]
                else:
                    clsuffix.append([p[OOCLPOS], p[OCLPOS], p[ONAMEPOS], ""])
                    xclsuffix.append([p[OOCLPOS], p[OCLPOS], "$%s" % p[ONAMEPOS], ""])
            for p in self.addpar:
                clsuffix.append([p[AOCLPOS], p[ACLPOS], p[AVALPOS], p[AOVERPOS]])
                xclsuffix.append(
                    [p[AOCLPOS], p[ACLPOS], '"$%s"' % p[ANAMEPOS], p[AOVERPOS]]
                )
            clsuffix.sort()
            xclsuffix.sort()
            self.xclsuffix = xclsuffix
            self.clsuffix = clsuffix
            if self.args.parampass == "positional":
                self.clpositional()
            else:
                self.clargparse()

    def prepScript(self):
        rx = open(self.args.script_path, "r").readlines()
        rx = [x.rstrip() for x in rx]
        rxcheck = [x.strip() for x in rx if x.strip() > ""]
        assert len(rxcheck) > 0, "Supplied script is empty. Cannot run"
        self.script = "\n".join(rx)
        fhandle, self.sfile = tempfile.mkstemp(
            prefix=self.tool_name, suffix="_%s" % (self.executeme)
        )
        tscript = open(self.sfile, "w")
        tscript.write(self.script)
        tscript.close()
        self.indentedScript = " %s" % "\n".join([" %s" % html_escape(x) for x in rx])
        self.escapedScript = "%s" % "\n".join([" %s" % html_escape(x) for x in rx])
        art = "%s.%s" % (self.tool_name, self.executeme)
        artifact = open(art, "wb")
        artifact.write(bytes(self.script, "utf8"))
        artifact.close()

    def cleanuppar(self):
        """positional parameters are complicated by their numeric ordinal"""
        for i, p in enumerate(self.infiles):
            if self.args.parampass == "positional":
                assert p[
                    ICLPOS
                ].isdigit(), "Positional parameters must be ordinal integers - got %s for %s" % (
                    p[ICLPOS],
                    p[ILABPOS],
                )
            p.append(p[ICLPOS])
            if p[ICLPOS].isdigit() or self.args.parampass == "0":
                scl = "input%d" % (i + 1)
                p[ICLPOS] = scl
            self.infiles[i] = p
        for i, p in enumerate(
            self.outfiles
        ):  # trying to automagically gather using extensions
            if self.args.parampass == "positional" and p[OCLPOS] != "STDOUT":
                assert p[
                    OCLPOS
                ].isdigit(), "Positional parameters must be ordinal integers - got %s for %s" % (
                    p[OCLPOS],
                    p[ONAMEPOS],
                )
            p.append(p[OCLPOS])
            if p[OCLPOS].isdigit() or p[OCLPOS] == "STDOUT":
                scl = p[ONAMEPOS]
                p[OCLPOS] = scl
            self.outfiles[i] = p
        for i, p in enumerate(self.addpar):
            if self.args.parampass == "positional":
                assert p[
                    ACLPOS
                ].isdigit(), "Positional parameters must be ordinal integers - got %s for %s" % (
                    p[ACLPOS],
                    p[ANAMEPOS],
                )
            p.append(p[ACLPOS])
            if p[ACLPOS].isdigit():
                scl = "input%s" % p[ACLPOS]
                p[ACLPOS] = scl
            self.addpar[i] = p

    def clsimple(self):
        """no parameters - uses < and > for i/o"""
        aCL = self.cl.append
        aXCL = self.xmlcl.append

        if len(self.infiles) > 0:
            aCL("<")
            aCL(self.infiles[0][IPATHPOS])
            aXCL("<")
            aXCL("$%s" % self.infiles[0][ICLPOS])
        if len(self.outfiles) > 0:
            aCL(">")
            aCL(self.outfiles[0][OCLPOS])
            aXCL(">")
            aXCL("$%s" % self.outfiles[0][ONAMEPOS])

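    # Sketch of what clsimple produces for a hypothetical one-input, one-output
    # filter script (all names invented for illustration):
    #   self.cl    -> ["python", "/tmp/newtoolXXXX_python", "<", "input1.tsv", ">", "outtab"]
    #   self.xmlcl -> ["python", "$runme", "<", "$input1", ">", "$outtab"]
    # i.e. one literal command line used to generate test outputs locally and one
    # templated command line that is written into the generated tool XML.
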
    def clpositional(self):
        # inputs in order then params
        aCL = self.cl.append
        for (o_v, k, v, koverride) in self.clsuffix:
            if " " in v:
                aCL("%s" % v)
            else:
                aCL(v)
        aXCL = self.xmlcl.append
        for (o_v, k, v, koverride) in self.xclsuffix:
            aXCL(v)
        if self.lastxclredirect:
            aXCL(self.lastxclredirect[0])
            aXCL(self.lastxclredirect[1])

    def clargparse(self):
        """argparse style"""
        aCL = self.cl.append
        aXCL = self.xmlcl.append
        # inputs then params in argparse named form
        for (o_v, k, v, koverride) in self.xclsuffix:
            if koverride > "":
                k = koverride
            elif len(k.strip()) == 1:
                k = "-%s" % k
            else:
                k = "--%s" % k
            aXCL(k)
            aXCL(v)
        for (o_v, k, v, koverride) in self.clsuffix:
            if koverride > "":
                k = koverride
            elif len(k.strip()) == 1:
                k = "-%s" % k
            else:
                k = "--%s" % k
            aCL(k)
            aCL(v)

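    # Sketch (invented names) of the argparse-style command lines clargparse builds:
    #   self.cl    -> [..., "--input_tab", "input1.tsv", "--outtab", "outtab"]
    #   self.xmlcl -> [..., "--input_tab", "$input_tab", "--outtab", "$outtab"]
    # Single-character names get a single dash, longer names get a double dash,
    # and a non-empty per-parameter override replaces the flag entirely.
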
    def getNdash(self, newname):
        if self.is_positional:
            ndash = 0
        else:
            ndash = 2
            if len(newname) < 2:
                ndash = 1
        return ndash

    def doXMLparam(self):
        """flake8 made me do this..."""
        for p in self.outfiles:
            newname, newfmt, newcl, test, oldcl = p
            ndash = self.getNdash(newcl)
            aparm = gxtp.OutputData(newcl, format=newfmt, num_dashes=ndash)
            aparm.positional = self.is_positional
            if self.is_positional:
                if oldcl == "STDOUT":
                    aparm.positional = 9999999
                    aparm.command_line_override = "> $%s" % newcl
                else:
                    aparm.positional = int(oldcl)
                    aparm.command_line_override = "$%s" % newcl
            self.toutputs.append(aparm)
            usetest = None
            ld = None
            if test > "":
                if test.startswith("diff"):
                    usetest = "diff"
                    if ":" in test and test.split(":")[1].isdigit():
                        ld = int(test.split(":")[1])
                else:
                    usetest = test
            tp = gxtp.TestOutput(
                name=newcl,
                value="%s_sample" % newcl,
                format=newfmt,
                compare=usetest,
                lines_diff=ld,
                delta=None,
            )
            self.testparam.append(tp)
        for p in self.infiles:
            newname = p[ICLPOS]
            newfmt = p[IFMTPOS]
            ndash = self.getNdash(newname)
            if not len(p[ILABPOS]) > 0:
                alab = p[ICLPOS]
            else:
                alab = p[ILABPOS]
            aninput = gxtp.DataParam(
                newname,
                optional=False,
                label=alab,
                help=p[IHELPOS],
                format=newfmt,
                multiple=False,
                num_dashes=ndash,
            )
            aninput.positional = self.is_positional
            self.tinputs.append(aninput)
            tparm = gxtp.TestParam(name=newname, value="%s_sample" % newname)
            self.testparam.append(tparm)
        for p in self.addpar:
            newname, newval, newlabel, newhelp, newtype, newcl, override, oldcl = p
            if not len(newlabel) > 0:
                newlabel = newname
            ndash = self.getNdash(newname)
            if newtype == "text":
                aparm = gxtp.TextParam(
                    newname,
                    label=newlabel,
                    help=newhelp,
                    value=newval,
                    num_dashes=ndash,
                )
            elif newtype == "integer":
                aparm = gxtp.IntegerParam(
                    newname,
                    label=newname,
                    help=newhelp,
                    value=newval,
                    num_dashes=ndash,
                )
            elif newtype == "float":
                aparm = gxtp.FloatParam(
                    newname,
                    label=newname,
                    help=newhelp,
                    value=newval,
                    num_dashes=ndash,
                )
            else:
                raise ValueError(
                    'Unrecognised parameter type "%s" for\
                     additional parameter %s in makeXML'
                    % (newtype, newname)
                )
            aparm.positional = self.is_positional
            if self.is_positional:
                aparm.positional = int(oldcl)
            self.tinputs.append(aparm)
            tparm = gxtp.TestParam(newname, value=newval)
            self.testparam.append(tparm)

    def doNoXMLparam(self):
        """filter style package - stdin to stdout"""
        if len(self.infiles) > 0:
            alab = self.infiles[0][ILABPOS]
            if len(alab) == 0:
                alab = self.infiles[0][ICLPOS]
            max1s = (
                "Maximum one input if parampass is 0 but multiple input files supplied - %s"
                % str(self.infiles)
            )
            assert len(self.infiles) == 1, max1s
            newname = self.infiles[0][ICLPOS]
            aninput = gxtp.DataParam(
                newname,
                optional=False,
                label=alab,
                help=self.infiles[0][IHELPOS],
                format=self.infiles[0][IFMTPOS],
                multiple=False,
                num_dashes=0,
            )
            aninput.command_line_override = "< $%s" % newname
            aninput.positional = self.is_positional
            self.tinputs.append(aninput)
            tp = gxtp.TestParam(name=newname, value="%s_sample" % newname)
            self.testparam.append(tp)
        if len(self.outfiles) > 0:
            newname = self.outfiles[0][OCLPOS]
            newfmt = self.outfiles[0][OFMTPOS]
            anout = gxtp.OutputData(newname, format=newfmt, num_dashes=0)
            anout.command_line_override = "> $%s" % newname
            anout.positional = self.is_positional
            self.toutputs.append(anout)
            tp = gxtp.TestOutput(
                name=newname, value="%s_sample" % newname, format=newfmt
            )
            self.testparam.append(tp)

    def makeXML(self):
        """
        Create a Galaxy xml tool wrapper for the new script
        Uses galaxyxml
        Hmmm. How to get the command line into correct order...
        """
        if self.command_override:
            self.newtool.command_override = self.command_override  # config file
        else:
            self.newtool.command_override = self.xmlcl
        if self.args.help_text:
            helptext = open(self.args.help_text, "r").readlines()
            safertext = [html_escape(x) for x in helptext]
            if False and self.args.script_path:
                scrp = self.script.split("\n")
                scrpt = [" %s" % x for x in scrp]  # try to stop templating
                scrpt.insert(0, "```\n")
                if len(scrpt) > 300:
                    safertext = (
                        safertext + scrpt[:100] +
                        [">300 lines - stuff deleted", "......"] + scrpt[-100:]
                    )
                else:
                    safertext = safertext + scrpt
                safertext.append("\n```")
            self.newtool.help = "\n".join([x for x in safertext])
        else:
            self.newtool.help = (
                "Please ask the tool author (%s) for help \
                as none was supplied at tool generation\n"
                % (self.args.user_email)
            )
        self.newtool.version_command = None  # do not want
        requirements = gxtp.Requirements()
        if self.args.packages:
            for d in self.args.packages.split(","):
                if ":" in d:
                    packg, ver = d.split(":")
                else:
                    packg = d
                    ver = ""
                requirements.append(
                    gxtp.Requirement("package", packg.strip(), ver.strip())
                )
            self.newtool.requirements = requirements
        if self.args.parampass == "0":
            self.doNoXMLparam()
        else:
            self.doXMLparam()
        self.newtool.outputs = self.toutputs
        self.newtool.inputs = self.tinputs
        if self.args.script_path:
            configfiles = gxtp.Configfiles()
            configfiles.append(gxtp.Configfile(name="runme", text=self.script))
            self.newtool.configfiles = configfiles
        tests = gxtp.Tests()
        test_a = gxtp.Test()
        for tp in self.testparam:
            test_a.append(tp)
        tests.append(test_a)
        self.newtool.tests = tests
        self.newtool.add_comment(
            "Created by %s at %s using the Galaxy Tool Factory."
            % (self.args.user_email, timenow())
        )
        self.newtool.add_comment("Source in git at: %s" % (toolFactoryURL))
        self.newtool.add_comment(
            "Cite: Creating re-usable tools from scripts doi: \
            10.1093/bioinformatics/bts573"
        )
        exml0 = self.newtool.export()
        exml = exml0.replace(FAKEEXE, "")  # temporary work around until PR accepted
        if (
            self.test_override
        ):  # cannot do this inside galaxyxml as it expects lxml objects for tests
            part1 = exml.split("<tests>")[0]
            part2 = exml.split("</tests>")[1]
            fixed = "%s\n%s\n%s" % (part1, "\n".join(self.test_override), part2)
            exml = fixed
        exml = exml.replace('range="1:"', 'range="1000:"')
        xf = open("%s.xml" % self.tool_name, "w")
        xf.write(exml)
        xf.write("\n")
        xf.close()
        # ready for the tarball

    def run(self):
        """
        generate test outputs by running a command line
        won't work if command or test override in play - planemo is the
        easiest way to generate test outputs for that case so is
        automagically selected
        """
        scl = " ".join(self.cl)
        err = None
        if self.args.parampass != "0":
            if os.path.exists(self.elog):
                ste = open(self.elog, "a")
            else:
                ste = open(self.elog, "w")
            if self.lastclredirect:
                sto = open(self.lastclredirect[1], "wb")  # is name of an output file
            else:
                if os.path.exists(self.tlog):
                    sto = open(self.tlog, "a")
                else:
                    sto = open(self.tlog, "w")
                sto.write(
                    "## Executing Toolfactory generated command line = %s\n" % scl
                )
                sto.flush()
            subp = subprocess.run(
                self.cl, env=self.ourenv, shell=False, stdout=sto, stderr=ste
            )
            sto.close()
            ste.close()
            retval = subp.returncode
        else:  # work around special case - stdin and write to stdout
            if len(self.infiles) > 0:
                sti = open(self.infiles[0][IPATHPOS], "rb")
            else:
                sti = sys.stdin
            if len(self.outfiles) > 0:
                sto = open(self.outfiles[0][ONAMEPOS], "wb")
            else:
                sto = sys.stdout
            subp = subprocess.run(
                self.cl, env=self.ourenv, shell=False, stdout=sto, stdin=sti
            )
            # sto may be a binary output file here, so log the command line to stderr
            sys.stderr.write(
                "## Executing Toolfactory generated command line = %s\n" % scl
            )
            retval = subp.returncode
            sto.close()
            sti.close()
        if os.path.isfile(self.tlog) and os.stat(self.tlog).st_size == 0:
            os.unlink(self.tlog)
        if os.path.isfile(self.elog) and os.stat(self.elog).st_size == 0:
            os.unlink(self.elog)
        if retval != 0 and err:  # problem
            sys.stderr.write(err)
        logging.debug("run done")
        return retval

    def copy_to_container(self, src, dest, container):
        """Recreate the src directory tree at dest - full path included"""
        idir = os.getcwd()
        workdir = os.path.dirname(src)
        os.chdir(workdir)
        _, tfname = tempfile.mkstemp(suffix=".tar")
        tar = tarfile.open(tfname, mode="w")
        srcb = os.path.basename(src)
        tar.add(srcb)
        tar.close()
        data = open(tfname, "rb").read()
        container.put_archive(dest, data)
        os.unlink(tfname)
        os.chdir(idir)

    def copy_from_container(self, src, dest, container):
        """Recreate the src directory tree at dest using the docker SDK"""
        os.makedirs(dest, exist_ok=True)
        _, tfname = tempfile.mkstemp(suffix=".tar")
        tf = open(tfname, "wb")
        bits, stat = container.get_archive(src)
        for chunk in bits:
            tf.write(chunk)
        tf.close()
        tar = tarfile.open(tfname, "r")
        tar.extractall(dest)
        tar.close()
        os.unlink(tfname)

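    # Note on the two helpers above: the docker SDK moves data as tar byte streams,
    # so copy_to_container stages src through a temporary tar file for
    # container.put_archive(), and copy_from_container writes the stream returned
    # by container.get_archive() to a temporary tar file before extracting it.
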
    def planemo_biodocker_test(self):
        """planemo currently leaks dependencies if used in the same container and gets unhappy after a
        first successful run. https://github.com/galaxyproject/planemo/issues/1078#issuecomment-731476930

        Docker biocontainer has planemo with caches filled to save repeated downloads
        """

        def prun(container, tout, cl, user="biodocker"):
            rlog = container.exec_run(cl, user=user)
            slogl = str(rlog).split("\\n")
            slog = "\n".join(slogl)
            tout.write(f"## got rlog {slog} from {cl}\n")

        dgroup = grp.getgrnam("docker")[2]
        if os.path.exists(self.tlog):
            tout = open(self.tlog, "a")
        else:
            tout = open(self.tlog, "w")
        planemoimage = "quay.io/fubar2/planemo-biocontainer"
        xreal = "%s.xml" % self.tool_name
        repname = f"{self.tool_name}_planemo_test_report.html"
        ptestrep_path = os.path.join(self.repdir, repname)
        tool_name = self.tool_name
        client = docker.from_env()
        tvol = client.volumes.create()
        tvolname = tvol.name
        destdir = "/toolfactory/ptest"
        imrep = os.path.join(destdir, repname)
        container = client.containers.run(
            planemoimage,
            "sleep 10000m",
            detach=True,
            user="biodocker",
            network="host",
            volumes={f"{tvolname}": {"bind": "/toolfactory", "mode": "rw"}},
        )
        cl = f"groupmod -g {dgroup} docker"
        prun(container, tout, cl, user="root")
        cl = f"mkdir -p {destdir}"
        prun(container, tout, cl, user="root")
        cl = f"rm -rf {destdir}/*"
        prun(container, tout, cl, user="root")
        ptestpath = os.path.join(destdir, "tfout", xreal)
        self.copy_to_container(self.tooloutdir, destdir, container)
        cl = "chmod -R a+rwx /toolfactory"
        prun(container, tout, cl, user="root")
        rlog = container.exec_run(f"ls -la {destdir}")
        ptestcl = f"planemo test --update_test_data --no_cleanup --test_data {destdir}/tfout/test-data --galaxy_root /home/biodocker/galaxy-central {ptestpath}"
        try:
            rlog = container.exec_run(ptestcl)
        except Exception as e:
            tout.write(f"#### error: {e} from {ptestcl}\n")
        # first run fails - used to generate test outputs
        cl = f"planemo test --test_output {imrep} --no_cleanup --test_data {destdir}/tfout/test-data --galaxy_root /home/biodocker/galaxy-central {ptestpath}"
        try:
            prun(container, tout, cl)
        except Exception:
            pass
        testouts = tempfile.mkdtemp(suffix=None, prefix="tftemp", dir=".")
        self.copy_from_container(destdir, testouts, container)
        src = os.path.join(testouts, "ptest")
        if os.path.isdir(src):
            shutil.copytree(src, ".", dirs_exist_ok=True)
            src = repname
            if os.path.isfile(repname):
                shutil.copyfile(src, ptestrep_path)
        else:
            tout.write(f"No output from run to shutil.copytree in {src}\n")
        tout.close()
        container.stop()
        container.remove()
        tvol.remove()
        # shutil.rmtree(testouts)

    def shedLoad(self):
        """
        Example category record returned by the toolshed API:
        {'deleted': False,
        'description': 'Tools for manipulating data',
        'id': '175812cd7caaf439',
        'model_class': 'Category',
        'name': 'Text Manipulation',
        'url': '/api/categories/175812cd7caaf439'}
        """
        if os.path.exists(self.tlog):
            sto = open(self.tlog, "a")
        else:
            sto = open(self.tlog, "w")

        ts = toolshed.ToolShedInstance(
            url=self.args.toolshed_url, key=self.args.toolshed_api_key, verify=False
        )
        repos = ts.repositories.get_repositories()
        rnames = [x.get("name", "?") for x in repos]
        rids = [x.get("id", "?") for x in repos]
        sto.write(f"############names={rnames} rids={rids}\n")
        sto.write(f"############repos={repos}\n")
        tfcat = "ToolFactory generated tools"
        if self.tool_name not in rnames:
            tscat = ts.categories.get_categories()
            cnames = [x.get("name", "?").strip() for x in tscat]
            cids = [x.get("id", "?") for x in tscat]
            catID = None
            if tfcat.strip() in cnames:
                ci = cnames.index(tfcat)
                catID = cids[ci]
            res = ts.repositories.create_repository(
                name=self.args.tool_name,
                synopsis="Synopsis:%s" % self.args.tool_desc,
                description=self.args.tool_desc,
                type="unrestricted",
                remote_repository_url=self.args.toolshed_url,
                homepage_url=None,
                category_ids=catID,
            )
            tid = res.get("id", None)
            sto.write(f"##########create res={res}\n")
        else:
            i = rnames.index(self.tool_name)
            tid = rids[i]
        try:
            res = ts.repositories.update_repository(
                id=tid, tar_ball_path=self.newtarpath, commit_message=None
            )
            sto.write(f"#####update res={res}\n")
        except ConnectionError:
            sto.write(
                "Probably no change to repository - bioblend shed upload failed\n"
            )
        sto.close()

    def eph_galaxy_load(self):
        """load the new tool from the local toolshed after planemo uploads it"""
        if os.path.exists(self.tlog):
            tout = open(self.tlog, "a")
        else:
            tout = open(self.tlog, "w")
        cll = [
            "shed-tools",
            "install",
            "-g",
            self.args.galaxy_url,
            "--latest",
            "-a",
            self.args.galaxy_api_key,
            "--name",
            self.tool_name,
            "--owner",
            "fubar",
            "--toolshed",
            self.args.toolshed_url,
            "--section_label",
            "ToolFactory",
        ]
        tout.write("running\n%s\n" % " ".join(cll))
        subp = subprocess.run(
            cll, env=self.ourenv, cwd=self.ourcwd, shell=False, stderr=tout, stdout=tout
        )
        tout.write(
            "installed %s - got retcode %d\n" % (self.tool_name, subp.returncode)
        )
        tout.close()
        return subp.returncode

    def planemo_shedLoad(self):
        """
        planemo shed_create --shed_target testtoolshed
        planemo shed_init --name=<name>
                          --owner=<shed_username>
                          --description=<short description>
                          [--remote_repository_url=<URL to .shed.yml on github>]
                          [--homepage_url=<Homepage for tool.>]
                          [--long_description=<long description>]
                          [--category=<category name>]*

        planemo shed_update --check_diff --shed_target testtoolshed
        """
        if os.path.exists(self.tlog):
            tout = open(self.tlog, "a")
        else:
            tout = open(self.tlog, "w")
        ts = toolshed.ToolShedInstance(
            url=self.args.toolshed_url, key=self.args.toolshed_api_key, verify=False
        )
        repos = ts.repositories.get_repositories()
        rnames = [x.get("name", "?") for x in repos]
        rids = [x.get("id", "?") for x in repos]
        # cat = "ToolFactory generated tools"
        if self.tool_name not in rnames:
            cll = [
                "planemo",
                "shed_create",
                "--shed_target",
                "local",
                "--owner",
                "fubar",
                "--name",
                self.tool_name,
                "--shed_key",
                self.args.toolshed_api_key,
            ]
            try:
                subp = subprocess.run(
                    cll,
                    env=self.ourenv,
                    shell=False,
                    cwd=self.tooloutdir,
                    stdout=tout,
                    stderr=tout,
                )
            except Exception:
                subp = None
            if subp is None or subp.returncode != 0:
                tout.write("Repository %s exists\n" % self.tool_name)
            else:
                tout.write("initiated %s\n" % self.tool_name)
        cll = [
            "planemo",
            "shed_upload",
            "--shed_target",
            "local",
            "--owner",
            "fubar",
            "--name",
            self.tool_name,
            "--shed_key",
            self.args.toolshed_api_key,
            "--tar",
            self.newtarpath,
        ]
        subp = subprocess.run(
            cll, env=self.ourenv, cwd=self.ourcwd, shell=False, stdout=tout, stderr=tout
        )
        tout.write("Ran %s got %d\n" % (" ".join(cll), subp.returncode))
        tout.close()
        return subp.returncode

    def eph_test(self, genoutputs=True):
        """problem getting jobid - ephemeris upload is the job before the one we want - but depends on how many inputs"""
        if os.path.exists(self.tlog):
            tout = open(self.tlog, "a")
        else:
            tout = open(self.tlog, "w")
        cll = [
            "shed-tools",
            "test",
            "-g",
            self.args.galaxy_url,
            "-a",
            self.args.galaxy_api_key,
            "--name",
            self.tool_name,
            "--owner",
            "fubar",
        ]
        if genoutputs:
            dummy, tfile = tempfile.mkstemp()
            subp = subprocess.run(
                cll, env=self.ourenv, cwd=self.ourcwd, shell=False, stderr=dummy, stdout=dummy
            )

            with open("tool_test_output.json", "rb") as f:
                s = json.loads(f.read())
                print("read %s" % s)
                cl = s["tests"][0]["data"]["job"]["command_line"].split()
                n = cl.index("--script_path")
                jobdir = cl[n + 1]
                jobdir = jobdir.replace('"', "")
                jobdir = jobdir.split("/configs")[0]
                print("jobdir=%s" % jobdir)

            # "/home/ross/galthrow/database/jobs_directory/000/649/configs/tmptfxu51gs"
            src = os.path.join(jobdir, "working", self.newtarpath)
            if os.path.exists(src):
                dest = os.path.join(self.testdir, self.newtarpath)
                shutil.copyfile(src, dest)
            else:
                tout.write(
                    "No toolshed archive found after first ephemeris test - not a good sign"
                )
            ephouts = os.path.join(jobdir, "working", "tfout", "test-data")
            with os.scandir(ephouts) as outs:
                for entry in outs:
                    if not entry.is_file():
                        continue
                    dest = os.path.join(self.tooloutdir, entry.name)
                    src = os.path.join(ephouts, entry.name)
                    shutil.copyfile(src, dest)
        else:
            subp = subprocess.run(
                cll, env=self.ourenv, cwd=self.ourcwd, shell=False, stderr=tout, stdout=tout
            )
            tout.write("eph_test Ran %s got %d" % (" ".join(cll), subp.returncode))
        tout.close()
        return subp.returncode

    def planemo_test_biocontainer(self, genoutputs=True):
        """planemo is a requirement so is available for testing but testing in a biocontainer
        requires some fiddling to use the hacked galaxy-central .venv

        Planemo runs:
        python ./scripts/functional_tests.py -v --with-nosehtml --html-report-file
        /export/galaxy-central/database/job_working_directory/000/17/working/TF_run_report_tempdir/tacrev_planemo_test_report.html
        --with-xunit --xunit-file /tmp/tmpt90p7f9h/xunit.xml --with-structureddata
        --structured-data-file
        /export/galaxy-central/database/job_working_directory/000/17/working/tfout/tool_test_output.json functional.test_toolbox

        for the planemo-biocontainer,
        planemo test --conda_dependency_resolution --skip_venv --galaxy_root /galthrow/ rgToolFactory2.xml
        """
        xreal = "%s.xml" % self.tool_name
        tool_test_path = os.path.join(
            self.repdir, f"{self.tool_name}_planemo_test_report.html"
        )
        if os.path.exists(self.tlog):
            tout = open(self.tlog, "a")
        else:
            tout = open(self.tlog, "w")
        # NB: sourcing the venv with "." and "&&" needs a shell - this helper is
        # not called from main()
        if genoutputs:
            dummy, tfile = tempfile.mkstemp()
            cll = [
                ".",
                os.path.join(self.args.galaxy_root, ".venv", "bin", "activate"),
                "&&",
                "planemo",
                "test",
                "--test_data", self.testdir,
                "--test_output", tool_test_path,
                "--skip_venv",
                "--galaxy_root",
                self.args.galaxy_root,
                "--update_test_data",
                xreal,
            ]
            subp = subprocess.run(
                cll,
                env=self.ourenv,
                shell=False,
                cwd=self.tooloutdir,
                stderr=dummy,
                stdout=dummy,
            )
        else:
            cll = [
                ".",
                os.path.join(self.args.galaxy_root, ".venv", "bin", "activate"),
                "&&",
                "planemo",
                "test",
                "--test_data", self.testdir,
                "--test_output", tool_test_path,
                "--skip_venv",
                "--galaxy_root",
                self.args.galaxy_root,
                xreal,
            ]
            subp = subprocess.run(
                cll, env=self.ourenv, shell=False, cwd=self.tooloutdir, stderr=tout, stdout=tout
            )
        tout.close()
        return subp.returncode

    def writeShedyml(self):
        """for planemo"""
        yuser = self.args.user_email.split("@")[0]
        yfname = os.path.join(self.tooloutdir, ".shed.yml")
        yamlf = open(yfname, "w")
        odict = {
            "name": self.tool_name,
            "owner": yuser,
            "type": "unrestricted",
            "description": self.args.tool_desc,
            "synopsis": self.args.tool_desc,
            "category": "TF Generated Tools",
        }
        yaml.dump(odict, yamlf, allow_unicode=True)
        yamlf.close()

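    # For a hypothetical tool named "mytool" owned by "alice", writeShedyml emits a
    # .shed.yml along these lines (yaml.dump sorts keys alphabetically by default):
    #   category: TF Generated Tools
    #   description: reverse each row
    #   name: mytool
    #   owner: alice
    #   synopsis: reverse each row
    #   type: unrestricted
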
    def makeTool(self):
        """write xmls and input samples into place"""
        self.makeXML()
        if self.args.script_path:
            stname = os.path.join(self.tooloutdir, "%s" % (self.sfile))
            if not os.path.exists(stname):
                shutil.copyfile(self.sfile, stname)
        xreal = "%s.xml" % self.tool_name
        xout = os.path.join(self.tooloutdir, xreal)
        shutil.copyfile(xreal, xout)
        for p in self.infiles:
            pth = p[IPATHPOS]
            dest = os.path.join(self.testdir, "%s_sample" % p[ICLPOS])
            shutil.copyfile(pth, dest)

    def makeToolTar(self):
        """move outputs into test-data and prepare the tarball"""
        excludeme = "_planemo_test_report.html"

        def exclude_function(tarinfo):
            filename = tarinfo.name
            return (
                None
                if filename.endswith(excludeme)
                else tarinfo
            )

        for p in self.outfiles:
            oname = p[ONAMEPOS]
            tdest = os.path.join(self.testdir, "%s_sample" % oname)
            if not os.path.isfile(tdest):
                src = os.path.join(self.testdir, oname)
                if os.path.isfile(src):
                    shutil.copyfile(src, tdest)
                    dest = os.path.join(self.repdir, "%s.sample" % (oname))
                    shutil.copyfile(src, dest)
                else:
                    print(
                        "### problem - output file %s not found in testdir %s"
                        % (tdest, self.testdir)
                    )
        tf = tarfile.open(self.newtarpath, "w:gz")
        tf.add(name=self.tooloutdir, arcname=self.tool_name, filter=exclude_function)
        tf.close()
        shutil.copyfile(self.newtarpath, self.args.new_tool)

    def moveRunOutputs(self):
        """need to move planemo or run outputs into toolfactory collection"""
        with os.scandir(self.tooloutdir) as outs:
            for entry in outs:
                if not entry.is_file():
                    continue
                if "." in entry.name:
                    nayme, ext = os.path.splitext(entry.name)
                    if ext in [".yml", ".xml", ".json", ".yaml"]:
                        ext = f"{ext}.txt"
                else:
                    ext = ".txt"
                ofn = "%s%s" % (entry.name.replace(".", "_"), ext)
                dest = os.path.join(self.repdir, ofn)
                src = os.path.join(self.tooloutdir, entry.name)
                shutil.copyfile(src, dest)
        with os.scandir(self.testdir) as outs:
            for entry in outs:
                if (
                    (not entry.is_file())
                    or entry.name.endswith("_sample")
                    or entry.name.endswith("_planemo_test_report.html")
                ):
                    continue
                if "." in entry.name:
                    nayme, ext = os.path.splitext(entry.name)
                else:
                    ext = ".txt"
                newname = f"{entry.name}{ext}"
                dest = os.path.join(self.repdir, newname)
                src = os.path.join(self.testdir, entry.name)
                shutil.copyfile(src, dest)


def main():
    """
    This is a Galaxy wrapper. It expects to be called by a special purpose tool.xml as:
    <command interpreter="python">rgBaseScriptWrapper.py --script_path "$scriptPath"
    --tool_name "foo" --interpreter "Rscript"
    </command>
    """
    parser = argparse.ArgumentParser()
    a = parser.add_argument
    a("--script_path", default=None)
    a("--history_test", default=None)
    a("--cl_prefix", default=None)
    a("--sysexe", default=None)
    a("--packages", default=None)
    a("--tool_name", default="newtool")
    a("--tool_dir", default=None)
    a("--input_files", default=[], action="append")
    a("--output_files", default=[], action="append")
    a("--user_email", default="Unknown")
    a("--bad_user", default=None)
    a("--make_Tool", default="runonly")
    a("--help_text", default=None)
    a("--tool_desc", default=None)
    a("--tool_version", default=None)
    a("--citations", default=None)
    a("--command_override", default=None)
    a("--test_override", default=None)
    a("--additional_parameters", action="append", default=[])
    a("--edit_additional_parameters", action="store_true", default=False)
    a("--parampass", default="positional")
    a("--tfout", default="./tfout")
    a("--new_tool", default="new_tool")
    a("--galaxy_url", default="http://localhost:8080")
    a("--toolshed_url", default="http://localhost:9009")
    # make sure this is identical to tool_sheds_conf.xml localhost != 127.0.0.1 so validation fails
    a("--toolshed_api_key", default="fakekey")
    a("--galaxy_api_key", default="fakekey")
    a("--galaxy_root", default="/galaxy-central")
    a("--galaxy_venv", default="/galaxy_venv")
    args = parser.parse_args()
    assert not args.bad_user, (
        'UNAUTHORISED: %s is NOT authorized to use this tool until Galaxy admin adds %s to "admin_users" in the Galaxy configuration file'
        % (args.bad_user, args.bad_user)
    )
    assert args.tool_name, "## Tool Factory expects a tool name - eg --tool_name=DESeq"
    assert (
        args.sysexe or args.packages
    ), "## Tool Factory wrapper expects an interpreter or an executable package"
    args.input_files = [x.replace('"', "").replace("'", "") for x in args.input_files]
    # remove quotes we need to deal with spaces in CL params
    for i, x in enumerate(args.additional_parameters):
        args.additional_parameters[i] = args.additional_parameters[i].replace('"', "")
    r = ScriptRunner(args)
    r.writeShedyml()
    r.makeTool()
    if args.make_Tool == "generate":
        retcode = r.run()  # for testing toolfactory itself
        r.moveRunOutputs()
        r.makeToolTar()
    else:
        r.planemo_biodocker_test()  # test to make outputs and then test
        r.moveRunOutputs()
        r.makeToolTar()
        if args.make_Tool == "gentestinstall":
            r.shedLoad()
            r.eph_galaxy_load()


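# Minimal illustrative invocation (all values invented; the generated Galaxy tool
# form normally builds this command line, packing each repeat into a single
# ~~~-delimited argument):
#   python rgToolFactory2.py --script_path reverse.py --sysexe python \
#       --tool_name reverse_rows --tool_version 0.01 --parampass argparse \
#       --user_email admin@example.org --make_Tool generate --new_tool out.tgz \
#       --input_files 'input1.tsv~~~input_tab~~~tabular~~~Input table~~~help text' \
#       --output_files 'outtab~~~tabular~~~outtab~~~diff:0'
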
if __name__ == "__main__":
    main()