planemo/lib/python3.7/site-packages/bioblend/_tests/TestGalaxyTools.py @ 0:d30785e31577 (draft)
"planemo upload commit 6eee67778febed82ddd413c3ca40b3183a3898f1"
| author | guerler |
|---|---|
| date | Fri, 31 Jul 2020 00:18:57 -0400 |
| parents | |
| children | |
| comparison | -1:000000000000 → 0:d30785e31577 |

1 """ | |
2 """ | |
3 import os | |
4 | |
5 from bioblend.galaxy.tools.inputs import ( | |
6 conditional, | |
7 dataset, | |
8 inputs, | |
9 repeat, | |
10 ) | |
11 from . import GalaxyTestBase, test_util | |
12 | |
13 | |
14 class TestGalaxyTools(GalaxyTestBase.GalaxyTestBase): | |
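    # GalaxyTestBase provides self.gi, a GalaxyInstance connected to the target
    # Galaxy server (typically configured through the BIOBLEND_GALAXY_URL and
    # BIOBLEND_GALAXY_API_KEY environment variables in bioblend's test setup).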

    def test_get_tools(self):
        # Test requires target Galaxy is configured with at least one tool.
        tools = self.gi.tools.get_tools()
        self.assertGreater(len(tools), 0)
        self.assertTrue(all(map(self._assert_is_tool_rep, tools)))

    def test_get_tool_panel(self):
        # Test requires target Galaxy is configured with at least one tool
        # section.
        tool_panel = self.gi.tools.get_tool_panel()
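        # get_tool_panel() returns a mixed list: tool sections (dicts with an
        # "elems" list of tools) and any tools placed at the top level, so the
        # filter below keeps only the sections.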
        sections = [s for s in tool_panel if "elems" in s]
        self.assertGreater(len(sections), 0)
        self.assertTrue(all(map(self._assert_is_tool_rep, sections[0]["elems"])))

    def _assert_is_tool_rep(self, data):
        self.assertTrue(data["model_class"].endswith("Tool"))
        # Special tools like SetMetadataTool may have different model_class
        # than Tool - but they all seem to end in tool.

        for key in ["name", "id", "version"]:
            self.assertIn(key, data)
        return True

    def test_paste_content(self):
        history = self.gi.histories.create_history(name="test_paste_data history")
        paste_text = 'line 1\nline 2\rline 3\r\nline 4'
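        # paste_content() runs Galaxy's upload tool on the pasted text; the
        # returned dict has an "outputs" list with one entry per created dataset.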
        tool_output = self.gi.tools.paste_content(paste_text, history["id"])
        self.assertEqual(len(tool_output["outputs"]), 1)
        # All lines in the resulting dataset should end with "\n"
        expected_contents = ("\n".join(paste_text.splitlines()) + "\n").encode()
        self._wait_and_verify_dataset(tool_output['outputs'][0]['id'], expected_contents)
        # Same with space_to_tab=True
        tool_output = self.gi.tools.paste_content(paste_text, history["id"], space_to_tab=True)
        self.assertEqual(len(tool_output["outputs"]), 1)
        expected_contents = ("\n".join("\t".join(_.split()) for _ in paste_text.splitlines()) + "\n").encode()
        self._wait_and_verify_dataset(tool_output['outputs'][0]['id'], expected_contents)

    def test_upload_file(self):
        history = self.gi.histories.create_history(name="test_upload_file history")

        fn = test_util.get_abspath("test_util.py")
        file_name = "test1"
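        # dbkey="?" means the genome build is left unspecified; passing
        # file_type="txt" sets the datatype explicitly instead of letting
        # Galaxy auto-detect it.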
        tool_output = self.gi.tools.upload_file(
            fn,
            # First param could be a regular path also of course...
            history_id=history["id"],
            file_name=file_name,
            dbkey="?",
            file_type="txt",
        )
        self._wait_for_and_verify_upload(tool_output, file_name, fn, expected_dbkey="?")

    def test_upload_file_dbkey(self):
        history = self.gi.histories.create_history(name="test_upload_file history")
        fn = test_util.get_abspath("test_util.py")
        file_name = "test1"
        dbkey = "hg19"
        tool_output = self.gi.tools.upload_file(
            fn,
            history_id=history["id"],
            file_name=file_name,
            dbkey=dbkey,
            file_type="txt",
        )
        self._wait_for_and_verify_upload(tool_output, file_name, fn, expected_dbkey=dbkey)

    @test_util.skip_unless_tool("random_lines1")
    def test_run_random_lines(self):
        # Run second test case from randomlines.xml
        history_id = self.gi.histories.create_history(name="test_run_random_lines history")["id"]
        with open(test_util.get_abspath(os.path.join("data", "1.bed"))) as f:
            contents = f.read()
        dataset_id = self._test_dataset(history_id, contents=contents)
        tool_inputs = inputs().set(
            "num_lines", "1"
        ).set(
            "input", dataset(dataset_id)
        ).set(
            "seed_source", conditional().set(
                "seed_source_selector", "set_seed"
            ).set(
                "seed", "asdf"
            )
        )
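        # The builder above produces the flat parameter dict Galaxy expects,
        # with conditional members addressed via "|"; roughly:
        #   {"num_lines": "1",
        #    "input": {"src": "hda", "id": dataset_id},
        #    "seed_source|seed_source_selector": "set_seed",
        #    "seed_source|seed": "asdf"}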
        tool_output = self.gi.tools.run_tool(
            history_id=history_id,
            tool_id="random_lines1",
            tool_inputs=tool_inputs
        )
        self.assertEqual(len(tool_output["outputs"]), 1)
        # TODO: Wait for results and verify has 1 line and is
        # chr5 131424298 131424460 CCDS4149.1_cds_0_0_chr5_131424299_f 0 +

    @test_util.skip_unless_tool("cat1")
    def test_run_cat1(self):
        history_id = self.gi.histories.create_history(name="test_run_cat1 history")["id"]
        dataset1_id = self._test_dataset(history_id, contents="1 2 3")
        dataset2_id = self._test_dataset(history_id, contents="4 5 6")
        dataset3_id = self._test_dataset(history_id, contents="7 8 9")
        tool_inputs = inputs().set(
            "input1", dataset(dataset1_id)
        ).set(
            "queries", repeat().instance(
                inputs().set("input2", dataset(dataset2_id))
            ).instance(
                inputs().set("input2", dataset(dataset3_id))
            )
        )
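        # Each repeat().instance(...) adds one block to cat1's "queries"
        # repeat; when flattened, the parameters come out roughly as
        # "queries_0|input2" and "queries_1|input2".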
        tool_output = self.gi.tools.run_tool(
            history_id=history_id,
            tool_id="cat1",
            tool_inputs=tool_inputs
        )
        self.assertEqual(len(tool_output["outputs"]), 1)
        # TODO: Wait for results and verify it has 3 lines - 1 2 3, 4 5 6,
        # and 7 8 9.

    def test_tool_dependency_install(self):
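        # install_dependencies() asks Galaxy to resolve and install the tool's
        # requirements (here via Conda) and returns a list of dependency dicts,
        # each with keys such as "name" and "dependency_type".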
        installed_dependencies = self.gi.tools.install_dependencies('CONVERTER_fasta_to_bowtie_color_index')
        self.assertTrue(any(True for d in installed_dependencies if d.get('name') == 'bowtie' and d.get('dependency_type') == 'conda'), "installed_dependencies is %s" % installed_dependencies)

    def _wait_for_and_verify_upload(self, tool_output, file_name, fn, expected_dbkey="?"):
        self.assertEqual(len(tool_output["outputs"]), 1)
        output = tool_output['outputs'][0]
        self.assertEqual(output['name'], file_name)
        with open(fn, "rb") as f:
            expected_contents = f.read()
        self._wait_and_verify_dataset(output["id"], expected_contents)
        self.assertEqual(output["genome_build"], expected_dbkey)