Mercurial repository: shellac / guppy_basecaller
file: env/lib/python3.7/site-packages/bioblend/_tests/TestGalaxyTools.py @ 2:6af9afd405e9 (draft)
commit message: "planemo upload commit 0a63dd5f4d38a1f6944587f52a8cd79874177fc1"
author:   shellac
date:     Thu, 14 May 2020 14:56:58 -0400
parents:  26e78fe6e8c4
children:
comparison: 1:75ca89e9b81c vs 2:6af9afd405e9
1 """ | |
2 """ | |
3 import os | |
4 | |
5 import six | |
6 | |
7 from bioblend.galaxy.tools.inputs import ( | |
8 conditional, | |
9 dataset, | |
10 inputs, | |
11 repeat, | |
12 ) | |
13 from . import GalaxyTestBase, test_util | |
14 | |
15 | |
16 class TestGalaxyTools(GalaxyTestBase.GalaxyTestBase): | |
17 | |
18 def setUp(self): | |
19 super(TestGalaxyTools, self).setUp() | |
20 | |
21 def test_get_tools(self): | |
22 # Test requires target Galaxy is configured with at least one tool. | |
23 tools = self.gi.tools.get_tools() | |
24 self.assertGreater(len(tools), 0) | |
25 self.assertTrue(all(map(self._assert_is_tool_rep, tools))) | |
26 | |
27 def test_get_tool_panel(self): | |
28 # Test requires target Galaxy is configured with at least one tool | |
29 # section. | |
30 tool_panel = self.gi.tools.get_tool_panel() | |
31 sections = [s for s in tool_panel if "elems" in s] | |
32 self.assertGreater(len(sections), 0) | |
33 self.assertTrue(all(map(self._assert_is_tool_rep, sections[0]["elems"]))) | |

    def _assert_is_tool_rep(self, data):
        self.assertTrue(data["model_class"].endswith("Tool"))
        # Special tools like SetMetadataTool may have a model_class other
        # than "Tool", but they all seem to end in "Tool".

        for key in ["name", "id", "version"]:
            self.assertIn(key, data)
        return True

    def test_paste_content(self):
        history = self.gi.histories.create_history(name="test_paste_data history")
        paste_text = 'line 1\nline 2\rline 3\r\nline 4'
        tool_output = self.gi.tools.paste_content(paste_text, history["id"])
        self.assertEqual(len(tool_output["outputs"]), 1)
        # All lines in the resulting dataset should end with "\n"
        expected_contents = six.b("\n".join(paste_text.splitlines()) + "\n")
        self._wait_and_verify_dataset(tool_output['outputs'][0]['id'], expected_contents)
        # Same with space_to_tab=True
        tool_output = self.gi.tools.paste_content(paste_text, history["id"], space_to_tab=True)
        self.assertEqual(len(tool_output["outputs"]), 1)
        expected_contents = six.b("\n".join("\t".join(_.split()) for _ in paste_text.splitlines()) + "\n")
        self._wait_and_verify_dataset(tool_output['outputs'][0]['id'], expected_contents)

    def test_upload_file(self):
        history = self.gi.histories.create_history(name="test_upload_file history")

        fn = test_util.get_abspath("test_util.py")
        file_name = "test1"
        tool_output = self.gi.tools.upload_file(
            fn,
            # The first argument could also be any regular file path, of course.
            history_id=history["id"],
            file_name=file_name,
            dbkey="?",
            file_type="txt",
        )
        self._wait_for_and_verify_upload(tool_output, file_name, fn, expected_dbkey="?")

    def test_upload_file_dbkey(self):
        history = self.gi.histories.create_history(name="test_upload_file history")
        fn = test_util.get_abspath("test_util.py")
        file_name = "test1"
        dbkey = "hg19"
        tool_output = self.gi.tools.upload_file(
            fn,
            history_id=history["id"],
            file_name=file_name,
            dbkey=dbkey,
            file_type="txt",
        )
        self._wait_for_and_verify_upload(tool_output, file_name, fn, expected_dbkey=dbkey)

    @test_util.skip_unless_tool("random_lines1")
    def test_run_random_lines(self):
        # Run second test case from randomlines.xml
        history_id = self.gi.histories.create_history(name="test_run_random_lines history")["id"]
        with open(test_util.get_abspath(os.path.join("data", "1.bed"))) as f:
            contents = f.read()
        dataset_id = self._test_dataset(history_id, contents=contents)
        tool_inputs = inputs().set(
            "num_lines", "1"
        ).set(
            "input", dataset(dataset_id)
        ).set(
            "seed_source", conditional().set(
                "seed_source_selector", "set_seed"
            ).set(
                "seed", "asdf"
            )
        )
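        # Note: when run_tool() serializes this builder, the nested
        # conditional should be flattened into Galaxy's pipe-separated
        # parameter names, roughly
        # {"num_lines": "1", "seed_source|seed_source_selector": "set_seed",
        #  "seed_source|seed": "asdf", ...}; the exact keys depend on the
        # bioblend.galaxy.tools.inputs flattening in the installed version.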
        tool_output = self.gi.tools.run_tool(
            history_id=history_id,
            tool_id="random_lines1",
            tool_inputs=tool_inputs
        )
        self.assertEqual(len(tool_output["outputs"]), 1)
        # TODO: Wait for results and verify the output has 1 line and it is
        # chr5 131424298 131424460 CCDS4149.1_cds_0_0_chr5_131424299_f 0 +

    @test_util.skip_unless_tool("cat1")
    def test_run_cat1(self):
        history_id = self.gi.histories.create_history(name="test_run_cat1 history")["id"]
        dataset1_id = self._test_dataset(history_id, contents="1 2 3")
        dataset2_id = self._test_dataset(history_id, contents="4 5 6")
        dataset3_id = self._test_dataset(history_id, contents="7 8 9")
        tool_inputs = inputs().set(
            "input1", dataset(dataset1_id)
        ).set(
            "queries", repeat().instance(
                inputs().set("input2", dataset(dataset2_id))
            ).instance(
                inputs().set("input2", dataset(dataset3_id))
            )
        )
        tool_output = self.gi.tools.run_tool(
            history_id=history_id,
            tool_id="cat1",
            tool_inputs=tool_inputs
        )
        self.assertEqual(len(tool_output["outputs"]), 1)
        # TODO: Wait for results and verify it has 3 lines - 1 2 3, 4 5 6,
        # and 7 8 9.
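        # A sketch analogous to the one in test_run_random_lines could finish
        # this: build the expected bytes ("1 2 3\n4 5 6\n7 8 9\n", assuming
        # cat1 joins the inputs with newlines) and pass them to
        # self._wait_and_verify_dataset() with tool_output["outputs"][0]["id"].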

    # This test no longer works on Galaxy 16.10 because that release ships
    # with an old Conda (3.19.3).
    @test_util.skip_unless_galaxy('release_17.01')
    def test_tool_dependency_install(self):
        installed_dependencies = self.gi.tools.install_dependencies('CONVERTER_fasta_to_bowtie_color_index')
        self.assertTrue(
            any(d.get('name') == 'bowtie' and d.get('dependency_type') == 'conda'
                for d in installed_dependencies),
            "installed_dependencies is %s" % installed_dependencies)

    def _wait_for_and_verify_upload(self, tool_output, file_name, fn, expected_dbkey="?"):
        self.assertEqual(len(tool_output["outputs"]), 1)
        output = tool_output['outputs'][0]
        self.assertEqual(output['name'], file_name)
        # Read the uploaded file's bytes with a context manager so the file
        # handle is closed promptly.
        with open(fn, "rb") as f:
            expected_contents = f.read()
        self._wait_and_verify_dataset(output["id"], expected_contents)
        self.assertEqual(output["genome_build"], expected_dbkey)