changeset 8:2649aece3781 (draft, default, tip)
planemo upload for repository https://github.com/galaxyproject/tools-iuc/tree/main/data_managers/data_manager_fetch_ncbi_taxonomy commit cf8607692417bdc4f663c726aea34c1056dd9c48
| author | iuc |
|---|---|
| date | Mon, 17 Nov 2025 21:47:03 +0000 |
| parents | 34a5799a65fa |
| children | |
| files | data_manager/data_manager.py data_manager/ncbi_taxonomy_fetcher.xml test-data/ncbi_accession2taxid.loc test-data/ncbi_taxonomy.loc test-data/taxonomy.json test-data/taxonomy_with_accession2taxid.json |
| diffstat | 6 files changed, 92 insertions(+), 159 deletions(-) |
--- a/data_manager/data_manager.py	Tue May 21 07:36:09 2024 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,134 +0,0 @@
-import argparse
-import datetime
-import json
-import os
-import shutil
-import tarfile
-import zipfile
-from urllib.request import Request, urlopen
-
-
-def url_download(url, workdir):
-    file_path = os.path.join(workdir, 'download.dat')
-    if not os.path.exists(workdir):
-        os.makedirs(workdir)
-    src = None
-    dst = None
-    try:
-        req = Request(url)
-        src = urlopen(req)
-        with open(file_path, 'wb') as dst:
-            while True:
-                chunk = src.read(2**10)
-                if chunk:
-                    dst.write(chunk)
-                else:
-                    break
-    finally:
-        if src:
-            src.close()
-    if tarfile.is_tarfile(file_path):
-        fh = tarfile.open(file_path, 'r:*')
-    elif zipfile.is_zipfile(file_path):
-        fh = zipfile.ZipFile(file_path, 'r')
-    else:
-        return
-    fh.extractall(workdir)
-    os.remove(file_path)
-
-
-def download_name_maps(url, workdir, partial):
-
-    if partial:
-        map_files = [
-            'pdb.accession2taxid.gz',
-        ]
-    else:
-        map_files = [
-            'dead_nucl.accession2taxid.gz',
-            'dead_prot.accession2taxid.gz',
-            'dead_wgs.accession2taxid.gz',
-            'nucl_gb.accession2taxid.gz',
-            'nucl_wgs.accession2taxid.gz',
-            'pdb.accession2taxid.gz',
-            'prot.accession2taxid.gz',
-            'prot.accession2taxid.FULL.gz'
-        ]
-
-    if not os.path.exists(workdir):
-        os.makedirs(workdir)
-
-    for map in map_files:
-        src = "{}{}".format(url, map)
-        dest = os.path.join(workdir, map)
-
-        print("Downloading taxonomy accession2taxid file from {} to {}".format(src, dest))
-
-        try:
-            req = Request(src)
-            src = urlopen(req)
-            with open(dest, 'wb') as dst:
-                while True:
-                    chunk = src.read(2**10)
-                    if chunk:
-                        dst.write(chunk)
-                    else:
-                        break
-        finally:
-            if src:
-                src.close()
-
-
-def move_files_to_final_dir(workdir, target_directory, copy=False):
-    for filename in os.listdir(workdir):
-        if copy:
-            shutil.copy(os.path.join(workdir, filename), target_directory)
-        else:
-            shutil.move(os.path.join(workdir, filename), target_directory)
-
-
-def main(args):
-    workdir = os.path.abspath(os.path.join(os.getcwd(), 'taxonomy'))
-    url_download(args.url, workdir)
-
-    data_manager_entry = {}
-    data_manager_entry['value'] = args.name.lower()
-    data_manager_entry['name'] = args.name
-    data_manager_entry['path'] = '.'
-    data_manager_json = dict(data_tables=dict(ncbi_taxonomy=data_manager_entry))
-
-    with open(args.output) as fh:
-        params = json.load(fh)
-
-    if args.name_maps:
-        workdir_a2t = os.path.join(os.getcwd(), 'accession2taxid')
-        download_name_maps("ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/accession2taxid/", workdir_a2t, args.partial)
-
-        target_directory_a2t = os.path.join(params['output_data'][0]['extra_files_path'], "accession2taxid")
-        os.makedirs(target_directory_a2t)
-        move_files_to_final_dir(workdir_a2t, target_directory_a2t)
-
-        # Also copy taxonomy data to accession2taxid dir
-        move_files_to_final_dir(workdir, target_directory_a2t, copy=True)
-
-        data_manager_json['data_tables']['ncbi_accession2taxid'] = data_manager_entry
-
-    target_directory_tax = os.path.join(params['output_data'][0]['extra_files_path'], "taxonomy")
-    os.makedirs(target_directory_tax)
-
-    move_files_to_final_dir(workdir, target_directory_tax)
-
-    with open(args.output, 'w') as fh:
-        json.dump(data_manager_json, fh, sort_keys=True)
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='Create data manager json.')
-    parser.add_argument('--out', dest='output', action='store', help='JSON filename')
-    parser.add_argument('--name', dest='name', action='store', default=str(datetime.date.today()), help='Data table entry unique ID')
-    parser.add_argument('--url', dest='url', action='store', default='ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz', help='Download URL')
-    parser.add_argument('--name-maps', dest='name_maps', action='store_true', help='')
-    parser.add_argument('--partial', dest='partial', action='store_true', help='Only download a small subset of data (for testing)')
-    args = parser.parse_args()
-
-    main(args)
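The deleted script was never meant to run outside Galaxy: it expects the file passed via `--out` to already contain the parameter JSON that Galaxy writes for data managers (it reads `output_data[0]['extra_files_path']` from it, then overwrites the same file with its data table entries). A minimal sketch of driving it by hand, with a hypothetical stand-in for Galaxy's params file (the real one carries more fields):

```python
# Sketch of how Galaxy drove the (now removed) data_manager.py.
# The params dict below is a hypothetical stand-in for Galaxy's real file.
import json
import subprocess
import tempfile

extra_files_path = tempfile.mkdtemp()  # where downloaded data would land
params = {"output_data": [{"extra_files_path": extra_files_path}]}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fh:
    json.dump(params, fh)
    out_json = fh.name

# The script downloads taxdump.tar.gz, unpacks it under extra_files_path,
# then rewrites out_json with the data table entries it created.
subprocess.run(
    ["python", "data_manager.py", "--out", out_json, "--name", "test_db"],
    check=True,
)
print(json.load(open(out_json)))  # {"data_tables": {"ncbi_taxonomy": {...}}}
```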
--- a/data_manager/ncbi_taxonomy_fetcher.xml	Tue May 21 07:36:09 2024 +0000
+++ b/data_manager/ncbi_taxonomy_fetcher.xml	Mon Nov 17 21:47:03 2025 +0000
@@ -1,26 +1,68 @@
-<?xml version="1.0"?>
-<tool id="ncbi_taxonomy_fetcher" name="NCBI" tool_type="manage_data" version="1.0.4" profile="23.0">
+<tool id="ncbi_taxonomy_fetcher" name="NCBI" tool_type="manage_data" version="1.1" profile="24.0">
     <description>taxonomy downloader</description>
     <requirements>
-        <requirement type="package" version="3.7">python</requirement>
+        <requirement type="package" version="1.25.0">wget</requirement>
     </requirements>
     <command detect_errors="exit_code">
     <![CDATA[
-        python '$__tool_directory__/data_manager.py' --out '${out_file}'
-        #if $taxonomy_url:
-            --url '${taxonomy_url}'
+        set -o pipefail;
+
+        mkdir -p '$out_file.extra_files_path/taxonomy/' &&
+        wget -O - '$taxonomy_url' | tar -xz -C '$out_file.extra_files_path/taxonomy/' &&
+        if [[ ! -f '$out_file.extra_files_path/taxonomy/nodes.dmp' ]]; then >&2 echo "nodes.dmp missing"; exit 1; fi &&
+
+        #if $name_maps
+            mkdir -p '$out_file.extra_files_path/accession2taxid/' &&
+            #if $partial_data
+                #set files = ['pdb.accession2taxid']
+            #else
+                #set files = ['dead_nucl.accession2taxid', 'dead_prot.accession2taxid', 'dead_wgs.accession2taxid', 'nucl_gb.accession2taxid', 'nucl_wgs.accession2taxid', 'pdb.accession2taxid', 'prot.accession2taxid', 'prot.accession2taxid.FULL']
+            #end if
+            #for file in files
+                wget -O - ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/accession2taxid/${file}.gz | gunzip -c > '$out_file.extra_files_path/accession2taxid/${file}' &&
+            #end for
         #end if
-        #if $database_name:
-            --name '${database_name}'
-        #end if
-        $name_maps
-        $partial_data
+        cp '$dmjson' '$out_file'
     ]]>
     </command>
+    <configfiles>
+        <configfile name="dmjson"><![CDATA[#slurp
+#if $database_name == ""
+    #import datetime
+    #import os.path
+    #set now = datetime.datetime.now().strftime("%Y-%m-%d-%H%M%S")
+    #set basename = os.path.basename(str($taxonomy_url))[:-7]
+    #set value = basename + "_" + now
+    #set name = basename + " " + now
+#else
+    #set value = $database_name.lower()
+    #set name = $database_name
+#end if
+{
+  "data_tables":{
+    "ncbi_taxonomy":[
+      {
+        "value": "$value",
+        "name": "$name",
+        "path": "."
+      }
+    ]
+#if $name_maps
+    ,"ncbi_accession2taxid":[
+      {
+        "value": "$value",
+        "name": "$name",
+        "path": "."
+      }
+    ]
+#end if
+  }
+}]]></configfile>
+    </configfiles>
     <inputs>
-        <param name="database_name" type="text" optional="true" label="Name for this database" help="Enter a unique identifier, or leave blank for today's date" />
+        <param name="database_name" type="text" label="Name for this database" help="Enter a unique identifier, or leave blank for today's date" />
         <param name="taxonomy_url" type="text" value='ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz' label="Enter URL for taxonomy files" />
-        <param name="name_maps" type="boolean" truevalue="--name-maps" falsevalue="" label="Also download accession2taxid data" checked="false" />
+        <param name="name_maps" type="boolean" label="Also download accession2taxid data" checked="false" />
         <param name="partial_data" type="hidden" value="" help="Used for testing"/>
     </inputs>
     <outputs>
@@ -32,6 +74,14 @@
         <output name="out_file" value="taxonomy.json"/>
     </test>
     <test>
+        <output name="out_file">
+            <assert_contents>
+                <has_text_matching expression='"value": "taxdump_\d\d\d\d-\d\d-\d\d-\d\d\d\d\d\d"'/>
+                <has_text_matching expression='"name": "taxdump \d\d\d\d-\d\d-\d\d-\d\d\d\d\d\d"'/>
+            </assert_contents>
+        </output>
+    </test>
+    <test>
         <param name="database_name" value="tax_name"/>
         <param name="name_maps" value="true"/>
         <param name="partial_data" value="--partial"/>
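The `dmjson` configfile replaces the Python script entirely: Cheetah renders it to the data manager JSON and the shell command just copies it into place. When `database_name` is left blank, value and name are derived from the URL basename plus a timestamp. A rough Python rendering of that default-name logic (a sketch mirroring the Cheetah above, not the Cheetah itself) shows why the new test's regexes match:

```python
# Approximation of what the dmjson configfile computes for a blank name.
import datetime
import os.path

taxonomy_url = "ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz"

now = datetime.datetime.now().strftime("%Y-%m-%d-%H%M%S")
basename = os.path.basename(taxonomy_url)[:-7]  # strips ".tar.gz" -> "taxdump"
value = basename + "_" + now                    # e.g. "taxdump_2025-11-17-214703"
name = basename + " " + now                     # e.g. "taxdump 2025-11-17-214703"

# These are the strings the has_text_matching assertions expect.
print(value, name)
```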
--- a/test-data/ncbi_accession2taxid.loc	Tue May 21 07:36:09 2024 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,5 +0,0 @@
-# Tab separated fields where
-# value is unique key
-# name is descriptive name
-# path is path to directory containing accession2taxid files
-#value	name	path
--- a/test-data/ncbi_taxonomy.loc	Tue May 21 07:36:09 2024 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,5 +0,0 @@
-# Tab separated fields where
-# value is unique key
-# name is descriptive name
-# path is path to directory containing names.dmp and nodes.dmp files
-#value	name	path
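Both removed `.loc` stubs documented the same three-column, tab-separated layout. For illustration only, a hypothetical populated row (the path is invented) and how Galaxy-style tab splitting reads it:

```python
# Hypothetical ncbi_taxonomy.loc entry in the format the removed stubs
# documented; the path below is invented for illustration.
row = "taxdump_2025-11-17\ttaxdump 2025-11-17\t/galaxy/tool-data/taxonomy/taxdump_2025-11-17"

# .loc rows split on tabs into the data table's value/name/path columns.
value, name, path = row.split("\t")
assert value == "taxdump_2025-11-17"
```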
--- a/test-data/taxonomy.json	Tue May 21 07:36:09 2024 +0000
+++ b/test-data/taxonomy.json	Mon Nov 17 21:47:03 2025 +0000
@@ -1,1 +1,11 @@
-{"data_tables": {"ncbi_taxonomy": {"name": "tax_name", "path": ".", "value": "tax_name"}}}
\ No newline at end of file
+{
+  "data_tables":{
+    "ncbi_taxonomy":[
+      {
+        "value": "tax_name",
+        "name": "tax_name",
+        "path": "."
+      }
+    ]
+  }
+}
\ No newline at end of file
--- a/test-data/taxonomy_with_accession2taxid.json	Tue May 21 07:36:09 2024 +0000
+++ b/test-data/taxonomy_with_accession2taxid.json	Mon Nov 17 21:47:03 2025 +0000
@@ -1,1 +1,18 @@
-{"data_tables": {"ncbi_accession2taxid": {"name": "tax_name", "path": ".", "value": "tax_name"}, "ncbi_taxonomy": {"name": "tax_name", "path": ".", "value": "tax_name"}}}
\ No newline at end of file
+{
+  "data_tables":{
+    "ncbi_taxonomy":[
+      {
+        "value": "tax_name",
+        "name": "tax_name",
+        "path": "."
+      }
+    ]
+    ,"ncbi_accession2taxid":[
+      {
+        "value": "tax_name",
+        "name": "tax_name",
+        "path": "."
+      }
+    ]
+  }
+}
\ No newline at end of file
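The reworked expectation files change each data table entry from a bare object to a one-element list, matching what the `dmjson` configfile emits. A quick sanity-check sketch against the new files:

```python
# Sketch: verify the updated expectation files parse and that each data
# table now holds a list of entries rather than a bare object.
import json

with open("test-data/taxonomy_with_accession2taxid.json") as fh:
    doc = json.load(fh)

tables = doc["data_tables"]
assert isinstance(tables["ncbi_taxonomy"], list)
assert isinstance(tables["ncbi_accession2taxid"], list)
assert tables["ncbi_taxonomy"][0]["value"] == "tax_name"
```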
