devteam / data_manager_gatk_picard_index_builder
changeset 0:619dd9e9c09c (draft)
Imported from capsule None
author:   devteam
date:     Tue, 01 Apr 2014 10:48:03 -0400
parents:  (none)
children: a15709ad26ea
files:    data_manager/data_manager_gatk_picard_index_builder.py
          data_manager/data_manager_gatk_picard_index_builder.xml
          data_manager_conf.xml
          tool-data/all_fasta.loc.sample
          tool-data/gatk_sorted_picard_index.loc.sample
          tool_data_table_conf.xml.sample
          tool_dependencies.xml
diffstat: 7 files changed, 298 insertions(+), 0 deletions(-)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/data_manager_gatk_picard_index_builder.py Tue Apr 01 10:48:03 2014 -0400
@@ -0,0 +1,179 @@
+#!/usr/bin/env python
+# Dave B.
+# Uses fasta sorting functions written by Dan Blankenberg.
+
+import json
+import optparse
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+
+CHUNK_SIZE = 2**20
+
+DEFAULT_DATA_TABLE_NAME = "fasta_indexes"
+
+def get_id_name( params, dbkey, fasta_description=None):
+    #TODO: ensure sequence_id is unique and does not already appear in location file
+    sequence_id = params['param_dict']['sequence_id']
+    if not sequence_id:
+        sequence_id = dbkey
+
+    sequence_name = params['param_dict']['sequence_name']
+    if not sequence_name:
+        sequence_name = fasta_description
+        if not sequence_name:
+            sequence_name = dbkey
+    return sequence_id, sequence_name
+
+def build_picard_index( data_manager_dict, fasta_filename, target_directory, dbkey, sequence_id, sequence_name, jar, data_table_name=DEFAULT_DATA_TABLE_NAME ):
+    fasta_base_name = os.path.split( fasta_filename )[-1]
+    gatk_sorted_fasta_filename = os.path.join( target_directory, fasta_base_name )
+    shutil.copy( fasta_filename, gatk_sorted_fasta_filename )
+    _sort_fasta_gatk( gatk_sorted_fasta_filename )
+    sam_index_filename = '%s.fai' % gatk_sorted_fasta_filename
+    if not os.path.exists( sam_index_filename ):
+        sam_command = [ 'samtools', 'faidx', gatk_sorted_fasta_filename ]
+        _run_command( sam_command, target_directory )
+    args = [ 'java', '-jar', jar, 'R=%s' % gatk_sorted_fasta_filename, 'O=%s.dict' % sequence_id ]
+    _run_command( args, target_directory )
+    data_table_entry = dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_name )
+    _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry )
+
+def _run_command( command, target_directory ):
+    tmp_stderr = tempfile.NamedTemporaryFile( prefix = "tmp-data-manager-gatk_picard_index_builder-stderr" )
+    proc = subprocess.Popen( args=command, shell=False, cwd=target_directory, stderr=tmp_stderr.fileno() )
+    return_code = proc.wait()
+    if return_code:
+        tmp_stderr.flush()
+        tmp_stderr.seek( 0 )
+        sys.stderr.write( "Error building index:\n" )
+        while True:
+            chunk = tmp_stderr.read( CHUNK_SIZE )
+            if not chunk:
+                break
+            sys.stderr.write( chunk )
+        sys.exit( return_code )
+    tmp_stderr.close()
+
+def _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry ):
+    data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
+    data_manager_dict['data_tables'][ data_table_name ] = data_manager_dict['data_tables'].get( data_table_name, [] )
+    data_manager_dict['data_tables'][ data_table_name ].append( data_table_entry )
+    return data_manager_dict
+
+def _move_and_index_fasta_for_sorting( fasta_filename ):
+    unsorted_filename = tempfile.NamedTemporaryFile().name
+    shutil.move( fasta_filename, unsorted_filename )
+    fasta_offsets = {}
+    unsorted_fh = open( unsorted_filename )
+    while True:
+        offset = unsorted_fh.tell()
+        line = unsorted_fh.readline()
+        if not line:
+            break
+        if line.startswith( ">" ):
+            line = line.split( None, 1 )[0][1:]
+            fasta_offsets[ line ] = offset
+    unsorted_fh.close()
+    current_order = map( lambda x: x[1], sorted( map( lambda x: ( x[1], x[0] ), fasta_offsets.items() ) ) )
+    return ( unsorted_filename, fasta_offsets, current_order )
+
+def _write_sorted_fasta( sorted_names, fasta_offsets, sorted_fasta_filename, unsorted_fasta_filename ):
+    unsorted_fh = open( unsorted_fasta_filename )
+    sorted_fh = open( sorted_fasta_filename, 'wb+' )
+
+    for name in sorted_names:
+        offset = fasta_offsets[ name ]
+        unsorted_fh.seek( offset )
+        sorted_fh.write( unsorted_fh.readline() )
+        while True:
+            line = unsorted_fh.readline()
+            if not line or line.startswith( ">" ):
+                break
+            sorted_fh.write( line )
+    unsorted_fh.close()
+    sorted_fh.close()
+
+def _int_to_roman( integer ):
+    if not isinstance( integer, int ):
+        raise TypeError, "expected integer, got %s" % type( integer )
+    if not 0 < integer < 4000:
+        raise ValueError, "Argument must be between 1 and 3999, got %s" % str( integer )
+    ints = ( 1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1 )
+    nums = ( 'M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I' )
+    result = ""
+    for i in range( len( ints ) ):
+        count = int( integer / ints[ i ] )
+        result += nums[ i ] * count
+        integer -= ints[ i ] * count
+    return result
+
+def _sort_fasta_gatk( fasta_filename ):
+    ( unsorted_filename, fasta_offsets, current_order ) = _move_and_index_fasta_for_sorting( fasta_filename )
+    sorted_names = map( str, range( 1, 100 ) ) + map( _int_to_roman, range( 1, 100 ) ) + [ 'X', 'Y', 'M' ]
+    #detect if we have chrN, or just N
+    has_chr = False
+    for chrom in sorted_names:
+        if "chr%s" % chrom in current_order:
+            has_chr = True
+            break
+
+    if has_chr:
+        sorted_names = map( lambda x: "chr%s" % x, sorted_names)
+    else:
+        sorted_names.insert( 0, "MT" )
+    sorted_names.extend( map( lambda x: "%s_random" % x, sorted_names ) )
+
+    existing_sorted_names = []
+    for name in sorted_names:
+        # Append each chromosome only once.
+        if name in current_order and name not in existing_sorted_names:
+            existing_sorted_names.append( name )
+    for name in current_order:
+        #TODO: confirm that non-canonical names do not need to be sorted specially
+        if name not in existing_sorted_names:
+            existing_sorted_names.append( name )
+
+    if existing_sorted_names == current_order:
+        shutil.move( unsorted_filename, fasta_filename )
+    else:
+        _write_sorted_fasta( existing_sorted_names, fasta_offsets, fasta_filename, unsorted_filename )
+
+def main():
+    #Parse Command Line
+    parser = optparse.OptionParser()
+    parser.add_option( '-f', '--fasta_filename', dest='fasta_filename', action='store', type="string", default=None, help='fasta_filename' )
+    parser.add_option( '-d', '--fasta_dbkey', dest='fasta_dbkey', action='store', type="string", default=None, help='fasta_dbkey' )
+    parser.add_option( '-t', '--fasta_description', dest='fasta_description', action='store', type="string", default=None, help='fasta_description' )
+    parser.add_option( '-n', '--data_table_name', dest='data_table_name', action='store', type="string", default=None, help='data_table_name' )
+    parser.add_option( '-j', '--jar', dest='jar', action='store', type="string", default=None, help='GATK .jar file' )
+    (options, args) = parser.parse_args()
+
+    filename = args[0]
+
+    params = json.loads( open( filename ).read() )
+    target_directory = params[ 'output_data' ][0]['extra_files_path']
+    os.mkdir( target_directory )
+    data_manager_dict = {}
+
+    if options.fasta_dbkey in [ None, '', '?' ]:
+        raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( options.fasta_dbkey ) )
+
+    sequence_id, sequence_name = get_id_name( params, dbkey=options.fasta_dbkey, fasta_description=options.fasta_description )
+
+    #build the index
+    build_picard_index( data_manager_dict,
+                        options.fasta_filename,
+                        target_directory,
+                        options.fasta_dbkey,
+                        sequence_id,
+                        sequence_name,
+                        options.jar,
+                        data_table_name=options.data_table_name or DEFAULT_DATA_TABLE_NAME )
+
+    #save info to json file
+    open( filename, 'wb' ).write( json.dumps( data_manager_dict ) )
+
+if __name__ == "__main__": main()
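As a rough sketch of how this script is driven (Galaxy normally writes the JSON parameters file and assembles the command line itself), the snippet below builds a params file containing only the keys the script actually reads and then invokes it. Every path, dbkey, and name in the sketch is hypothetical.

import json
import subprocess
import tempfile

params = {
    # read back by get_id_name(); blank values fall back to the dbkey/description
    "param_dict": {"sequence_id": "hg19custom", "sequence_name": "Human hg19 (custom)"},
    # extra_files_path must not exist yet: the script calls os.mkdir() on it
    "output_data": [{"extra_files_path": "/tmp/gatk_picard_index_build"}],
}

with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as params_file:
    json.dump(params, params_file)

subprocess.call([
    "python", "data_manager_gatk_picard_index_builder.py", params_file.name,
    "--fasta_filename", "/path/to/genome/hg19/hg19.fa",        # hypothetical path
    "--fasta_dbkey", "hg19",
    "--fasta_description", "Human (Homo sapiens): hg19",
    "--jar", "/path/to/picard/CreateSequenceDictionary.jar",   # hypothetical path
    "--data_table_name", "gatk_picard_indexes",
])
# On success the script rewrites the params file with the new data table entry.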
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/data_manager_gatk_picard_index_builder.xml Tue Apr 01 10:48:03 2014 -0400
@@ -0,0 +1,33 @@
+<tool id="gatk_picard_index_builder" name="Generate GATK-sorted Picard indexes" tool_type="manage_data" version="0.0.1">
+    <description>builder</description>
+    <requirements>
+        <requirement type="package" version="0.1.18">samtools</requirement>
+        <requirement type="package" version="1.56.0">picard</requirement>
+    </requirements>
+    <command interpreter="python">
+        data_manager_gatk_picard_index_builder.py "${out_file}" \
+        --jar "\$JAVA_JAR_PATH/CreateSequenceDictionary.jar" \
+        --fasta_filename "${all_fasta_source.fields.path}" \
+        --fasta_dbkey "${all_fasta_source.fields.dbkey}" \
+        --fasta_description "${all_fasta_source.fields.name}" \
+        --data_table_name "gatk_picard_indexes"
+    </command>
+    <inputs>
+        <param name="all_fasta_source" type="select" label="Source FASTA Sequence">
+            <options from_data_table="all_fasta"/>
+        </param>
+        <param type="text" name="sequence_name" value="" label="Name of sequence" />
+        <param type="text" name="sequence_id" value="" label="ID for sequence" />
+    </inputs>
+    <outputs>
+        <data name="out_file" format="data_manager_json"/>
+    </outputs>
+
+    <help>
+
+.. class:: infomark
+
+**Notice:** If you leave name, description, or id blank, it will be generated automatically.
+
+    </help>
+</tool>
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager_conf.xml Tue Apr 01 10:48:03 2014 -0400
@@ -0,0 +1,20 @@
+<?xml version="1.0"?>
+<data_managers>
+    <data_manager tool_file="data_manager/data_manager_gatk_picard_index_builder.xml" id="gatk_picard_index_builder" version="0.0.1">
+        <data_table name="gatk_picard_indexes">
+            <output>
+                <column name="value" />
+                <column name="dbkey" />
+                <column name="name" />
+                <column name="path" output_ref="out_file" >
+                    <move type="directory" relativize_symlinks="True">
+                        <!-- <source>${path}</source>--> <!-- out_file.extra_files_path is used as base by default --> <!-- if no source, eg for type=directory, then refers to base -->
+                        <target base="${GALAXY_DATA_MANAGER_DATA_PATH}">${dbkey}/gatk_picard_index/${value}</target>
+                    </move>
+                    <value_translation>${GALAXY_DATA_MANAGER_DATA_PATH}/${dbkey}/gatk_picard_index/${value}/${path}</value_translation>
+                    <value_translation type="function">abspath</value_translation>
+                </column>
+            </output>
+        </data_table>
+    </data_manager>
+</data_managers>
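For orientation only, the sketch below traces the path that the <move> target and the <value_translation> template above produce for one data table entry; the GALAXY_DATA_MANAGER_DATA_PATH value and the entry fields are made up.

import os
from string import Template

galaxy_data_manager_data_path = "/galaxy/tool-data"  # hypothetical base directory
entry = {"value": "hg19custom", "dbkey": "hg19", "name": "Human hg19 (custom)", "path": "hg19.fa"}

# <move>: the extra_files_path contents are relocated under
#   ${GALAXY_DATA_MANAGER_DATA_PATH}/${dbkey}/gatk_picard_index/${value}
target_dir = Template("${dbkey}/gatk_picard_index/${value}").substitute(entry)
moved_to = os.path.join(galaxy_data_manager_data_path, target_dir)

# <value_translation>: the stored "path" column becomes an absolute path to the
# GATK-sorted fasta inside that directory.
translated_path = os.path.join(moved_to, entry["path"])
print(translated_path)  # /galaxy/tool-data/hg19/gatk_picard_index/hg19custom/hg19.fa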
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/all_fasta.loc.sample Tue Apr 01 10:48:03 2014 -0400
@@ -0,0 +1,18 @@
+#This file lists the locations and dbkeys of all the fasta files
+#under the "genome" directory (a directory that contains a directory
+#for each build). The script extract_fasta.py will generate the file
+#all_fasta.loc. This file has the format (white space characters are
+#TAB characters):
+#
+#<unique_build_id>   <dbkey>   <display_name>   <file_path>
+#
+#So, all_fasta.loc could look something like this:
+#
+#apiMel3   apiMel3   Honeybee (Apis mellifera): apiMel3   /path/to/genome/apiMel3/apiMel3.fa
+#hg19canon   hg19   Human (Homo sapiens): hg19 Canonical   /path/to/genome/hg19/hg19canon.fa
+#hg19full   hg19   Human (Homo sapiens): hg19 Full   /path/to/genome/hg19/hg19full.fa
+#
+#Your all_fasta.loc file should contain an entry for each individual
+#fasta file. So there will be multiple fasta files for each build,
+#such as with hg19 above.
+#
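A minimal sketch of consuming the format described in this sample, reading the four tab-separated columns and skipping comment lines; the loc path used here is hypothetical.

def read_loc(path):
    """Yield (unique_build_id, dbkey, display_name, file_path) tuples."""
    with open(path) as handle:
        for line in handle:
            line = line.rstrip("\n")
            if not line or line.startswith("#"):
                continue  # skip the comment header and blank lines
            fields = line.split("\t")
            if len(fields) == 4:
                yield tuple(fields)

for build_id, dbkey, name, fasta_path in read_loc("tool-data/all_fasta.loc"):
    print(build_id, dbkey, name, fasta_path)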
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/gatk_sorted_picard_index.loc.sample Tue Apr 01 10:48:03 2014 -0400
@@ -0,0 +1,26 @@
+#This is a sample file distributed with Galaxy that enables tools
+#to use a directory of Picard dict and associated files. You will need
+#to create these data files and then create a picard_index.loc file
+#similar to this one (store it in this directory) that points to
+#the directories in which those files are stored. The picard_index.loc
+#file has this format (longer white space is the TAB character):
+#
+#<unique_build_id>   <dbkey>   <display_name>   <fasta_file_path>
+#
+#So, for example, if you had hg18 indexed and stored in
+#/depot/data2/galaxy/srma/hg18/,
+#then the srma_index.loc entry would look like this:
+#
+#hg18   hg18   hg18 Pretty   /depot/data2/galaxy/picard/hg18/hg18.fa
+#
+#and your /depot/data2/galaxy/srma/hg18/ directory
+#would contain the following three files:
+#hg18.fa
+#hg18.dict
+#hg18.fa.fai
+#
+#The dictionary file for each reference (ex. hg18.dict) must be
+#created via Picard (http://picard.sourceforge.net). Note that
+#the dict file does not have the .fa extension although the
+#path list in the loc file does include it.
+#
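The sketch below, not part of the changeset, derives the three companion files a loc entry implies, following the note above that the .fai keeps the full fasta name while the Picard dict drops the .fa extension; the example path comes from the sample comments.

import os

def expected_index_files(fasta_path):
    base, _ext = os.path.splitext(fasta_path)      # strip the ".fa" extension
    return [fasta_path, fasta_path + ".fai", base + ".dict"]

for path in expected_index_files("/depot/data2/galaxy/picard/hg18/hg18.fa"):
    print(path, "present" if os.path.exists(path) else "missing")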
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool_data_table_conf.xml.sample Tue Apr 01 10:48:03 2014 -0400
@@ -0,0 +1,13 @@
+<!-- Use the file tool_data_table_conf.xml.oldlocstyle if you don't want to update your loc files as changed in revision 4550:535d276c92bc-->
+<tables>
+    <!-- Locations of all fasta files under genome directory -->
+    <table name="all_fasta" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/all_fasta.loc" />
+    </table>
+    <!-- Location of Picard dict files valid for GATK -->
+    <table name="gatk_picard_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/gatk_sorted_picard_index.loc" />
+    </table>
+</tables>
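As a small illustration (not from the changeset), this sketch lists the tables declared in the sample above, mapping each table name to its columns and loc file path.

import xml.etree.ElementTree as ET

tree = ET.parse("tool_data_table_conf.xml.sample")
for table in tree.getroot().findall("table"):
    columns = [c.strip() for c in table.findtext("columns").split(",")]
    loc_file = table.find("file").get("path")
    print(table.get("name"), columns, loc_file)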
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool_dependencies.xml Tue Apr 01 10:48:03 2014 -0400
@@ -0,0 +1,9 @@
+<?xml version="1.0"?>
+<tool_dependency>
+    <package name="picard" version="1.56.0">
+        <repository changeset_revision="7206dbf34dcd" name="package_picard_1_56_0" owner="devteam" prior_installation_required="False" toolshed="http://testtoolshed.g2.bx.psu.edu" />
+    </package>
+    <package name="samtools" version="0.1.18">
+        <repository changeset_revision="c0f72bdba484" name="package_samtools_0_1_18" owner="devteam" prior_installation_required="False" toolshed="http://testtoolshed.g2.bx.psu.edu" />
+    </package>
+</tool_dependency>