# HG changeset patch
# User ieguinoa
# Date 1534259989 14400
# Node ID c5dea2080109f5ec3c77bc00a2898284364f2dd8
# Parent 6cd60ba8a842e59a68e093ab74531cf647e06554
Uploaded
diff -r 6cd60ba8a842 -r c5dea2080109 .shed.yml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/.shed.yml Tue Aug 14 11:19:49 2018 -0400
@@ -0,0 +1,19 @@
+categories:
+- Data Managers
+description: Pre-generate indexes for Salmon
+homepage_url: https://github.com/COMBINE-lab/salmon
+long_description: |
+ Salmon is a wicked-fast program to produce highly-accurate,
+ transcript-level quantification estimates from RNA-seq data.
+ Salmon achieves its accuracy and speed via a number of different innovations,
+ including the use of quasi-mapping (accurate but fast-to-compute proxies for traditional read alignments)
+ and massively-parallel stochastic collapsed variational inference.
+ The result is a versatile tool that fits nicely into many different pipelines.
+ For example, you can choose to make use of our quasi-mapping algorithm by providing
+ Salmon with raw sequencing reads, or, if it is more convenient, you can provide
+ Salmon with regular alignments (e.g. an unsorted BAM file produced with your favorite aligner),
+ and it will use the same wicked-fast, state-of-the-art inference algorithm to estimate transcript-level abundances for your experiment.
+name: data_manager_salmon_index_builder
+owner: iuc
+remote_repository_url: https://github.com/ieguinoa/data_manager_salmon_index_builder
+type: unrestricted
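
The description above mentions two ways of feeding Salmon: raw reads quantified against a pre-built index, or existing alignments in an unsorted BAM. The following is a minimal sketch of both modes, not part of this changeset, driven from Python the same way the data manager below drives `salmon index`; it assumes a Salmon release whose CLI accepts these flags, and all file names are placeholders.

    import subprocess

    transcripts = 'transcripts.fa'    # placeholder transcript FASTA
    index_dir = 'salmon_index'        # placeholder index output directory

    # Mapping-based mode: build the index once, then quantify raw reads against it.
    subprocess.check_call(['salmon', 'index', '-t', transcripts, '-i', index_dir, '-k', '31'])
    subprocess.check_call(['salmon', 'quant', '-i', index_dir, '-l', 'A',
                           '-1', 'reads_1.fq', '-2', 'reads_2.fq', '-o', 'quant_reads'])

    # Alignment-based mode: no index needed; pass an existing (unsorted) BAM instead.
    subprocess.check_call(['salmon', 'quant', '-t', transcripts, '-l', 'A',
                           '-a', 'alignments.bam', '-o', 'quant_aln'])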
diff -r 6cd60ba8a842 -r c5dea2080109 README.md
--- a/README.md Tue Aug 14 11:14:52 2018 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,2 +0,0 @@
-# data_manager_fetch_gff
-Galaxy Data Manager to fetch gene annotation files
diff -r 6cd60ba8a842 -r c5dea2080109 data_manager/data_manager_fetch_gff.py
--- a/data_manager/data_manager_fetch_gff.py Tue Aug 14 11:14:52 2018 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,445 +0,0 @@
-#!/usr/bin/env python
-#Dan Blankenberg
-
-import sys
-import os
-import tempfile
-import shutil
-import optparse
-from ftplib import FTP
-import tarfile
-import zipfile
-import gzip
-import bz2
-try:
- # For Python 3.0 and later
- from urllib.request import urlopen
- from io import BytesIO as StringIO
- from io import UnsupportedOperation
-except ImportError:
- # Fall back to Python 2's urllib2
- from urllib2 import urlopen
- from StringIO import StringIO
- UnsupportedOperation = AttributeError
-from json import loads, dumps
-
-
-CHUNK_SIZE = 2**20 # 1mb
-
-DATA_TABLE_NAME = 'all_gff'
-
-def cleanup_before_exit( tmp_dir ):
- if tmp_dir and os.path.exists( tmp_dir ):
- shutil.rmtree( tmp_dir )
-
-
-def stop_err(msg):
- sys.stderr.write(msg)
- sys.exit(1)
-
-
-def get_dbkey_dbname_id_name( params, dbkey_description=None ):
-# dbkey = params['param_dict']['dbkey_source']['dbkey']
- #TODO: ensure sequence_id is unique and does not already appear in location file
- sequence_id = params['param_dict']['sequence_id']
- if not sequence_id:
- sequence_id = dbkey #uuid.uuid4() generate and use an uuid instead?
-
-# if params['param_dict']['dbkey_source']['dbkey_source_selector'] == 'new':
-# dbkey_name = params['param_dict']['dbkey_source']['dbkey_name']
-# if not dbkey_name:
-# dbkey_name = dbkey
-# else:
-# dbkey_name = None
- dbkey = params['param_dict']['dbkey']
- dbkey_name = dbkey_description
- sequence_name = params['param_dict']['sequence_name']
- if not sequence_name:
- sequence_name = dbkey_description
- if not sequence_name:
- sequence_name = dbkey
- return dbkey, dbkey_name, sequence_id, sequence_name
-
-
-def _get_files_in_ftp_path( ftp, path ):
- path_contents = []
- ftp.retrlines( 'MLSD %s' % ( path ), path_contents.append )
- return [ line.split( ';' )[ -1 ].lstrip() for line in path_contents ]
-
-
-def _get_stream_readers_for_tar( fh, tmp_dir ):
- fasta_tar = tarfile.open( fileobj=fh, mode='r:*' )
- return [x for x in [fasta_tar.extractfile(member) for member in fasta_tar.getmembers()] if x]
-
-
-def _get_stream_readers_for_zip( fh, tmp_dir ):
- """
- Unpacks all archived files in a zip file.
- Individual files will be concatenated (in _stream_fasta_to_file)
- """
- fasta_zip = zipfile.ZipFile( fh, 'r' )
- rval = []
- for member in fasta_zip.namelist():
- fasta_zip.extract( member, tmp_dir )
- rval.append( open( os.path.join( tmp_dir, member ), 'rb' ) )
- return rval
-
-
-def _get_stream_readers_for_gzip( fh, tmp_dir ):
- return [ gzip.GzipFile( fileobj=fh, mode='rb') ]
-
-
-def _get_stream_readers_for_bz2( fh, tmp_dir ):
- return [ bz2.BZ2File( fh.name, 'rb') ]
-
-
-def sort_fasta( fasta_filename, sort_method, params ):
- if sort_method is None:
- return
- assert sort_method in SORTING_METHODS, ValueError( "%s is not a valid sorting option." % sort_method )
- return SORTING_METHODS[ sort_method ]( fasta_filename, params )
-
-
-def _move_and_index_fasta_for_sorting( fasta_filename ):
- unsorted_filename = tempfile.NamedTemporaryFile().name
- shutil.move( fasta_filename, unsorted_filename )
- fasta_offsets = {}
- unsorted_fh = open( unsorted_filename )
- while True:
- offset = unsorted_fh.tell()
- line = unsorted_fh.readline()
- if not line:
- break
- if line.startswith( ">" ):
- line = line.split( None, 1 )[0][1:]
- fasta_offsets[ line ] = offset
- unsorted_fh.close()
- current_order = map( lambda x: x[1], sorted( map( lambda x: ( x[1], x[0] ), fasta_offsets.items() ) ) )
- return ( unsorted_filename, fasta_offsets, current_order )
-
-
-def _write_sorted_fasta( sorted_names, fasta_offsets, sorted_fasta_filename, unsorted_fasta_filename ):
- unsorted_fh = open( unsorted_fasta_filename )
- sorted_fh = open( sorted_fasta_filename, 'wb+' )
-
- for name in sorted_names:
- offset = fasta_offsets[ name ]
- unsorted_fh.seek( offset )
- sorted_fh.write( unsorted_fh.readline() )
- while True:
- line = unsorted_fh.readline()
- if not line or line.startswith( ">" ):
- break
- sorted_fh.write( line )
- unsorted_fh.close()
- sorted_fh.close()
-
-
-def _sort_fasta_as_is( fasta_filename, params ):
- return
-
-def _sort_fasta_lexicographical( fasta_filename, params ):
- ( unsorted_filename, fasta_offsets, current_order ) = _move_and_index_fasta_for_sorting( fasta_filename )
- sorted_names = sorted( fasta_offsets.keys() )
- if sorted_names == current_order:
- shutil.move( unsorted_filename, fasta_filename )
- else:
- _write_sorted_fasta( sorted_names, fasta_offsets, fasta_filename, unsorted_filename )
-
-
-def _sort_fasta_gatk( fasta_filename, params ):
- #This method was added by reviewer request.
- ( unsorted_filename, fasta_offsets, current_order ) = _move_and_index_fasta_for_sorting( fasta_filename )
- sorted_names = map( str, range( 1, 23 ) ) + [ 'X', 'Y' ]
- #detect if we have chrN, or just N
- has_chr = False
- for chrom in sorted_names:
- if "chr%s" % chrom in current_order:
- has_chr = True
- break
-
- if has_chr:
- sorted_names = map( lambda x: "chr%s" % x, sorted_names)
- sorted_names.insert( 0, "chrM" )
- else:
- sorted_names.insert( 0, "MT" )
- sorted_names.extend( map( lambda x: "%s_random" % x, sorted_names ) )
-
- existing_sorted_names = []
- for name in sorted_names:
- if name in current_order:
- existing_sorted_names.append( name )
- for name in current_order:
- #TODO: confirm that non-canonical names do not need to be sorted specially
- if name not in existing_sorted_names:
- existing_sorted_names.append( name )
-
- if existing_sorted_names == current_order:
- shutil.move( unsorted_filename, fasta_filename )
- else:
- _write_sorted_fasta( existing_sorted_names, fasta_offsets, fasta_filename, unsorted_filename )
-
-
-def _sort_fasta_custom( fasta_filename, params ):
- ( unsorted_filename, fasta_offsets, current_order ) = _move_and_index_fasta_for_sorting( fasta_filename )
- sorted_names = []
- for id_repeat in params['param_dict']['sorting']['sequence_identifiers']:
- sorted_names.append( id_repeat[ 'identifier' ] )
- handle_not_listed = params['param_dict']['sorting']['handle_not_listed_selector']
- if handle_not_listed.startswith( 'keep' ):
- add_list = []
- for name in current_order:
- if name not in sorted_names:
- add_list.append( name )
- if add_list:
- if handle_not_listed == 'keep_append':
- sorted_names.extend( add_list )
- else:
- add_list.extend( sorted_names )
- sorted_names = add_list
- if sorted_names == current_order:
- shutil.move( unsorted_filename, fasta_filename )
- else:
- _write_sorted_fasta( sorted_names, fasta_offsets, fasta_filename, unsorted_filename )
-
-
-def _download_file(start, fh):
- tmp = tempfile.NamedTemporaryFile()
- tmp.write(start)
- tmp.write(fh.read())
- tmp.flush()
- tmp.seek(0)
- return tmp
-
-
-def get_stream_reader(fh, tmp_dir):
- """
- Check if file is compressed and return correct stream reader.
- If file has to be downloaded, do it now.
- """
- magic_dict = {
- b"\x1f\x8b\x08": _get_stream_readers_for_gzip,
- b"\x42\x5a\x68": _get_stream_readers_for_bz2,
- b"\x50\x4b\x03\x04": _get_stream_readers_for_zip,
- }
- start_of_file = fh.read(CHUNK_SIZE)
- try:
- fh.seek(0)
- except UnsupportedOperation: # This is if fh has been created by urlopen
- fh = _download_file(start_of_file, fh)
- for k,v in magic_dict.items():
- if start_of_file.startswith(k):
- return v(fh, tmp_dir)
- try: # Check if file is tar file
- if tarfile.open(fileobj=StringIO(start_of_file)):
- return _get_stream_readers_for_tar(fh, tmp_dir)
- except tarfile.ReadError:
- pass
- return fh
-
-
-def _get_ucsc_download_address(params, dbkey):
- """
- Check if we can find the correct file for the supplied dbkey on UCSC's FTP server
- """
- UCSC_FTP_SERVER = 'hgdownload.cse.ucsc.edu'
- UCSC_DOWNLOAD_PATH = '/goldenPath/%s/bigZips/'
- COMPRESSED_EXTENSIONS = ['.tar.gz', '.tgz', '.tar.bz2', '.zip', '.fa.gz', '.fa.bz2']
-
- email = params['param_dict']['__user_email__']
- if not email:
- email = 'anonymous@example.com'
-
- ucsc_dbkey = params['param_dict']['reference_source']['requested_dbkey'] or dbkey
- UCSC_CHROM_FA_FILENAMES = ['%s.chromFa' % ucsc_dbkey, 'chromFa', ucsc_dbkey]
-
- ftp = FTP(UCSC_FTP_SERVER)
- ftp.login('anonymous', email)
-
- ucsc_path = UCSC_DOWNLOAD_PATH % ucsc_dbkey
- path_contents = _get_files_in_ftp_path(ftp, ucsc_path)
- ftp.quit()
-
- for ucsc_chrom_fa_filename in UCSC_CHROM_FA_FILENAMES:
- for ext in COMPRESSED_EXTENSIONS:
- if "%s%s" % (ucsc_chrom_fa_filename, ext) in path_contents:
- ucsc_file_name = "%s%s%s" % (ucsc_path, ucsc_chrom_fa_filename, ext)
- return "ftp://%s%s" % (UCSC_FTP_SERVER, ucsc_file_name)
-
- raise Exception('Unable to determine filename for UCSC Genome for %s: %s' % (ucsc_dbkey, path_contents))
-
-def add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params):
- for data_table_name, data_table_entry in _stream_fasta_to_file( fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params ):
- if data_table_entry:
- _add_data_table_entry( data_manager_dict, data_table_entry, data_table_name )
-
-
-def download_from_ucsc( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
- url = _get_ucsc_download_address(params, dbkey)
- fasta_readers = get_stream_reader(urlopen(url), tmp_dir)
- add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params)
-
-
-def download_from_ncbi( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
- NCBI_DOWNLOAD_URL = 'http://togows.dbcls.jp/entry/ncbi-nucleotide/%s.fasta' #FIXME: taken from dave's genome manager...why some japan site?
- requested_identifier = params['param_dict']['reference_source']['requested_identifier']
- url = NCBI_DOWNLOAD_URL % requested_identifier
- fasta_readers = get_stream_reader(urlopen(url), tmp_dir)
- add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params)
-
-
-def download_from_url( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
- urls = filter( bool, map( lambda x: x.strip(), params['param_dict']['reference_source']['user_url'].split( '\n' ) ) )
- fasta_readers = [ get_stream_reader(urlopen( url ), tmp_dir) for url in urls ]
- add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id,sequence_name, params)
-
-
-def download_from_history( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
- #TODO: allow multiple FASTA input files
- input_filename = params['param_dict']['reference_source']['input_fasta']
- if isinstance( input_filename, list ):
- fasta_readers = [ get_stream_reader(open(filename, 'rb'), tmp_dir) for filename in input_filename ]
- else:
- fasta_readers = get_stream_reader(open(input_filename), tmp_dir)
- add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params)
-
-
-def copy_from_directory( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
- input_filename = params['param_dict']['reference_source']['fasta_filename']
- create_symlink = params['param_dict']['reference_source']['create_symlink'] == 'create_symlink'
- if create_symlink:
- data_table_entries = _create_symlink( input_filename, target_directory, dbkey, dbkey_name, sequence_id, sequence_name )
- else:
- if isinstance( input_filename, list ):
- fasta_readers = [ get_stream_reader(open(filename, 'rb'), tmp_dir) for filename in input_filename ]
- else:
- fasta_readers = get_stream_reader(open(input_filename), tmp_dir)
- data_table_entries = _stream_fasta_to_file( fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params )
- for data_table_name, data_table_entry in data_table_entries:
- if data_table_entry:
- _add_data_table_entry( data_manager_dict, data_table_entry, data_table_name )
-
-
-def _add_data_table_entry( data_manager_dict, data_table_entry, data_table_name ):
- data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
- data_manager_dict['data_tables'][data_table_name] = data_manager_dict['data_tables'].get( DATA_TABLE_NAME, [] )
- data_manager_dict['data_tables'][data_table_name].append( data_table_entry )
- return data_manager_dict
-
-
-def _stream_fasta_to_file( fasta_stream, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params, close_stream=True ):
- fasta_base_filename = "%s.gff" % sequence_id
- fasta_filename = os.path.join( target_directory, fasta_base_filename )
- with open( fasta_filename, 'wb+' ) as fasta_writer:
-
- if isinstance( fasta_stream, list ) and len( fasta_stream ) == 1:
- fasta_stream = fasta_stream[0]
-
- if isinstance( fasta_stream, list ):
- last_char = None
- for fh in fasta_stream:
- if last_char not in [ None, '\n', '\r', b'\n', b'\r' ]:
- fasta_writer.write( b'\n' )
- while True:
- data = fh.read( CHUNK_SIZE )
- if data:
- fasta_writer.write( data )
- last_char = data[-1]
- else:
- break
- if close_stream:
- fh.close()
- else:
- while True:
- data = fasta_stream.read( CHUNK_SIZE )
- if data:
- fasta_writer.write( data )
- else:
- break
- if close_stream:
- fasta_stream.close()
-
- #sort_fasta( fasta_filename, params['param_dict']['sorting']['sort_selector'], params )
-
-
- return [ ( DATA_TABLE_NAME, dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_filename ) ) ]
-
-
-def compute_fasta_length( fasta_file, out_file, keep_first_word=False ):
-
- infile = fasta_file
- out = open( out_file, 'w')
-
- fasta_title = ''
- seq_len = 0
-
- first_entry = True
-
- for line in open( infile ):
- line = line.strip()
- if not line or line.startswith( '#' ):
- continue
- if line[0] == '>':
- if first_entry == False:
- if keep_first_word:
- fasta_title = fasta_title.split()[0]
- out.write( "%s\t%d\n" % ( fasta_title[ 1: ], seq_len ) )
- else:
- first_entry = False
- fasta_title = line
- seq_len = 0
- else:
- seq_len += len(line)
-
- # last fasta-entry
- if keep_first_word:
- fasta_title = fasta_title.split()[0]
- out.write( "%s\t%d\n" % ( fasta_title[ 1: ], seq_len ) )
- out.close()
-
-
-def _create_symlink( input_filename, target_directory, dbkey, dbkey_name, sequence_id, sequence_name ):
- fasta_base_filename = "%s.fa" % sequence_id
- fasta_filename = os.path.join( target_directory, fasta_base_filename )
- os.symlink( input_filename, fasta_filename )
- return [ ( DATA_TABLE_NAME, dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_filename ) ) ]
-
-
-REFERENCE_SOURCE_TO_DOWNLOAD = dict( ucsc=download_from_ucsc, ncbi=download_from_ncbi, url=download_from_url, history=download_from_history, directory=copy_from_directory )
-
-SORTING_METHODS = dict( as_is=_sort_fasta_as_is, lexicographical=_sort_fasta_lexicographical, gatk=_sort_fasta_gatk, custom=_sort_fasta_custom )
-
-
-def main():
- #Parse Command Line
- parser = optparse.OptionParser()
- parser.add_option( '-d', '--dbkey_description', dest='dbkey_description', action='store', type="string", default=None, help='dbkey_description' )
- parser.add_option( '-t', '--type', dest='file_type', action='store', type='string', default=None, help='file_type')
- (options, args) = parser.parse_args()
-
- filename = args[0]
- global DATA_TABLE_NAME
- if options.file_type == 'representative':
- DATA_TABLE_NAME= 'representative_gff'
- params = loads( open( filename ).read() )
- target_directory = params[ 'output_data' ][0]['extra_files_path']
- os.mkdir( target_directory )
- data_manager_dict = {}
-
- dbkey, dbkey_name, sequence_id, sequence_name = get_dbkey_dbname_id_name( params, dbkey_description=options.dbkey_description )
-
- if dbkey in [ None, '', '?' ]:
- raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( dbkey ) )
-
- # Create a tmp_dir, in case a zip file needs to be uncompressed
- tmp_dir = tempfile.mkdtemp()
- #Fetch the FASTA
- try:
- REFERENCE_SOURCE_TO_DOWNLOAD[ params['param_dict']['reference_source']['reference_source_selector'] ]( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir )
- finally:
- cleanup_before_exit(tmp_dir)
- #save info to json file
- open( filename, 'wb' ).write( dumps( data_manager_dict ).encode() )
-
-if __name__ == "__main__":
- main()
diff -r 6cd60ba8a842 -r c5dea2080109 data_manager/data_manager_fetch_gff.xml
--- a/data_manager/data_manager_fetch_gff.xml Tue Aug 14 11:14:52 2018 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,66 +0,0 @@
-
- fetching
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-**What it does**
-
-Fetches a gff file from various sources (URL, Galaxy History, or a server directory) and populates the "all_gff" data table.
-
-------
-
-
-
-.. class:: infomark
-
-**Notice:** If you leave name, description, or id blank, it will be generated automatically.
-
-
-
diff -r 6cd60ba8a842 -r c5dea2080109 data_manager/salmon_index_builder.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/salmon_index_builder.py Tue Aug 14 11:19:49 2018 -0400
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+# Based heavily on the kallisto data manager wrapper script by iuc
+from __future__ import print_function
+
+import argparse
+import os
+import subprocess
+import sys
+from json import dumps, loads
+
+DEFAULT_DATA_TABLE_NAME = "salmon_indexes"
+
+
+def get_id_name( params, dbkey, fasta_description=None):
+    # TODO: ensure sequence_id is unique and does not already appear in location file
+    sequence_id = params['param_dict']['sequence_id']
+    if not sequence_id:
+        sequence_id = dbkey
+
+    sequence_name = params['param_dict']['sequence_name']
+    if not sequence_name:
+        sequence_name = fasta_description
+    if not sequence_name:
+        sequence_name = dbkey
+    return sequence_id, sequence_name
+
+
+def build_salmon_index( data_manager_dict, options, params, sequence_id, sequence_name ):
+    data_table_name = options.data_table_name or DEFAULT_DATA_TABLE_NAME
+    target_directory = params[ 'output_data' ][0]['extra_files_path']
+    if not os.path.exists( target_directory ):
+        os.mkdir( target_directory )
+    args = [ 'salmon', 'index' ]
+    if options.kmer_size:
+        args.append('-k')
+        args.append(options.kmer_size)
+    args.extend( [ '-t', options.fasta_filename, '-i', sequence_id ] )
+    proc = subprocess.Popen( args=args, shell=False, cwd=target_directory )
+    return_code = proc.wait()
+    if return_code:
+        print("Error building index.", file=sys.stderr)
+        sys.exit( return_code )
+    data_table_entry = dict( value=sequence_id, dbkey=options.fasta_dbkey, name=sequence_name, path=sequence_id )
+    _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry )
+
+
+def _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry ):
+    data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
+    data_manager_dict['data_tables'][ data_table_name ] = data_manager_dict['data_tables'].get( data_table_name, [] )
+    data_manager_dict['data_tables'][ data_table_name ].append( data_table_entry )
+    return data_manager_dict
+
+
+def main():
+    # Parse Command Line
+    parser = argparse.ArgumentParser()
+    parser.add_argument( '--output', dest='output', action='store', type=str, default=None )
+    parser.add_argument( '--fasta_filename', dest='fasta_filename', action='store', type=str, default=None )
+    parser.add_argument( '--fasta_dbkey', dest='fasta_dbkey', action='store', type=str, default=None )
+    parser.add_argument( '--fasta_description', dest='fasta_description', action='store', type=str, default=None )
+    parser.add_argument( '--data_table_name', dest='data_table_name', action='store', type=str, default='salmon_indexes' )
+    parser.add_argument( '-k', '--kmer_size', dest='kmer_size', action='store', type=str, help='kmer_size' )
+    options = parser.parse_args()
+
+    filename = options.output
+
+    params = loads( open( filename ).read() )
+    data_manager_dict = {}
+
+    if options.fasta_dbkey in [ None, '', '?' ]:
+        raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( options.fasta_dbkey ) )
+
+    sequence_id, sequence_name = get_id_name( params, dbkey=options.fasta_dbkey, fasta_description=options.fasta_description )
+
+    # build the index
+    build_salmon_index( data_manager_dict, options, params, sequence_id, sequence_name )
+
+    # save info to json file
+    open( filename, 'w' ).write( dumps( data_manager_dict ) )
+
+
+if __name__ == "__main__":
+    main()
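
For orientation, this is roughly how Galaxy drives the script added above: Galaxy hands it a JSON params file via --output, the script builds the index under extra_files_path, then overwrites the same file with the new data table entries. The sketch below is illustrative only and not part of the changeset; the stripped-down params dict, paths, and dbkey are placeholders (a real Galaxy param_dict carries many more keys).

    import json
    import subprocess

    # Placeholder stand-in for the JSON Galaxy normally writes for a data manager run.
    params = {
        'param_dict': {'sequence_id': '', 'sequence_name': ''},
        'output_data': [{'extra_files_path': '/tmp/salmon_dm_files'}],
    }
    with open('galaxy.json', 'w') as fh:
        json.dump(params, fh)

    subprocess.check_call([
        'python', 'data_manager/salmon_index_builder.py',
        '--output', 'galaxy.json',
        '--fasta_filename', 'transcripts.fa',            # placeholder transcript FASTA
        '--fasta_dbkey', 'sacCer3',
        '--fasta_description', 'S. cerevisiae transcripts',
        '--data_table_name', 'salmon_indexes',
        '-k', '31',
    ])

    # galaxy.json now holds the new data table entry, e.g.:
    # {"data_tables": {"salmon_indexes": [{"value": "sacCer3", "dbkey": "sacCer3",
    #   "name": "S. cerevisiae transcripts", "path": "sacCer3"}]}}
    print(json.load(open('galaxy.json')))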
diff -r 6cd60ba8a842 -r c5dea2080109 data_manager/salmon_index_builder.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/salmon_index_builder.xml Tue Aug 14 11:19:49 2018 -0400
@@ -0,0 +1,38 @@
+
+ index builder
+
+ salmon
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff -r 6cd60ba8a842 -r c5dea2080109 data_manager_conf.xml
--- a/data_manager_conf.xml Tue Aug 14 11:14:52 2018 -0400
+++ b/data_manager_conf.xml Tue Aug 14 11:19:49 2018 -0400
@@ -1,32 +1,17 @@
-
-
+
+
-
-
-
diff -r 6cd60ba8a842 -r c5dea2080109 tool-data/all_fasta.loc.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/all_fasta.loc.sample Tue Aug 14 11:19:49 2018 -0400
@@ -0,0 +1,18 @@
+#This file lists the locations and dbkeys of all the fasta files
+#under the "genome" directory (a directory that contains a directory
+#for each build). The script extract_fasta.py will generate the file
+#all_fasta.loc. This file has the format (white space characters are
+#TAB characters):
+#
+#<unique_build_id>	<dbkey>	<display_name>	<file_path>
+#
+#So, all_fasta.loc could look something like this:
+#
+#apiMel3 apiMel3 Honeybee (Apis mellifera): apiMel3 /path/to/genome/apiMel3/apiMel3.fa
+#hg19canon hg19 Human (Homo sapiens): hg19 Canonical /path/to/genome/hg19/hg19canon.fa
+#hg19full hg19 Human (Homo sapiens): hg19 Full /path/to/genome/hg19/hg19full.fa
+#
+#Your all_fasta.loc file should contain an entry for each individual
+#fasta file. So there will be multiple fasta files for each build,
+#such as with hg19 above.
+#
diff -r 6cd60ba8a842 -r c5dea2080109 tool-data/all_gff.loc.sample
--- a/tool-data/all_gff.loc.sample Tue Aug 14 11:14:52 2018 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,3 +0,0 @@
-#The all_gff.loc file has this format:
-#
-#
diff -r 6cd60ba8a842 -r c5dea2080109 tool-data/representative_gff.loc.sample
--- a/tool-data/representative_gff.loc.sample Tue Aug 14 11:14:52 2018 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,3 +0,0 @@
-#The representative_gff.loc file has this format:
-#
-#
diff -r 6cd60ba8a842 -r c5dea2080109 tool-data/salmon_indexes.loc.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/salmon_indexes.loc.sample Tue Aug 14 11:19:49 2018 -0400
@@ -0,0 +1,28 @@
+# salmon_indexes.loc.sample
+# This is a *.loc.sample file distributed with Galaxy that enables tools
+# to use a directory of indexed data files. This one is for Salmon.
+# See the wiki: http://wiki.galaxyproject.org/Admin/NGS%20Local%20Setup
+# First create these data files and save them in your own data directory structure.
+# Then, create a salmon_indexes.loc file to use those indexes with tools.
+# Copy this file, save it with the same name (minus the .sample),
+# follow the format examples, and store the result in this directory.
+# The file should include a one-line entry for each index set.
+# The path points to the "basename" for the set, not a specific file.
+# It has four text columns separated by TABs.
+#
+# <unique_build_id>	<dbkey>	<display_name>	<file_base_path>
+#
+# So, for example, if you had sacCer3 indexes stored in:
+#
+# /depot/data2/galaxy/sacCer3/salmon_indexes/
+#
+# then the salmon_indexes.loc entry could look like this:
+#
+#sacCer3 sacCer3 S. cerevisiae Apr. 2011 (SacCer_Apr2011/sacCer3) (sacCer3) /depot/data2/galaxy/sacCer3/salmon_indexes
+#
+#More examples:
+#
+#mm10 mm10 Mouse (mm10) /depot/data2/galaxy/salmon_indexes/mm10
+#dm3 dm3 D. melanogaster (dm3) /depot/data2/galaxy/salmon_indexes/dm3
+#
+#
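
The entries documented above are plain tab-separated text, so consuming them outside Galaxy is straightforward. Below is a minimal sketch, not part of this changeset, that reads such a .loc file into (value, dbkey, name, path) tuples; the file path in the usage comment is hypothetical.

    def read_loc(path):
        """Parse a Galaxy .loc file into a list of (value, dbkey, name, path) tuples."""
        entries = []
        with open(path) as fh:
            for line in fh:
                line = line.rstrip('\n')
                # Skip comments and blank lines.
                if not line or line.startswith('#'):
                    continue
                fields = line.split('\t')
                if len(fields) == 4:
                    entries.append(tuple(fields))
        return entries

    # e.g. entries = read_loc('tool-data/salmon_indexes.loc')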
diff -r 6cd60ba8a842 -r c5dea2080109 tool_data_table_conf.xml.sample
--- a/tool_data_table_conf.xml.sample Tue Aug 14 11:14:52 2018 -0400
+++ b/tool_data_table_conf.xml.sample Tue Aug 14 11:19:49 2018 -0400
@@ -1,5 +1,12 @@
-
-
-
+<tables>
+    <!-- Locations of all fasta files under genome directory -->
+    <table name="all_fasta" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/all_fasta.loc" />
+    </table>
+    <!-- Locations of Salmon indexes -->
+    <table name="salmon_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/salmon_indexes.loc" />
+    </table>
+</tables>