ncbi_epi_browse: ncbi_connector.py @ 10:348092e55af7 (draft)
description: Uploaded
author:      matt-shirley
date:        Mon, 30 Sep 2013 12:46:14 -0400
parents:     58917de44665
children:    (none)
#!/usr/bin/env python
# Retrieves data from external data source applications and stores in a dataset file.
# Data source application parameters are temporarily stored in the dataset file.
import socket
import urllib
import sys
import os
import optparse
import xml.etree.ElementTree as ElementTree

from galaxy import eggs #eggs needs to be imported so that galaxy.util can find docutils egg...
from galaxy.util.json import from_json_string, to_json_string
from galaxy.util import get_charset_from_http_headers
import galaxy.model # need to import model before sniff to resolve a circular import dependency
from galaxy.datatypes import sniff
from galaxy.datatypes.registry import Registry
from galaxy.jobs import TOOL_PROVIDED_JOB_METADATA_FILE

assert sys.version_info[:2] >= ( 2, 4 )


def stop_err( msg ):
    sys.stderr.write( msg )
    sys.exit()


GALAXY_PARAM_PREFIX = 'GALAXY'
GALAXY_ROOT_DIR = os.path.realpath( os.path.join( os.path.split( os.path.realpath( __file__ ) )[0], '..', '..' ) )
GALAXY_DATATYPES_CONF_FILE = os.path.join( GALAXY_ROOT_DIR, 'datatypes_conf.xml' )


def load_input_parameters( filename, erase_file=True ):
    datasource_params = {}
    try:
        json_params = from_json_string( open( filename, 'r' ).read() )
        datasource_params = json_params.get( 'param_dict' )
    except:
        json_params = None
        for line in open( filename, 'r' ):
            try:
                line = line.strip()
                fields = line.split( '\t' )
                datasource_params[ fields[0] ] = fields[1]
            except:
                continue
    if erase_file:
        open( filename, 'w' ).close() #open file for writing, then close, removes params from file
    return json_params, datasource_params


def deconstruct_multi_filename( multi_filename ):
    keys = [ 'primary', 'id', 'name', 'visible', 'file_type', 'dbkey' ]
    return ( dict( zip( keys, multi_filename.split( '_' ) ) ) )


def construct_multi_filename( id, name, file_type, dbkey=None ):
    """ Implementation of *Number of Output datasets cannot be determined until tool run* from documentation_.

    .. _documentation: http://wiki.galaxyproject.org/Admin/Tools/Multiple%20Output%20Files
    """
    if dbkey:
        filename = "%s_%s_%s_%s_%s_%s" % ( 'primary', id, name, 'visible', file_type, dbkey )
    else:
        filename = "%s_%s_%s_%s_%s" % ( 'primary', id, name, 'visible', file_type )
    return filename
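
# A minimal sketch (values are hypothetical) of how the two helpers above
# round-trip a multi-output filename. Note that the fields are '_'-separated,
# so none of the values, in particular 'name', may themselves contain
# underscores:
#
#   >>> construct_multi_filename( '42', 'GSM1234', 'bed', dbkey='hg19' )
#   'primary_42_GSM1234_visible_bed_hg19'
#   >>> deconstruct_multi_filename( 'primary_42_GSM1234_visible_bed_hg19' )[ 'dbkey' ]
#   'hg19'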


def xml_save_to_newfile_directory( xmlfile, directory, id, enhanced_handling=False, datatypes_registry=None ):
    """ Open ``xmlfile`` and parse out all URLs to fetch. Fetch each file,
    saving it to ``directory``. The first file is saved for last and returned
    to the caller for use as ``page``.

    Schema ::

        <?xml version="1.0"?>
        <!DOCTYPE downloads [
        <!ELEMENT downloads (download)>
        <!ELEMENT download (resource,url,meta)>
        <!ELEMENT resource (#PCDATA)>
        <!ELEMENT url (#PCDATA)>
        <!ELEMENT meta (id,format,type,summary,feature,genome,technique,instrument,assay,sample,description,PMID)>
        <!ELEMENT id (#PCDATA)>
        <!ELEMENT format (#PCDATA)>
        <!ELEMENT type (#PCDATA)>
        <!ELEMENT summary (#PCDATA)>
        <!ELEMENT feature (#PCDATA)>
        <!ELEMENT genome (#PCDATA)>
        <!ELEMENT technique (#PCDATA)>
        <!ELEMENT instrument (#PCDATA)>
        <!ELEMENT assay (#PCDATA)>
        <!ELEMENT sample (#PCDATA)>
        <!ELEMENT description (#PCDATA)>
        <!ELEMENT PMID (#PCDATA)>
        ]>
    """
    root = ElementTree.fromstring( xmlfile.read() )
    if root.tag != 'downloads':
        stop_err( 'The remote data source application did not send back a valid <downloads> XML document.' )
    # traverse xml schema to find URLs, names, and dbkeys
    files_to_fetch = []
    complete = False
    for child in root:
        if ( child.tag == 'download' ) and complete:
            # a previous <download> entry was fully parsed; queue it before
            # its fields are overwritten by the current entry
            files_to_fetch.append( ( construct_multi_filename( id, name, file_type, dbkey ), URL ) )
        for sub in child:
            if sub.tag == 'url':
                URL = sub.text
            elif sub.tag == 'meta':
                for meta in sub:
                    if meta.tag == 'id':
                        name = meta.text
                    elif meta.tag == 'genome':
                        dbkey = meta.text
                    elif meta.tag == 'format':
                        file_type = meta.text
        complete = True
    # hit the end of our schema
    files_to_fetch.append( ( construct_multi_filename( id, name, file_type, dbkey ), URL ) )
    if len( files_to_fetch ) > 1:
        for filename, URL in files_to_fetch[1:]:
            try:
                cur_filename = os.path.join( directory, filename )
                page = urllib.urlopen( URL )
                multi_dict = deconstruct_multi_filename( filename )
                cur_filename, is_multi_byte = sniff.stream_to_open_named_file( page,
                                                                               os.open( cur_filename, os.O_WRONLY | os.O_CREAT ),
                                                                               cur_filename,
                                                                               source_encoding=get_charset_from_http_headers( page.headers ) )
                if enhanced_handling:
                    ext = sniff.handle_uploaded_dataset_file( cur_filename, datatypes_registry, ext=multi_dict[ 'file_type' ], is_multi_byte=is_multi_byte )
            except Exception, e:
                stop_err( 'Unable to fetch %s:\n%s' % ( URL, e ) )
    # pass page back to main
    return ( files_to_fetch[0][0], urllib.urlopen( files_to_fetch[0][1] ) )
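
# For reference, a response matching the schema above might look like the
# following (all values are illustrative, and only <id>, <genome>, and
# <format> are actually read from <meta>; the remaining required meta
# elements are omitted here for brevity):
#
#   <?xml version="1.0"?>
#   <downloads>
#     <download>
#       <resource>GEO</resource>
#       <url>http://example.org/GSM1234.bed.gz</url>
#       <meta>
#         <id>GSM1234</id>
#         <format>bed</format>
#         <genome>hg19</genome>
#         ...
#       </meta>
#     </download>
#   </downloads>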


def __main__():
    # Parse the command line options
    usage = "Usage: ncbi_connector.py filename max_size [options]"
    parser = optparse.OptionParser( usage=usage )
    parser.add_option( "-x", "--xml", action="store_true", dest="xmlfile", help="filename defines external resources as xml" )
    parser.add_option( "-i", "--id", type="string", action="store", dest="id", help="output id" )
    parser.add_option( "-p", "--path", type="string", action="store", dest="newfilepath", help="new file path" )
    ( options, args ) = parser.parse_args()
    filename = args[0]
    try:
        max_file_size = int( args[1] )
    except:
        max_file_size = 0
    job_params, params = load_input_parameters( filename )
    if job_params is None: #using an older tabular file
        enhanced_handling = False
        datatypes_registry = None #no datatypes registry is available for the legacy parameter format
        job_params = dict( param_dict=params )
        job_params[ 'output_data' ] = [ dict( out_data_name='output',
                                              ext='data',
                                              file_name=filename,
                                              extra_files_path=None ) ]
        job_params[ 'job_config' ] = dict( GALAXY_ROOT_DIR=GALAXY_ROOT_DIR,
                                           GALAXY_DATATYPES_CONF_FILE=GALAXY_DATATYPES_CONF_FILE,
                                           TOOL_PROVIDED_JOB_METADATA_FILE=TOOL_PROVIDED_JOB_METADATA_FILE )
    else:
        enhanced_handling = True
        json_file = open( job_params[ 'job_config' ][ 'TOOL_PROVIDED_JOB_METADATA_FILE' ], 'w' ) #specially named file for output junk to pass onto set metadata
        datatypes_registry = Registry()
        datatypes_registry.load_datatypes( root_dir=job_params[ 'job_config' ][ 'GALAXY_ROOT_DIR' ],
                                           config=job_params[ 'job_config' ][ 'GALAXY_DATATYPES_CONF_FILE' ] )
    URL = params.get( 'URL', None ) #using exactly 'URL' indicates that only one dataset is being downloaded
    URL_method = params.get( 'URL_method', None )
    # The Python support for fetching resources from the web is layered. urllib uses the httplib
    # library, which in turn uses the socket library. As of Python 2.3 you can specify how long
    # a socket should wait for a response before timing out. By default the socket module has no
    # timeout and can hang. Currently, the socket timeout is not exposed at the httplib or urllib2
    # levels. However, you can set the default timeout ( in seconds ) globally for all sockets by
    # doing the following.
    socket.setdefaulttimeout( 600 )
    for data_dict in job_params[ 'output_data' ]:
        cur_filename = data_dict.get( 'file_name', filename )
        cur_URL = params.get( '%s|%s|URL' % ( GALAXY_PARAM_PREFIX, data_dict[ 'out_data_name' ] ), URL )
        if not cur_URL:
            open( cur_filename, 'w' ).write( "" )
            stop_err( 'The remote data source application has not sent back a URL parameter in the request.' )
        # The following calls to urllib.urlopen() will use the above default timeout
        try:
            if not URL_method or URL_method == 'get':
                page = urllib.urlopen( cur_URL )
            elif URL_method == 'post':
                page = urllib.urlopen( cur_URL, urllib.urlencode( params ) )
        except Exception, e:
            stop_err( 'The remote data source application may be off line, please try again later. Error: %s' % str( e ) )
        if max_file_size:
            file_size = int( page.info().get( 'Content-Length', 0 ) )
            if file_size > max_file_size:
                stop_err( 'The size of the data (%d bytes) you have requested exceeds the maximum allowed (%d bytes) on this server.' % ( file_size, max_file_size ) )
        # If xmlfile is provided, fetch files 2 through n and save them to new_file_path, replacing page with file 1
        if options.xmlfile:
            multi_filename, page = xml_save_to_newfile_directory( page, options.newfilepath, options.id, enhanced_handling, datatypes_registry )
            multi_dict = deconstruct_multi_filename( multi_filename )
        # sniff the stream for multi_byte characters while writing it to disk
        try:
            cur_filename, is_multi_byte = sniff.stream_to_open_named_file( page,
                                                                           os.open( cur_filename, os.O_WRONLY | os.O_CREAT ),
                                                                           cur_filename,
                                                                           source_encoding=get_charset_from_http_headers( page.headers ) )
        except Exception, e:
            stop_err( 'Unable to fetch %s:\n%s' % ( cur_URL, e ) )
        # here, run the same import checks that the upload tool performs
        if enhanced_handling:
            try:
                ext = sniff.handle_uploaded_dataset_file( filename, datatypes_registry, ext=data_dict[ 'ext' ], is_multi_byte=is_multi_byte )
            except Exception, e:
                stop_err( str( e ) )
            info = dict( type='dataset',
                         dataset_id=data_dict[ 'dataset_id' ],
                         ext=ext )
            json_file.write( "%s\n" % to_json_string( info ) )


if __name__ == "__main__":
    __main__()
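
# Example invocation (paths, size limit, and output id are hypothetical):
#
#   python ncbi_connector.py /galaxy/database/files/dataset_42.dat 1073741824 \
#       --xml --id 42 --path /galaxy/database/tmp
#
# Here dataset_42.dat initially holds the parameters written by Galaxy; the
# script erases them and streams the first downloaded file into the output
# dataset, while files 2..n are written under --path using the multi-output
# naming scheme above.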