Comparison: cuffmerge_wrapper.py @ 0:dbbd37e013aa

commit message: Uploaded tool tarball.
author:         devteam
date:           Tue, 01 Oct 2013 12:57:24 -0400
parents:        (none)
children:       5b285b6e4ee3
#!/usr/bin/env python

# Supports Cuffmerge versions 1.3 and newer.

import optparse, os, shutil, subprocess, sys, tempfile

def stop_err( msg ):
    sys.stderr.write( '%s\n' % msg )
    sys.exit( 1 )

# Copied from sam_to_bam.py:
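# The .loc file read below is expected to be tab-separated, with data lines of the form
#   index<TAB><dbkey><TAB><path to fasta>
# for example (illustrative values only):
#   index<TAB>equCab2<TAB>/galaxy/data/equCab2/sam_index/equCab2.fa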
def check_seq_file( dbkey, cached_seqs_pointer_file ):
    seq_path = ''
    for line in open( cached_seqs_pointer_file ):
        line = line.rstrip( '\r\n' )
        if line and not line.startswith( '#' ) and line.startswith( 'index' ):
            fields = line.split( '\t' )
            if len( fields ) < 3:
                continue
            if fields[1] == dbkey:
                seq_path = fields[2].strip()
                break
    return seq_path

def __main__():
    # Parse command line.
    parser = optparse.OptionParser()
    parser.add_option( '-g', dest='ref_annotation', help='An optional "reference" annotation GTF. Each sample is matched against this file, and sample isoforms are tagged as overlapping, matching, or novel where appropriate.' )
    parser.add_option( '-s', dest='use_seq_data', action="store_true", help='Causes cuffmerge to look in <seq_dir> for fasta files with the underlying genomic sequences (one file per contig) against which your reads were aligned, for some optional classification functions. For example, Cufflinks transcripts consisting mostly of lower-case bases are classified as repeats. Note that <seq_dir> must contain one fasta file per reference chromosome, and each file must be named after the chromosome and have a .fa or .fasta extension.')
    parser.add_option( '-p', '--num-threads', dest='num_threads', help='Use this many threads to merge assemblies. The default is 1.' )


    # Wrapper / Galaxy options.
    parser.add_option( '', '--dbkey', dest='dbkey', help='The build of the reference dataset' )
    parser.add_option( '', '--index_dir', dest='index_dir', help='GALAXY_DATA_INDEX_DIR' )
    parser.add_option( '', '--ref_file', dest='ref_file', help='The reference dataset from the history' )

    # Outputs.
    parser.add_option( '', '--merged-transcripts', dest='merged_transcripts' )

    (options, args) = parser.parse_args()
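    # Any remaining positional arguments are the per-sample GTF assemblies to merge;
    # they are written to a manifest file and handed to cuffmerge below.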

    # output version # of tool
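    # The version banner may arrive on stderr, so stderr is merged into stdout (2>&1)
    # and the combined output is scanned for a line containing 'merge_cuff_asms v'.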
    try:
        tmp = tempfile.NamedTemporaryFile().name
        tmp_stdout = open( tmp, 'wb' )
        proc = subprocess.Popen( args='cuffmerge -v 2>&1', shell=True, stdout=tmp_stdout )
        tmp_stdout.close()
        returncode = proc.wait()
        stdout = None
        for line in open( tmp_stdout.name, 'rb' ):
            if line.lower().find( 'merge_cuff_asms v' ) >= 0:
                stdout = line.strip()
                break
        if stdout:
            sys.stdout.write( '%s\n' % stdout )
        else:
            raise Exception
    except:
        sys.stdout.write( 'Could not determine Cuffmerge version\n' )

    # Set/link to sequence file.
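    # A fasta supplied from the history is symlinked as ref.fa so that any index
    # cuffmerge builds lands in the working directory; otherwise the fasta path is
    # looked up in sam_fa_indices.loc via the dbkey.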
    if options.use_seq_data:
        if options.ref_file != 'None':
            # Sequence data from history.
            # Create symbolic link to ref_file so that index will be created in working directory.
            seq_path = "ref.fa"
            os.symlink( options.ref_file, seq_path )
        else:
            # Sequence data from loc file.
            cached_seqs_pointer_file = os.path.join( options.index_dir, 'sam_fa_indices.loc' )
            if not os.path.exists( cached_seqs_pointer_file ):
                stop_err( 'The required file (%s) does not exist.' % cached_seqs_pointer_file )
            # If found for the dbkey, seq_path will look something like /galaxy/data/equCab2/sam_index/equCab2.fa,
            # and the equCab2.fa file will contain fasta sequences.
            seq_path = check_seq_file( options.dbkey, cached_seqs_pointer_file )
            if seq_path == '':
                stop_err( 'No sequence data found for dbkey %s, so sequence data cannot be used.' % options.dbkey )

    # Build command.

    # Base.
    cmd = "cuffmerge -o cm_output "
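    # cuffmerge writes its output (including merged.gtf) under the cm_output
    # subdirectory; the merged GTF is copied to the Galaxy output dataset below.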

    # Add options.
    if options.num_threads:
        cmd += ( " -p %i " % int( options.num_threads ) )
    if options.ref_annotation:
        cmd += " -g %s " % options.ref_annotation
    if options.use_seq_data:
        cmd += " -s %s " % seq_path

    # Add input files to a file.
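    # cuffmerge expects its input assemblies as a text file listing one GTF path per
    # line; that manifest is built here from the positional arguments.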
    inputs_file_name = tempfile.NamedTemporaryFile( dir="." ).name
    inputs_file = open( inputs_file_name, 'w' )
    for arg in args:
        inputs_file.write( arg + "\n" )
    inputs_file.close()
    cmd += inputs_file_name
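    # At this point cmd looks something like (illustrative values only):
    #   cuffmerge -o cm_output  -p 4  -g refGenes.gtf  -s ref.fa <manifest file>
    # where the trailing argument is the manifest file written above.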

    # Debugging.
    print cmd

    # Run command.
    try:
        tmp_name = tempfile.NamedTemporaryFile( dir="." ).name
        tmp_stderr = open( tmp_name, 'wb' )
        proc = subprocess.Popen( args=cmd, shell=True, stderr=tmp_stderr.fileno() )
        returncode = proc.wait()
        tmp_stderr.close()

        # Get stderr, allowing for case where it's very large.
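        # stderr is accumulated in 1 MiB chunks; the loop stops once a read returns
        # less than a full chunk (or nothing at all was written).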
        tmp_stderr = open( tmp_name, 'rb' )
        stderr = ''
        buffsize = 1048576
        try:
            while True:
                stderr += tmp_stderr.read( buffsize )
                if not stderr or len( stderr ) % buffsize != 0:
                    break
        except OverflowError:
            pass
        tmp_stderr.close()

        # Error checking.
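        # A non-zero exit status (reported with the captured stderr) or an empty
        # merged.gtf is treated as a failure and passed to stop_err.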
        if returncode != 0:
            raise Exception, stderr

        if len( open( "cm_output/merged.gtf", 'rb' ).read().strip() ) == 0:
            raise Exception, 'The output file is empty; there may be an error with your input file or settings.'

        # Copy outputs.
        shutil.copyfile( "cm_output/merged.gtf", options.merged_transcripts )

    except Exception, e:
        stop_err( 'Error running cuffmerge. ' + str( e ) )

if __name__ == "__main__":
    __main__()