#!/usr/bin/env python
"""
usage: %prog $input $out_file1
    -1, --cols=N,N,N,N: Columns for chrom, start, end, strand in input file
    -d, --dbkey=N: Genome build of input file
    -o, --output_format=N: the data type of the output file
    -g, --GALAXY_DATA_INDEX_DIR=N: the directory containing alignseq.loc
    -I, --interpret_features: if true, complete features are interpreted when input is GFF
    -F, --fasta=<genomic_sequences>: genomic sequences to use for extraction
    -G, --gff: when the input and output files are in interval format, coordinates are treated as GFF coordinates (1-based, closed) rather than 'traditional' 0-based, half-open interval coordinates.
"""
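# Example invocation (script name, file names, genome build, and column numbers
# are illustrative only; in practice Galaxy supplies these values):
#   python extract_genomic_dna.py input.interval out.fasta --cols=1,2,3,6 \
#       --dbkey=hg19 --output_format=fasta --GALAXY_DATA_INDEX_DIR=/path/to/tool-data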
from galaxy import eggs
import pkg_resources
pkg_resources.require( "bx-python" )
import sys, string, os, re, tempfile, subprocess
from bx.cookbook import doc_optparse
from bx.intervals.io import Header, Comment
import bx.seq.nib
import bx.seq.twobit
from galaxy.tools.util.galaxyops import *
from galaxy.datatypes.util import gff_util

assert sys.version_info[:2] >= ( 2, 4 )

def stop_err( msg ):
    sys.stderr.write( msg )
    # Exit with a non-zero status so callers can detect the failure.
    sys.exit( 1 )

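# Return the reverse complement of a DNA string. Only A, C, G, T and N
# (upper or lower case) are handled; any other character raises a KeyError.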
def reverse_complement( s ):
    complement_dna = {"A":"T", "T":"A", "C":"G", "G":"C", "a":"t", "t":"a", "c":"g", "g":"c", "N":"N", "n":"n" }
    reversed_s = []
    for i in s:
        reversed_s.append( complement_dna[i] )
    reversed_s.reverse()
    return "".join( reversed_s )

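# Look up the sequence data path for a genome build in alignseq.loc. Matching
# lines are tab-separated ("seq", dbkey, path); the returned path points either
# to a directory of per-chromosome .nib files or to a single .2bit file, and an
# empty string is returned when the build is not listed.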
def check_seq_file( dbkey, GALAXY_DATA_INDEX_DIR ):
    seq_file = "%s/alignseq.loc" % GALAXY_DATA_INDEX_DIR
    seq_path = ''
    for line in open( seq_file ):
        line = line.rstrip( '\r\n' )
        if line and not line.startswith( "#" ) and line.startswith( 'seq' ):
            fields = line.split( '\t' )
            if len( fields ) < 3:
                continue
            if fields[1] == dbkey:
                seq_path = fields[2].strip()
                break
    return seq_path

def __main__():
    #
    # Parse options, args.
    #
    options, args = doc_optparse.parse( __doc__ )
    try:
        chrom_col, start_col, end_col, strand_col = parse_cols_arg( options.cols )
        dbkey = options.dbkey
        output_format = options.output_format
        gff_format = options.gff
        interpret_features = options.interpret_features
        GALAXY_DATA_INDEX_DIR = options.GALAXY_DATA_INDEX_DIR
        fasta_file = options.fasta
        input_filename, output_filename = args
    except:
        doc_optparse.exception()

    includes_strand_col = strand_col >= 0
    strand = None
    nibs = {}
    twobits = {}

    #
    # Set path to sequence data.
    #
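    # When a FASTA file is supplied, it is first packed into 2bit format with
    # faToTwoBit (the UCSC utility), which is assumed to be available on the PATH.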
    if fasta_file:
        # Need to create 2bit file from fasta file.
        try:
            seq_path = tempfile.NamedTemporaryFile( dir="." ).name
            cmd = "faToTwoBit %s %s" % ( fasta_file, seq_path )

            tmp_name = tempfile.NamedTemporaryFile( dir="." ).name
            tmp_stderr = open( tmp_name, 'wb' )
            proc = subprocess.Popen( args=cmd, shell=True, stderr=tmp_stderr.fileno() )
            returncode = proc.wait()
            tmp_stderr.close()

            # Get stderr, allowing for case where it's very large.
            tmp_stderr = open( tmp_name, 'rb' )
            stderr = ''
            buffsize = 1048576
            try:
                while True:
                    stderr += tmp_stderr.read( buffsize )
                    if not stderr or len( stderr ) % buffsize != 0:
                        break
            except OverflowError:
                pass
            tmp_stderr.close()

            # Error checking.
            if returncode != 0:
                raise Exception, stderr
        except Exception, e:
            stop_err( 'Error running faToTwoBit. ' + str( e ) )
    else:
        seq_path = check_seq_file( dbkey, GALAXY_DATA_INDEX_DIR )
        if not os.path.exists( seq_path ):
            # If this occurs, we need to fix the metadata validator.
            stop_err( "No sequences are available for '%s', request them by reporting this error." % dbkey )

    #
    # Fetch sequences.
    #

    # Get feature's line(s).
    def get_lines( feature ):
        if isinstance( feature, gff_util.GFFFeature ):
            return feature.lines()
        else:
            return [ feature.rstrip( '\r\n' ) ]

    skipped_lines = 0
    first_invalid_line = 0
    invalid_lines = []
    fout = open( output_filename, "w" )
    warnings = []
    warning = ''
    twobitfile = None
    file_iterator = open( input_filename )
    if gff_format and interpret_features:
        file_iterator = gff_util.GFFReaderWrapper( file_iterator, fix_strand=False )
    line_count = 1
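    # For each feature/interval: fetch its sequence from a per-chromosome .nib
    # file under seq_path if one exists, otherwise from a single .2bit file;
    # reverse-complement minus-strand intervals; then write the result in the
    # requested output format. Problem lines are collected and reported at the end.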
    for feature in file_iterator:
        # Ignore comments, headers.
        if isinstance( feature, ( Header, Comment ) ):
            line_count += 1
            continue

        if gff_format and interpret_features:
            # Processing features.
            gff_util.convert_gff_coords_to_bed( feature )
            chrom = feature.chrom
            start = feature.start
            end = feature.end
            strand = feature.strand
        else:
            # Processing lines, either interval or GFF format.
            line = feature.rstrip( '\r\n' )
            if line and not line.startswith( "#" ):
                fields = line.split( '\t' )
                try:
                    chrom = fields[chrom_col]
                    start = int( fields[start_col] )
                    end = int( fields[end_col] )
                    if gff_format:
                        start, end = gff_util.convert_gff_coords_to_bed( [start, end] )
                    if includes_strand_col:
                        strand = fields[strand_col]
                except:
                    warning = "Invalid chrom, start or end column values. "
                    warnings.append( warning )
                    if not invalid_lines:
                        invalid_lines = get_lines( feature )
                        first_invalid_line = line_count
                    skipped_lines += len( invalid_lines )
                    continue
                if start > end:
                    warning = "Invalid interval, start '%d' > end '%d'. " % ( start, end )
                    warnings.append( warning )
                    if not invalid_lines:
                        invalid_lines = get_lines( feature )
                        first_invalid_line = line_count
                    skipped_lines += len( invalid_lines )
                    continue

                if strand not in ['+', '-']:
                    strand = '+'
                sequence = ''
            else:
                continue

        # Open sequence file and get sequence for feature/interval.
        if seq_path and os.path.exists( "%s/%s.nib" % ( seq_path, chrom ) ):
            # TODO: improve support for GFF-nib interaction.
            if chrom in nibs:
                nib = nibs[chrom]
            else:
                nibs[chrom] = nib = bx.seq.nib.NibFile( file( "%s/%s.nib" % ( seq_path, chrom ) ) )
            try:
                sequence = nib.get( start, end-start )
            except Exception, e:
                warning = "Unable to fetch the sequence from '%d' to '%d' for build '%s'. " % ( start, end, dbkey )
                warnings.append( warning )
                if not invalid_lines:
                    invalid_lines = get_lines( feature )
                    first_invalid_line = line_count
                skipped_lines += len( invalid_lines )
                continue
        elif seq_path and os.path.isfile( seq_path ):
            if not twobitfile:
                twobitfile = bx.seq.twobit.TwoBitFile( file( seq_path ) )
            try:
                if gff_format and interpret_features:
                    # Create sequence from intervals within a feature.
                    sequence = ''
                    for interval in feature.intervals:
                        sequence += twobitfile[interval.chrom][interval.start:interval.end]
                else:
                    sequence = twobitfile[chrom][start:end]
            except:
                warning = "Unable to fetch the sequence from '%d' to '%d' for chrom '%s'. " % ( start, end, chrom )
                warnings.append( warning )
                if not invalid_lines:
                    invalid_lines = get_lines( feature )
                    first_invalid_line = line_count
                skipped_lines += len( invalid_lines )
                continue
        else:
            warning = "Chromosome by name '%s' was not found for build '%s'. " % ( chrom, dbkey )
            warnings.append( warning )
            if not invalid_lines:
                invalid_lines = get_lines( feature )
                first_invalid_line = line_count
            skipped_lines += len( invalid_lines )
            continue
        if sequence == '':
            warning = "Chrom: '%s', start: '%s', end: '%s' is either invalid or not present in build '%s'. " \
                % ( chrom, start, end, dbkey )
            warnings.append( warning )
            if not invalid_lines:
                invalid_lines = get_lines( feature )
                first_invalid_line = line_count
            skipped_lines += len( invalid_lines )
            continue
        if includes_strand_col and strand == "-":
            sequence = reverse_complement( sequence )

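        # FASTA output: the header is dbkey_chrom_start_end_strand and the sequence
        # is wrapped at 50 bases per line. Interval output appends the sequence as an
        # extra tab-separated column (or as a GTF-style 'seq "..."' attribute when
        # --gff is set).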
        if output_format == "fasta":
            l = len( sequence )
            c = 0
            if gff_format:
                start, end = gff_util.convert_bed_coords_to_gff( [ start, end ] )
            fields = [dbkey, str( chrom ), str( start ), str( end ), strand]
            meta_data = "_".join( fields )
            fout.write( ">%s\n" % meta_data )
            while c < l:
                b = min( c + 50, l )
                fout.write( "%s\n" % str( sequence[c:b] ) )
                c = b
        else: # output_format == "interval"
            if gff_format and interpret_features:
                # TODO: need better GFF Reader to capture all information needed
                # to produce this line.
                meta_data = "\t".join(
                    [feature.chrom, "galaxy_extract_genomic_dna", "interval",
                     str( feature.start ), str( feature.end ), feature.score, feature.strand,
                     ".", gff_util.gff_attributes_to_str( feature.attributes, "GTF" ) ] )
            else:
                meta_data = "\t".join( fields )
            if gff_format:
                format_str = "%s seq \"%s\";\n"
            else:
                format_str = "%s\t%s\n"
            fout.write( format_str % ( meta_data, str( sequence ) ) )

        # Update line count.
        if isinstance( feature, gff_util.GFFFeature ):
            line_count += len( feature.intervals )
        else:
            line_count += 1

    fout.close()

    if warnings:
        warn_msg = "%d warnings, 1st is: " % len( warnings )
        warn_msg += warnings[0]
        print warn_msg
    if skipped_lines:
        # Error message includes up to the first 10 skipped lines.
        print 'Skipped %d invalid lines, 1st is #%d, "%s"' % ( skipped_lines, first_invalid_line, '\n'.join( invalid_lines[:10] ) )

if __name__ == "__main__": __main__()