comparison filter_by_index_gamma @ 0:6411ca16916e default tip

initial commit
author Yusuf Ali <ali@yusuf.email>
date Wed, 25 Mar 2015 13:23:29 -0600
parents
children
1 #!/usr/bin/env perl
2
3 use strict;
4 use warnings;
5 use DB_File;
6 use Parse::BooleanLogic;
7 use Math::CDF qw(pgamma qgamma); # relevance score -> gamma p-value
8 use PDL qw(pdl);
9 use PDL::Stats::Distr qw(mme_gamma); # gamma dist parameter estimates
10 use vars qw($parser %cached_sentences %sentence_index);
11
12 my $quiet = 0;
13 if(@ARGV and $ARGV[0] =~ /^-q/){
14 $quiet = 1;
15 shift @ARGV;
16 }
17
18 @ARGV == 5 or die "Usage: $0 [-q(uiet)] <index filename base> <db name> <hgvs_annotated.txt> <output.txt> <query>\nWhere query has the format \"this or that\", \"this and that\", etc.\n";
19
20 my $signal_p = 0.95; # signal is top 5% of scores
21 my $index_filename_base = shift @ARGV;
22 my $db_name = shift @ARGV;
23 my $hgvs_file = shift @ARGV;
24 my $out_file = shift @ARGV;
25 my $orig_query = shift @ARGV;
26
27 $parser = Parse::BooleanLogic->new(operators => ['and', 'or']);
28 my $query_tree = $parser->as_array($orig_query, error_cb => sub {die "Could not parse query: @_\n"});
29 # For simplicity, flatten the tree into a single set of OR'd statements, i.e. expand "A and (B or C)" into "A and B or A and C", a.k.a. "sum of products/minterms"
30 my @query_terms = flatten_query($query_tree);
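# e.g. "A and (B or C)" flattens to ("A and B", "A and C"); a simple "A or B" query becomes ("A", "B")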
31
32 my $df_index_filename = $index_filename_base."df_index";
33 my %df_index;
34 my $df_index_handle = tie %df_index, "DB_File", $df_index_filename, O_RDONLY, 0400, $DB_BTREE
35 or die "Cannot open $df_index_filename: $!\n";
36 my $gene_record_count = $df_index{"__DOC_COUNT__"};
37
38 my $sentence_index_filename = $index_filename_base."sentence_index";
39 my $sentence_index_handle = tie %sentence_index, "DB_File", $sentence_index_filename, O_RDONLY, 0400, $DB_HASH
40 or die "Cannot open $sentence_index_filename: $!\n";
41
42 # Get the list of gene symbols we'll need
43 open(HGVS, $hgvs_file)
44 or die "Cannot open $hgvs_file for reading: $!\n";
45 my $header = <HGVS>;
46 chomp $header;
47 my @header_columns = split /\t/, $header;
48 my ($gene_name_column, $chr_column, $from_column, $to_column);
49 for(my $i = 0; $i <= $#header_columns; $i++){
50 if($header_columns[$i] eq "Gene Name"){
51 $gene_name_column = $i;
52 }
53 elsif($header_columns[$i] eq "Chr"){
54 $chr_column = $i;
55 }
56 elsif($header_columns[$i] eq "DNA From"){
57 $from_column = $i;
58 }
59 elsif($header_columns[$i] eq "DNA To"){
60 $to_column = $i;
61 }
62 }
63 my $blank_query = not @query_terms;
64 # Special case of empty query means print all info for variant ranges listed in the input HGVS file (assuming the DB was indexed to include chr:pos keys)
65 if($blank_query){
66 #print STDERR "Running blank query\n";
67 if(not defined $chr_column){
68 die "Could not find 'Chr' column in the input header, aborting\n";
69 }
70 if(not defined $from_column){
71 die "Could not find 'DNA From' column in the input header, aborting\n";
72 }
73 if(not defined $to_column){
74 die "Could not find 'DNA To' column in the input header, aborting\n";
75 }
76 # Build the list of locations that will need to be searched in the index
77
78 open(OUT, ">$out_file")
79 or die "Cannot open $out_file for writing: $!\n";
80 print OUT $header, "\t$db_name Text Matches\n";
81
82 while(<HGVS>){
83 chomp;
84 my @F = split /\t/, $_, -1;
85 my @pos_data;
86 for my $pos ($F[$from_column]..$F[$to_column]){ # for each position in the range
87 my $pos_match_data = fetch_sentence("$F[$chr_column]:$pos", -1); # fetch all data for this position
88 push @pos_data, "*$F[$chr_column]:$pos* ".$pos_match_data if defined $pos_match_data;
89 }
90 print OUT join("\t", @F, join(" // ", @pos_data)),"\n";
91 }
92 close(OUT);
93 exit;
94 }
95 elsif(not defined $gene_name_column){
96 die "Could not find 'Gene Name' column in the input header, aborting\n";
97 }
98 #print STDERR "Query terms: " , scalar(@query_terms), "\n";
99 my %gene_to_query_match_ranges;
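# structure: gene symbol -> list of [query term, match, match, ...] where each match is
# [start offset, end offset, article id, sentence number (or array of numbers for multi-sentence AND matches), matched text] as built by get_doc_offsets() below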
100 # Determine the set of genes that might match the query, based on the word index
101 for my $query_term (@query_terms){
102 #print STDERR "Query term $query_term\n";
103 my %doc_hits; # documents (genes) still matching every word of the term so far, mapped to their match ranges
104 my $contiguous = 1; # by default the words of a multiword query term must be contiguous
105 # Unless it's an AND query
106 if($query_term =~ s/ and / /g){
107 $contiguous = 0;
108 }
109
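# Match the term one word at a time: the first word seeds the candidate documents, and every
# additional word must extend an existing match (same article, and for contiguous terms the same sentence)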
110 my @words = split /\s+/, $query_term; # can be multi-word term like "mental retardation"
111 for(my $i = 0; $i <= $#words; $i++){
112 my $word = mc($words[$i]); # can be a stem word, like hypoton
113 #print STDERR "Checking word $word...";
114 if($i == 0){
115 my $first_word_docs = get_doc_offsets($df_index_handle, $word); # get the documents for every indexed word sharing this stem
116 #print STDERR scalar(keys %$first_word_docs), " documents found\n";
117 for my $doc (keys %$first_word_docs){
118 $doc_hits{$doc} = $first_word_docs->{$doc}; # seed the hit list; it will be whittled down by the remaining words of a multiword phrase
119 }
120 next;
121 }
122 my @candidate_docs = keys %doc_hits;
123 last if not @candidate_docs; # short circuit searches guaranteed to fail
124
125 # each additional word must directly follow an existing match
126 my $word_doc_offsets_ref = get_doc_offsets($df_index_handle, $word); # get the documents for every indexed word sharing this stem
127 #print STDERR scalar(keys %$word_doc_offsets_ref), " documents found\n";
128 for my $doc (@candidate_docs){
129 my $num_matches = 0;
130 if(not exists $word_doc_offsets_ref->{$doc}){ # required word missing, eliminate doc from consideration
131 delete $doc_hits{$doc};
132 next;
133 }
134 # see if any of the instances of the additional words directly follow the last word we successfully matched
135 my $so_far_matches_ref = $doc_hits{$doc};
136 my $next_word_matches_ref = $word_doc_offsets_ref->{$doc};
137 for (my $j=0; $j <= $#{$so_far_matches_ref}; $j++){
138 my $existing_match_extended = 0;
139 next unless defined $so_far_matches_ref->[$j]->[2]; # every once in a while there is no article id parsed
140 for (my $k=0; $k <= $#{$next_word_matches_ref}; $k++){
141 # Same article?
142 next unless defined $next_word_matches_ref->[$k]->[2] and $next_word_matches_ref->[$k]->[2] eq $so_far_matches_ref->[$j]->[2];
143 if(not $contiguous){
144 $so_far_matches_ref->[$j]->[4] .= " AND ".$next_word_matches_ref->[$k]->[4]; # update the matched term to include the extension too
145 if(ref $so_far_matches_ref->[$j]->[3] ne "ARRAY"){ # match does not yet span multiple sentences
146 last if $next_word_matches_ref->[$k]->[3] == $so_far_matches_ref->[$j]->[3]; # same sentence
147 $so_far_matches_ref->[$j]->[3] = [$so_far_matches_ref->[$j]->[3], $next_word_matches_ref->[$k]->[3]]; # change from scalar to array (of sentence numbers)
148 }
149 elsif(not grep {$_ eq $next_word_matches_ref->[$k]->[3]} @{$so_far_matches_ref->[$j]->[3]}){
150 push @{$so_far_matches_ref->[$j]->[3]}, $next_word_matches_ref->[$k]->[3]; # add to the spanning-sentences list if not already there
151 }
152 }
153 # else contiguous word occurrences required.
154 # Same sentence?
155 next unless $next_word_matches_ref->[$k]->[3] == $so_far_matches_ref->[$j]->[3];
156
157 my $space_between_match_words = $next_word_matches_ref->[$k]->[0] - $so_far_matches_ref->[$j]->[1];
158 if($space_between_match_words <= 2){
159 $existing_match_extended = 1;
160 $so_far_matches_ref->[$j]->[1] = $next_word_matches_ref->[$k]->[1]; # move the match cursor to include the new extending word
161 $so_far_matches_ref->[$j]->[4] .= " ".$next_word_matches_ref->[$k]->[4]; # update the matched term to include the extension too
162 last;
163 }
164 elsif($space_between_match_words > 2){ # more than two typographical symbols between words, consider it non-contiguous
165 last; # since the offsets are in order, any further k would only yield a larger spacing, so short-circuit
166 }
167 }
168 if(not $existing_match_extended){
169 splice(@$so_far_matches_ref, $j, 1);
170 $j--;
171 }
172 else{
173 $num_matches++;
174 }
175 }
176 if(not $num_matches){
177 delete $doc_hits{$doc};
178 }
179 }
180 }
181 # the only keys that get to this point should be those that match all terms
182 for my $doc (keys %doc_hits){
183 $gene_to_query_match_ranges{$doc} = [] if not exists $gene_to_query_match_ranges{$doc};
184 push @{$gene_to_query_match_ranges{$doc}}, [$query_term, @{$doc_hits{$doc}}];
185 }
186 }
187
188 my @matched_genes = keys %gene_to_query_match_ranges;
189 #print STDERR "Found ", scalar(@matched_genes), "/$gene_record_count records in cached iHOP matching the query\n" unless $quiet;
190 my %query_gene_counts;
191 my %ntf;
192 for my $gene (keys %gene_to_query_match_ranges){
193 my $max_doc_word_count = $df_index{"__DOC_MAX_WC_$gene"};
194 for my $count_record (@{$gene_to_query_match_ranges{$gene}}){
195 my ($query_term, @query_term_match_ranges_in_this_gene) = @$count_record;
196 # next if $query_term eq $gene; # slightly controversial? exclude references to genes from the score if the gene is the record being talked about (obviously it will be highly scored)
197 # allows us to find first degree interactors (i.e. points for "A interacts with B", in the record describing A) without creating crazy high score for doc describing gene B if B was in the original query without any phenotype query terms
198 $query_gene_counts{$query_term}++;
199
200 $ntf{$gene} = {} unless exists $ntf{$gene};
201 # atypical use of log in order to weigh heavy use of a common term less than occasional use of a rare term
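# i.e. ntf = log(number of matches + 1) / log(max word count in this gene's record + 1)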
202 $ntf{$gene}->{$query_term} = log($#query_term_match_ranges_in_this_gene+2)/log($max_doc_word_count+1);
203 }
204 #print STDERR "Doc max word count is $max_doc_word_count for $gene, ntf keys = ", keys %{$ntf{$gene}}, "\n";
205 }
206
207 my %idf;
208 for my $query_term (@query_terms){ # convert the per-term document counts in %query_gene_counts into actual IDF values
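# i.e. idf = ln(total gene records / number of records containing this query term)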
209 next unless exists $query_gene_counts{$query_term}; # query not in the document collection
210 $idf{$query_term} = log($gene_record_count/$query_gene_counts{$query_term});
211 #print STDERR "$query_term IDF is $idf{$query_term}\n";
212 }
213
214 # Create a relevance score using a normalized term frequency - inverse document frequency summation
215 my %relevance_score;
216 my %matched_query_terms;
217 for my $gene_symbol (keys %gene_to_query_match_ranges){
218 my $relevance_score = 0;
219 # Hmm, take average, sum or max of TF-IDFs?
220 my $max_query_score = 0;
221 my @matched_query_terms;
222 my $query_score = 0;
223 for (my $i = 0; $i <= $#query_terms; $i++){
224 my $query_term = $query_terms[$i];
225 next unless exists $idf{$query_term};
226 next unless exists $ntf{$gene_symbol}->{$query_term};
227 $query_score += $ntf{$gene_symbol}->{$query_term}*$idf{$query_term};
228 push @matched_query_terms, $query_term;
229 $query_score *= 1-$i/scalar(@query_terms)/2 if scalar(@query_terms) > 2;# adjust the query score so the first terms are weighted more heavily if a bunch of terms are being searched
230 $max_query_score = $query_score if $query_score > $max_query_score;
231 $relevance_score += $query_score;
232 }
233 # this square root trick will not affect the score of a single-term query, but will penalize a high total score that is composed of a bunch of low-value individual term scores
234 $relevance_score{$gene_symbol} = sqrt($relevance_score*$max_query_score);
235 #print STDERR "Relevance score for $gene_symbol is $relevance_score{$gene_symbol}\n";
236 $matched_query_terms{$gene_symbol} = \@matched_query_terms;
237 }
238
239 # Characterize the relevance scores as a gamma distribution and convert them to probabilities
240 my $max_relevance_score = 0;
241 for my $relevance_score (values %relevance_score){
242 $max_relevance_score = $relevance_score if $relevance_score > $max_relevance_score;
243 }
244 # Remove the top-end scores as signal and characterize the rest as noise:
245 # estimate the gamma parameters, drop scores above the CDF > 95% ($signal_p) cutoff, then re-estimate
246 my $noise_data = pdl(values %relevance_score);
247 my ($shape, $scale) = $noise_data->mme_gamma();
248 #print STDERR "Initial gamma distribution estimates: $shape, $scale (max observation $max_relevance_score)\n";
249 my $signal_cutoff = qgamma($signal_p, $shape, 1/$scale);
250 my @noise_data;
251 for my $gene_symbol (keys %relevance_score){
252 my $score = $relevance_score{$gene_symbol};
253 push @noise_data, $score if $score < $signal_cutoff;
254 }
255 $noise_data = pdl(@noise_data);
256 ($shape, $scale) = $noise_data->mme_gamma();
257 #print STDERR "Revised gamma distribution estimates (noise estimate at $signal_cutoff (CDF $signal_p)): $shape, $scale\n";
258 # Convert scores to probabilities
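# 1 - pgamma(score) is the upper-tail probability of the fitted noise distribution,
# i.e. the chance that a noise-only score would be at least this large (smaller = more relevant)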
259 for my $gene_symbol (keys %relevance_score){
260 $relevance_score{$gene_symbol} = 1-pgamma($relevance_score{$gene_symbol}, $shape, 1/$scale);
261 }
262
263 # TODO: create summary stats for each query term so the user gets an idea of each one's contribution?
264
265 my %pubmed_matches;
266 for my $gene_symbol (keys %gene_to_query_match_ranges){
267 my $query_match_ranges_ref = $gene_to_query_match_ranges{$gene_symbol};
268 my %matching_sentences;
269 for my $count_record (@$query_match_ranges_ref){
270 my ($query_term, @query_term_match_ranges_in_this_gene) = @$count_record;
271 for my $occ_info (@query_term_match_ranges_in_this_gene){
272 my $id = $occ_info->[2];
273 my $sentence_number = $occ_info->[3];
274 my $query_match_word = $occ_info->[4];
275 # Fetch the preparsed sentence from the sentence index based on article id and sentence number.
276 # As a side effect, fetch_sentence_key() *HIGHLIGHTS* the matched query terms in the cached copy of the sentence
277 if(ref $sentence_number eq "ARRAY"){ # match spans multiple sentences
278 for my $s (@$sentence_number){
279 for my $word (split / AND /, $query_match_word){
280 #print STDERR "Highlighting $word in $id #$s for query term $query_term (multisentence match)\n";
281 $matching_sentences{fetch_sentence_key($id, $s, $word)}++;
282 }
283 }
284 }
285 else{ # single sentence match
286 #print STDERR "Highlighting $query_match_word in $id #$sentence_number for query term $query_term\n";
287 $matching_sentences{fetch_sentence_key($id, $sentence_number, $query_match_word)}++;
288 }
289 }
290 }
291 $gene_symbol =~ s/_/\//; # gene names containing a forward slash were stored with an underscore for disk caching purposes; restore it
292 if(keys %matching_sentences){
293 $pubmed_matches{$gene_symbol} = [] unless exists $pubmed_matches{$gene_symbol};
294 for my $new_match_ref (keys %matching_sentences){
295 push @{$pubmed_matches{$gene_symbol}}, $new_match_ref unless grep {$_ eq $new_match_ref} @{$pubmed_matches{$gene_symbol}}; # only put in new sentences, no need to dup
296 }
297 }
298 }
299
300 $orig_query =~ s/\s+/ /g; # normalize whitespace
301 $orig_query =~ s/ and / and /gi; # lowercase the AND operator
302 my @orig_query_terms = split /\s+or\s+/, $orig_query;
303
304 open(OUT, ">$out_file")
305 or die "Cannot open $out_file for writing: $!\n";
306 my $new_header = $header;
307 $new_header .= "\t$db_name p-value (log normalized TF-IDF score, gamma dist)\t$db_name Matching Terms ($orig_query)\t$db_name Text Matches";
308 print OUT $new_header, "\n";
309
310 # Check if any of the variants in the annotated HGVS table are in genes from the query match list
311 while(<HGVS>){
312 chomp;
313 my @F = split /\t/, $_, -1;
314 # order the ids from highest number of sentence matches to lowest, from highest ranked term to least
315 my (%id2match_count, %id2sentences);
316 my @matched_genes;
317 my $relevance_score_final = 1;
318 my @matched_query_terms;
319 for my $gene_name (split /\s*;\s*/, $F[$gene_name_column]){
320 next unless exists $pubmed_matches{$gene_name};
321 push @matched_genes, $gene_name;
322 for my $sentence_ref (@{$pubmed_matches{$gene_name}}){ # sentence index 0 always holds the record title, which is fetched below for each new id
323 my $pubmed_record = fetch_sentence($sentence_ref);
324 $id2match_count{$pubmed_record->[0]}++; # key = id
325 if(not exists $id2sentences{$pubmed_record->[0]}){
326 $id2sentences{$pubmed_record->[0]} = {};
327 my $title_record = fetch_sentence(fetch_sentence_key($pubmed_record->[0], 0, ""));
328 next unless $title_record->[0];
329 print STDERR "No $index_filename_base sentence number for ", $title_record->[0], "\n" if not defined $title_record->[1];
330 print STDERR "No $index_filename_base sentence text for ", $title_record->[0], " sentence #", $title_record->[1], "\n" if not defined $title_record->[2];
331 $id2sentences{$title_record->[0]}->{$title_record->[2]} = $title_record->[1];
332 }
333 # Only print sentences that match a query term other than the gene name for the record key, if that gene name is part of the query
334 my $non_self_query_ref = 0;
335 while($pubmed_record->[2] =~ /\*(.+?)\*/g){
336 if($1 ne $gene_name){
337 $non_self_query_ref = 1;
338 last;
339 }
340 }
341 #print STDERR "rejected $gene_name self-only sentence ",$pubmed_record->[2],"\n" unless $non_self_query_ref;
342 next unless $non_self_query_ref;
343 $id2sentences{$pubmed_record->[0]}->{$pubmed_record->[2]} = $pubmed_record->[1]; # value = sentence order within pubmed text
344 }
345 $relevance_score_final *= $relevance_score{$gene_name};
346 push @matched_query_terms, @{$matched_query_terms{$gene_name}};
347 }
348
349 # Write the output row for this variant; the match columns are left empty if nothing matched
350 my @ordered_ids = sort {$id2match_count{$b} <=> $id2match_count{$a}} keys %id2match_count;
351
352 # print sentences in each id in order, with ellipsis if not contiguous
353 my %h; # for de-duplicating the matched query terms below
354 print OUT join("\t", @F, ($relevance_score_final != 1 ? $relevance_score_final : ""), (@matched_query_terms ? join("; ", sort grep {not $h{$_}++} @matched_query_terms) : "")), "\t";
355 my $first_record = 1;
356 for my $id (@ordered_ids){
357 my $sentence2order = $id2sentences{$id};
358 my @ordered_sentences = sort {$sentence2order->{$a} <=> $sentence2order->{$b}} keys %$sentence2order;
359 next if scalar(@ordered_sentences) == 1; # the self-gene-reference filter above may leave only the title for a record; skip it in that case
360 if($first_record){
361 $first_record = 0;
362 }
363 else{
364 print OUT " // ";
365 }
366 my $title = shift(@ordered_sentences);
367 print OUT "$db_name $id",(defined $title ? " $title": ""),":"; # first sentence is always the record title
368 my $last_ordinal = 0;
369 for my $s (@ordered_sentences){
370 if($last_ordinal and $sentence2order->{$s} != $last_ordinal+1){
371 print OUT "..";
372 }
373 print OUT " ",$s;
374 $last_ordinal = $sentence2order->{$s};
375 }
376 }
377 print OUT "\n";
378 }
379
380 sub get_doc_offsets{
381 my ($db_handle, $word_stem) = @_;
382 my %doc2offsets;
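# structure: doc (gene symbol) -> list of [word start offset, word end offset, article id, sentence number, matched word]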
383
384 my $is_uc = $word_stem =~ /^[A-Z0-9]+$/;
385 my $has_wildcard = $word_stem =~ s/\*$//;
386 my $value = 0;
387 my $cursor_key = $word_stem;
388 # position the cursor at the first key greater than or equal to the stem, then step through the following keys in order
389 for(my $status = $db_handle->seq($cursor_key, $value, R_CURSOR);
390 $status == 0;
391 $status = $db_handle->seq($cursor_key, $value, R_NEXT)){
392 if(CORE::index($cursor_key,$word_stem) != 0){
393 last; # outside the records that have the requested stem now
394 }
395 for my $record (split /\n/s, $value){
396 my ($doc, @occ_infos) = split /:/, $record;
397 $doc2offsets{$doc} = [] if not exists $doc2offsets{$doc};
398 for my $occ_info (@occ_infos){
399 my ($term_offset, $id, $sentence_number) = split /,/, $occ_info, -1;
400 # record the start and end of the word to support the consecutive-word (phrase) matching in the main loop
401 push @{$doc2offsets{$doc}}, [$term_offset, $term_offset+length($cursor_key), $id, $sentence_number, $cursor_key];
402 }
403 }
404 last if $is_uc and not $has_wildcard; # only exact matches for upper case words like gene names
405 }
406 return \%doc2offsets;
407 }
408
409 sub mc{
410 if($_[0] =~ /^[A-Z][a-z]+$/){
411 return lc($_[0]); # sentence case normalization to lower case for regular words
412 }
413 else{
414 return $_[0]; # as-is for gene names, etc
415 }
416 }
417
418 sub fetch_sentence_key{
419 my ($id, $sentence_number, $query_term) = @_;
420
421 $sentence_number = 0 if not defined $sentence_number;
422 return ":$sentence_number" if not $id;
423 my $key = "$id:$sentence_number";
424 if(not exists $cached_sentences{$key}){
425 my @sentences = split /\n/, $sentence_index{$id};
426 $cached_sentences{$key} = $sentences[$sentence_number];
427 }
428 $cached_sentences{$key} =~ s/\b\Q$query_term\E\b(?!\*)/"*".uc($query_term)."*"/ge unless $query_term eq "";
429 #print STDERR "Highlighted $query_term in $cached_sentences{$key}\n" if $query_term =~ /cirrhosis/;
430 return $key;
431 }
432
433 sub fetch_sentence{
434 if(@_ == 1){ # from cache
435 return [split(/:/, $_[0]), $cached_sentences{$_[0]}];
436 }
437 else{ # more than one arg: read directly from the index; first arg is the index key, second is the sentence number (-1 means all sentences)
438 return undef if not exists $sentence_index{$_[0]};
439 my @sentences = split /\n/, $sentence_index{$_[0]};
440 if($_[1] < 0){ # all sentences request
441 return join("; ", @sentences);
442 }
443 return $sentences[$_[1]];
444 }
445 }
446
447
448 # Flatten a boolean operator tree into a single-depth list of OR'd query terms
449 sub flatten_query{
450 my $tree = shift @_;
451 my @or_queries;
452
453 # Base case: the tree is just a leaf (denoted by a hash reference). Return the value of the operand it represents.
454 if(ref $tree eq "HASH"){
455 return ($tree->{"operand"});
456 }
457
458 elsif(not ref $tree){
459 return $tree;
460 }
461
462 # Otherwise it's an operation array
463 if(ref $tree ne "ARRAY"){
464 die "Could not parse $tree, logic error in the query parser\n";
465 }
466
467 # Deal with AND first since it has higher precedence
468 for (my $i = 1; $i < $#{$tree}; $i++){
469 if($tree->[$i] eq "and"){
470 my @expanded_term;
471 my @t1_terms = flatten_query($tree->[$i-1]);
472 my @t2_terms = flatten_query($tree->[$i+1]);
473 #print STDERR "need to expand ", $tree->[$i-1], "(@t1_terms) AND ", $tree->[$i+1], "(@t2_terms)\n";
474 for my $term1 (@t1_terms){
475 for my $term2 (@t2_terms){
476 #print STDERR "Expanding to $term1 and $term2\n";
477 push @expanded_term, "$term1 and $term2";
478 }
479 }
480 splice(@$tree, $i-1, 3, @expanded_term);
481 $i--; # list has been shortened
482 }
483 }
484 # Should be only "OR" ops left
485 # Resolve any OR subtrees
486 for(my $i = 0; $i <= $#{$tree}; $i++){
487 next if $tree->[$i] eq "or";
488 push @or_queries, flatten_query($tree->[$i]); # otherwise recursive parse
489 }
490
491 return @or_queries;
492 }