changeset 0:0d2226e1c5f6 draft

Uploaded
author cpt
date Fri, 17 Jun 2022 13:12:20 +0000
parents
children edd518e72c89
files cpt_read_garnier/cpt-macros.xml cpt_read_garnier/macros.xml cpt_read_garnier/reading_garnier_output.py cpt_read_garnier/reading_garnier_output.xml
diffstat 4 files changed, 361 insertions(+), 0 deletions(-) [+]
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/cpt_read_garnier/cpt-macros.xml	Fri Jun 17 13:12:20 2022 +0000
@@ -0,0 +1,115 @@
+<?xml version="1.0"?>
+<macros>
+	<xml name="gff_requirements">
+		<requirements>
+			<requirement type="package" version="2.7">python</requirement>
+			<requirement type="package" version="1.65">biopython</requirement>
+			<requirement type="package" version="2.12.1">requests</requirement>
+			<yield/>
+		</requirements>
+		<version_command>
+		<![CDATA[
+			cd $__tool_directory__ && git rev-parse HEAD
+		]]>
+		</version_command>
+	</xml>
+	<xml name="citation/mijalisrasche">
+		<citation type="doi">10.1371/journal.pcbi.1008214</citation>
+		<citation type="bibtex">@unpublished{galaxyTools,
+		author = {E. Mijalis, H. Rasche},
+		title = {CPT Galaxy Tools},
+		year = {2013-2017},
+		note = {https://github.com/tamu-cpt/galaxy-tools/}
+		}
+		</citation>
+	</xml>
+	<xml name="citations">
+		<citations>
+			<citation type="doi">10.1371/journal.pcbi.1008214</citation>
+			<citation type="bibtex">
+			@unpublished{galaxyTools,
+				author = {E. Mijalis, H. Rasche},
+				title = {CPT Galaxy Tools},
+				year = {2013-2017},
+				note = {https://github.com/tamu-cpt/galaxy-tools/}
+			}
+			</citation>
+			<yield/>
+		</citations>
+	</xml>
+	<xml name="citations-crr">
+		<citations>
+			<citation type="doi">10.1371/journal.pcbi.1008214</citation>
+			<citation type="bibtex">
+			@unpublished{galaxyTools,
+				author = {C. Ross},
+				title = {CPT Galaxy Tools},
+				year = {2020-},
+				note = {https://github.com/tamu-cpt/galaxy-tools/}
+			}
+			</citation>
+			<yield/>
+		</citations>
+	</xml>
+	<xml name="citations-2020">
+		<citations>
+			<citation type="doi">10.1371/journal.pcbi.1008214</citation>
+			<citation type="bibtex">
+			@unpublished{galaxyTools,
+				author = {E. Mijalis, H. Rasche},
+				title = {CPT Galaxy Tools},
+				year = {2013-2017},
+				note = {https://github.com/tamu-cpt/galaxy-tools/}
+			}
+			</citation>
+			<citation type="bibtex">
+			@unpublished{galaxyTools,
+				author = {A. Criscione},
+				title = {CPT Galaxy Tools},
+				year = {2019-2021},
+				note = {https://github.com/tamu-cpt/galaxy-tools/}
+			}
+			</citation>
+			<yield/>
+		</citations>
+	</xml>
+	<xml name="citations-2020-AJC-solo">
+		<citations>
+			<citation type="doi">10.1371/journal.pcbi.1008214</citation>
+			<citation type="bibtex">
+			@unpublished{galaxyTools,
+				author = {A. Criscione},
+				title = {CPT Galaxy Tools},
+				year = {2019-2021},
+				note = {https://github.com/tamu-cpt/galaxy-tools/}
+			}
+			</citation>
+			<yield/>
+		</citations>
+	</xml>
+	<xml name="citations-clm">
+		<citations>
+			<citation type="doi">10.1371/journal.pcbi.1008214</citation>
+			<citation type="bibtex">
+			@unpublished{galaxyTools,
+				author = {C. Maughmer},
+				title = {CPT Galaxy Tools},
+				year = {2017-2020},
+				note = {https://github.com/tamu-cpt/galaxy-tools/}
+			}
+			</citation>
+			<yield/>
+		</citations>
+	</xml>
+	<xml name="sl-citations-clm">
+		<citation type="bibtex">
+		@unpublished{galaxyTools,
+			author = {C. Maughmer},
+			title = {CPT Galaxy Tools},
+			year = {2017-2020},
+			note = {https://github.com/tamu-cpt/galaxy-tools/}
+		}
+		</citation>
+		<yield/>
+	</xml>
+</macros>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/cpt_read_garnier/macros.xml	Fri Jun 17 13:12:20 2022 +0000
@@ -0,0 +1,85 @@
+<?xml version="1.0"?>
+<macros>
+	<xml name="requirements">
+		<requirements>
+			<requirement type="package" version="3.8.13">python</requirement>
+			<requirement type="package" version="1.79">biopython</requirement>
+			<requirement type="package" version="1.2.2">cpt_gffparser</requirement>  
+			<yield/>
+		</requirements>
+	</xml>
+	<token name="@BLAST_TSV@">
+		"$blast_tsv"
+	</token>
+	<xml name="blast_tsv">
+		<param label="Blast Results" help="TSV/tabular (25 Column)"
+			name="blast_tsv" type="data" format="tabular" />
+	</xml>
+
+	<token name="@BLAST_XML@">
+		"$blast_xml"
+	</token>
+	<xml name="blast_xml">
+		<param label="Blast Results" help="XML format"
+			name="blast_xml" type="data" format="blastxml" />
+	</xml>
+	<xml name="gff3_with_fasta">
+		<param label="Genome Sequences" name="fasta" type="data" format="fasta" />
+		<param label="Genome Annotations" name="gff3" type="data" format="gff3" />
+	</xml>
+	<xml name="genome_selector">
+		<conditional name="reference_genome">
+			<param name="reference_genome_source" type="select" label="Reference Genome">
+				<option value="history" selected="True">From History</option>
+				<option value="cached">Locally Cached</option>
+			</param>
+			<when value="cached">
+				<param name="fasta_indexes" type="select" label="Source FASTA Sequence">
+					<options from_data_table="all_fasta"/>
+				</param>
+			</when>
+			<when value="history">
+				<param name="genome_fasta" type="data" format="fasta" label="Source FASTA Sequence"/>
+			</when>
+		</conditional>
+	</xml>
+	<xml name="gff3_input">
+		<param label="GFF3 Annotations" name="gff3_data" type="data" format="gff3"/>
+	</xml>
+	<xml name="input/gff3+fasta">
+		<expand macro="gff3_input" />
+		<expand macro="genome_selector" />
+	</xml>
+	<token name="@INPUT_GFF@">
+	"$gff3_data"
+	</token>
+	<token name="@INPUT_FASTA@">
+#if str($reference_genome.reference_genome_source) == 'cached':
+		"${reference_genome.fasta_indexes.fields.path}"
+#else if str($reference_genome.reference_genome_source) == 'history':
+		genomeref.fa
+#end if
+	</token>
+	<token name="@GENOME_SELECTOR_PRE@">
+#if $reference_genome.reference_genome_source == 'history':
+		ln -s $reference_genome.genome_fasta genomeref.fa;
+#end if
+	</token>
+	<token name="@GENOME_SELECTOR@">
+#if str($reference_genome.reference_genome_source) == 'cached':
+		"${reference_genome.fasta_indexes.fields.path}"
+#else if str($reference_genome.reference_genome_source) == 'history':
+		genomeref.fa
+#end if
+	</token>
+        <xml name="input/fasta">
+		<param label="Fasta file" name="sequences" type="data" format="fasta"/>
+	</xml>
+
+	<token name="@SEQUENCE@">
+		"$sequences"
+	</token>
+	<xml name="input/fasta/protein">
+		<param label="Protein fasta file" name="sequences" type="data" format="fasta"/>
+	</xml>
+</macros>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/cpt_read_garnier/reading_garnier_output.py	Fri Jun 17 13:12:20 2022 +0000
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+
+import csv
+import argparse
+
+# import sys
+
+# This function reads through the tagseq file and returns a list of sequence names and a list of the corresponding sequence lengths.
+def garnier_sequences(tagseq_file=None):
+    # open the file and create blank lists
+    f = tagseq_file  # open(tagseq_file, 'r')
+    f.seek(0)
+    sequence = []
+    lengths = []
+
+    # for each line in the file, search for the words 'Sequence:' and 'to:' to find the sequence name and length,
+    # respectively, then add them to the corresponding lists
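+    # a header line is assumed to look roughly like (name and length illustrative):
+    #   # Sequence: EXAMPLE_SEQ     from: 1     to: 163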
+    for line in f:
+        words = line.split()
+        if line.startswith("# Sequence:"):
+            sequence += [words[words.index("Sequence:") + 1]]
+            # membership test: list.index() would raise ValueError if 'to:' were absent
+            if "to:" in words:
+                lengths += [int(words[words.index("to:") + 1])]
+    # return the sequence names and lengths
+    return sequence, lengths
+
+
+# This function extracts the helix, sheet, turn, and coil predictions from the file. The predictions for each type of
+# secondary structure are joined together in one string.
+def garnier_secondary_struct(tagseq_file=None):
+    # opens the file and sets variables for the structural predictions
+    f = tagseq_file  # open(tagseq_file, 'r')
+    helix = ""
+    turns = ""
+    coil = ""
+    sheet = ""
+
+    # if the first word in the line indicates a structural prediction, add the rest of the line to the corresponding
+    # prediction string
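+    # prediction lines are assumed to carry a label in the first six characters followed by per-residue marks,
+    # e.g. (illustrative): "helix HHHH      HH   " and "sheet       EEE      "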
+    for line in f:
+        words = line.split()
+        if len(words) > 0:
+            if words[0] == "helix":
+                helix += line[6:].rstrip("\n")
+            elif words[0] == "sheet":
+                sheet += line[6:].rstrip("\n")
+            elif words[0] == "turns":
+                turns += line[6:].rstrip("\n")
+            elif words[0] == "coil":
+                coil += line[6:].rstrip("\n")
+    # f.close()
+    # returns the four structural prediction strings
+    return helix, turns, coil, sheet
+
+
+# This function cuts the strings into per-sequence pieces based on the lengths of the original sequences. Lengths are given in a list.
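+# e.g. vector_cutter("HHHEEECCC", [4, 5]) returns ["HHHE", "EECCC"]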
+def vector_cutter(vector, lengths_to_cut):
+    # sets up iteration variables
+    start = 0
+    end = lengths_to_cut[0]
+    maximum = len(lengths_to_cut)
+    # creates output list
+    output = []
+    # loops through the number of sequences based on the number of lengths
+    for i in range(maximum):
+        # outputs list of sequence strings
+        output += [str(vector[start:end])]
+        start = end
+        if i + 1 != maximum:
+            end += lengths_to_cut[i + 1]
+    # returns a list of strings, one per input sequence
+    return output
+
+
+# This function takes the helix, turn, sheet, and coil predictions for each sequence and creates a single structural
+# prediction string.
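+# e.g. single_prediction("H  ", " E ", "  T", "   ") returns ["H", "E", "T"]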
+def single_prediction(helix, sheet, turns, coil):
+    # sets output list
+    secondary_structure = []
+    # checks to make sure each of the strings is the same length
+    if len(helix) == len(sheet) == len(coil) == len(turns):
+        # loops through the length of each sequence, and when the value is not a blank it is added to the output
+        # prediction list.
+        for j in range(len(helix)):
+            if helix[j] != " ":
+                secondary_structure += [str(helix[j])]
+            elif sheet[j] != " ":
+                secondary_structure += [str(sheet[j])]
+            elif coil[j] != " ":
+                secondary_structure += [str(coil[j])]
+            else:
+                secondary_structure += [str(turns[j])]
+    # returns the output prediction list for the sequence
+    return secondary_structure
+
+
+if __name__ == "__main__":
+    # parse the command-line arguments (the garnier/tagseq report to read)
+    parser = argparse.ArgumentParser(
+        description="Read Garnier Secondary Structure Prediction"
+    )
+    parser.add_argument(
+        "tagseq_file", type=argparse.FileType("r"), help="Tagseq file input"
+    )
+    args = parser.parse_args()
+
+    # opens the tagseq file and prepares for writing csv
+    # f = open(sys.stdout, 'w', newline='')
+    # writer = csv.writer(f)
+
+    # read the tagseq file for the helix, turn, coil, and sheet prediction strings, as well as the names and lengths
+    # of the sequences summarized in the tagseq file
+    Hel, Tur, Coi, She = garnier_secondary_struct(**vars(args))
+    names, gives = garnier_sequences(**vars(args))
+
+    # cut each of the structural prediction strings so that they are individual sequences
+    Helix = vector_cutter(Hel, gives)
+    Sheet = vector_cutter(She, gives)
+    Turns = vector_cutter(Tur, gives)
+    Coil = vector_cutter(Coi, gives)
+
+    # for each sequence, compile the four types of structural predictions into a single prediction and print the
+    # final prediction to standard output (one name line and one tab-separated prediction line per sequence)
+    for i in range(len(Helix)):
+        Final = single_prediction(Helix[i], Sheet[i], Turns[i], Coil[i])
+        # csv.writerow(['Sequence: '] + [names[i]])
+        # csv.writerow(Final)
+        print("Sequence Name: " + "\t" + names[i])
+        print("\t".join(Final))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/cpt_read_garnier/reading_garnier_output.xml	Fri Jun 17 13:12:20 2022 +0000
@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<tool id="edu.tamu.cpt2.phage.read_garnier" name="Read Garnier Secondary Structure Prediction" version="1.0">
+    <description>read garnier tool output</description>
+    <macros>
+		<import>macros.xml</import>
+		<import>cpt-macros.xml</import>
+    </macros>
+    <expand macro="requirements"/>
+	<command detect_errors="aggressive"><![CDATA[
+python $__tool_directory__/reading_garnier_output.py
+$tagseq_file
+> $output]]></command>
+    <inputs>
+        <param label="Tagseq" name="tagseq_file" type="data" format="tagseq" />
+    </inputs>
+    <outputs>
+		<data format="tabular" name="output"/>
+    </outputs>
+    <help><![CDATA[
+**What it does**
+
+Reads the report produced by the EMBOSS garnier secondary structure prediction tool (in tagseq format) and condenses
+the per-residue helix, sheet, turn, and coil lines into a single structural prediction for each sequence in the report.
+
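+Output format (one pair of lines per sequence; the sequence name and characters shown are illustrative)::
+
+	Sequence Name:	EXAMPLE_SEQ
+	H	H	H	E	E	C	C	T
+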
+        ]]></help>
+		<expand macro="citations" />
+</tool>