preprocessing.xml @ 2:48d9421bf176 (draft)
planemo upload for repository https://github.com/galaxyproject/tools-iuc/tree/master/tools/snapatac2 commit df9c285dddde7d901823c608c8d7dab971224b5b
author:   iuc
date:     Fri, 05 Jul 2024 11:06:01 +0000
parents:  cec3e76eaf05
children: 4ecdea4cbea1
<tool id="snapatac2_preprocessing" name="SnapATAC2 Preprocessing" version="@TOOL_VERSION@+galaxy@VERSION_SUFFIX@" profile="@PROFILE@"> <description>and integration</description> <macros> <import>macros.xml</import> </macros> <requirements> <expand macro="requirements"/> </requirements> <command detect_errors="exit_code"><![CDATA[ #if $method.method != 'pp.make_fragment_file' and $method.method != 'pp.import_data' @PREP_ADATA@ #end if @CMD@ ]]></command> <configfiles> <configfile name="script_file"><![CDATA[ @CMD_imports@ #if $method.method != 'pp.make_fragment_file' and $method.method != 'pp.import_data' @CMD_read_inputs@ #end if #if $method.method == 'pp.make_fragment_file' sa.pp.make_fragment_file( bam_file = '$method.bam_file', is_paired = $method.is_paired, #if $method.barcode.extract_type == 'from_tag' #if $method.barcode.barcode_tag != '' barcode_tag = '$method.barcode.barcode_tag', #end if #elif $method.barcode.extract_type == 'from_read_names' #if $method.barcode.barcode_regex != '' barcode_regex = '$method.barcode.barcode_regex', #end if #end if #if $method.umi_tag != '' umi_tag = '$method.umi_tag', #end if #if $method.umi_regex != '' umi_regex = '$method.umi_regex', #end if shift_right = $method.shift_right, shift_left = $method.shift_left, min_mapq = $method.min_mapq, chunk_size = $method.chunk_size, compression = 'gzip', output_file = '$fragments_out', tempdir = "." ) #else if $method.method == 'pp.import_data' import csv with open('$method.chrom_sizes') as f: chr_sizes = {x[0]:int(x[1]) for x in csv.reader(f, delimiter='\t')} sa.pp.import_data( fragment_file = '$method.fragment_file', chrom_sizes = chr_sizes, min_num_fragments = $method.min_num_fragments, sorted_by_barcode = $method.sorted_by_barcode, #if str($method.whitelist) != 'None' whitelist = '$method.whitelist', #end if shift_left = $method.shift_left, shift_right = $method.shift_right, #set $chr_mt = ([x.strip() for x in str($method.chrM).split(',')]) chrM = $chr_mt, chunk_size = $method.chunk_size, file = 'anndata.h5ad', n_jobs = int(os.getenv("GALAXY_SLOTS", 4)) ) #else if $method.method == 'pp.add_tile_matrix' sa.pp.add_tile_matrix( adata, bin_size = $method.bin_size, chunk_size = $method.chunk_size, #if $method.exclude_chroms != '' #set $excl_chroms = ([x.strip() for x in str($method.exclude_chroms).split(',')]) exclude_chroms = $excl_chroms, #end if #if $method.min_frag_size min_frag_size = $method.min_frag_size, #end if #if $method.max_frag_size max_frag_size = $method.max_frag_size, #end if ##counting_strategy = '$method.counting_strategy', count_frag_as_reads = $method.count_frag_as_reads, n_jobs = int(os.getenv("GALAXY_SLOTS", 4)) ) #else if $method.method == 'pp.make_gene_matrix' adata = sa.pp.make_gene_matrix( adata, gene_anno = '$method.gene_anno', chunk_size = $method.chunk_size, use_x = $method.use_x, id_type = '$method.id_type', transcript_name_key = '$method.transcript_name_key', transcript_id_key = '$method.transcript_id_key', gene_name_key = '$method.gene_name_key', gene_id_key = '$method.gene_id_key', #if $method.min_frag_size min_frag_size = $method.min_frag_size, #end if #if $method.max_frag_size max_frag_size = $method.max_frag_size, #end if ##counting_strategy = '$method.counting_strategy' count_frag_as_reads = $method.count_frag_as_reads ) #else if $method.method == 'pp.filter_cells' sa.pp.filter_cells( adata, min_counts = $method.min_counts, min_tsse = $method.min_tsse, #if $method.max_counts max_counts = $method.max_counts, #end if #if $method.max_tsse max_tsse = $method.max_tsse, #end if 
inplace = True, n_jobs = int(os.getenv("GALAXY_SLOTS", 4)) ) #else if $method.method == 'pp.select_features' sa.pp.select_features( adata, n_features = $method.n_features, filter_lower_quantile = $method.filter_lower_quantile, filter_upper_quantile = $method.filter_upper_quantile, #if str($method.whitelist) != 'None' whitelist = '$method.whitelist', #end if #if str($method.blacklist) != 'None' blacklist = '$method.blacklist', #end if max_iter = $method.max_iter, inplace = True, n_jobs = int(os.getenv("GALAXY_SLOTS", 4)) ) #else if $method.method == 'pp.scrublet' sa.pp.scrublet( adata, #if $method.features features = '$method.features', #end if n_comps = $method.n_comps, sim_doublet_ratio = $method.sim_doublet_ratio, expected_doublet_rate = $method.expected_doublet_rate, #if $method.n_neighbors n_neighbors = $method.n_neighbors, #end if use_approx_neighbors = $method.use_approx_neighbors, random_state = $method.random_state, inplace = True, n_jobs = int(os.getenv("GALAXY_SLOTS", 4)) ) #else if $method.method == 'pp.filter_doublets' sa.pp.filter_doublets( adata, #if $method.probability_threshold probability_threshold = $method.probability_threshold, #end if #if $method.score_threshold score_threshold = $method.score_threshold, #end if inplace = True, n_jobs = int(os.getenv("GALAXY_SLOTS", 4)) ) #else if $method.method == 'pp.mnc_correct' sa.pp.mnc_correct( adata, batch = '$method.batch', n_neighbors = $method.n_neighbors, n_clusters = $method.n_clusters, n_iter = $method.n_iter, @CMD_params_data_integration@ inplace = True, n_jobs = int(os.getenv("GALAXY_SLOTS", 4)) ) #else if $method.method == 'pp.harmony' sa.pp.harmony( adata, batch = '$method.batch', @CMD_params_data_integration@ inplace = True ) #else if $method.method == 'pp.scanorama_integrate' sa.pp.scanorama_integrate( adata, batch = '$method.batch', n_neighbors = $method.n_neighbors, @CMD_params_data_integration@ inplace = True ) #else if $method.method == 'metrics.frag_size_distr' sa.metrics.frag_size_distr( adata, max_recorded_size = $method.max_recorded_size, add_key = '$method.add_key', inplace = True, n_jobs = int(os.getenv("GALAXY_SLOTS", 4)) ) #else if $method.method == 'metrics.tsse' sa.metrics.tsse( adata, gene_anno = '$method.gene_anno', inplace = True, n_jobs = int(os.getenv("GALAXY_SLOTS", 4)) ) #end if #if $method.method != 'pp.make_fragment_file' and $method.method != 'pp.import_data' @CMD_anndata_write_outputs@ #end if ]]></configfile> </configfiles> <inputs> <conditional name="method"> <param name="method" type="select" label="Method used for preprocessing"> <option value="pp.make_fragment_file">Convert a BAM file to a fragment file, using 'pp.make_fragment_file'</option> <option value="pp.import_data">Import data fragment files and compute basic QC metrics, using 'pp.import_data'</option> <option value="pp.add_tile_matrix">Generate cell by bin count matrix, using 'pp.add_tile_matrix'</option> <option value="pp.make_gene_matrix">Generate cell by gene activity matrix, using 'pp.make_gene_matrix'</option> <option value="pp.filter_cells">Filter cell outliers based on counts and numbers of genes expressed, using 'pp.filter_cells'</option> <option value="pp.select_features">Perform feature selection, using 'pp.select_features'</option> <option value="pp.scrublet">Compute probability of being a doublet using the scrublet algorithm, using 'pp.scrublet'</option> <option value="pp.filter_doublets">Remove doublets according to the doublet probability or doublet score, using 'pp.filter_doublets'</option> <option 
value="pp.mnc_correct">A modified MNN-Correct algorithm based on cluster centroid, using 'pp.mnc_correct'</option> <option value="pp.harmony">Use harmonypy to integrate different experiments,using 'pp.harmony'</option> <option value="pp.scanorama_integrate">Use Scanorama [Hie19] to integrate different experiments, using 'pp.scanorama_integrate'</option> <option value="metrics.frag_size_distr">Compute the fragment size distribution of the dataset, using 'metrics.frag_size_distr'</option> <option value="metrics.tsse">Compute the TSS enrichment score (TSSe) for each cell, using 'metrics.tsse'</option> </param> <when value="pp.make_fragment_file"> <param argument="bam_file" type="data" format="bam" label="File name of the BAM file"/> <param argument="is_paired" type="boolean" truevalue="True" falsevalue="False" checked="true" label="Indicate whether the BAM file contain paired-end reads"/> <conditional name="barcode"> <param name="extract_type" type="select" label="How to extract barcodes from BAM records?"> <option value="from_tag">From TAG fileds</option> <option value="from_read_names">From read names using regular expressions</option> </param> <when value="from_tag"> <param argument="barcode_tag" type="text" value="CB" optional="true" label="Extract barcodes from TAG fields of BAM records"/> </when> <when value="from_read_names"> <param argument="barcode_regex" type="text" value="" optional="true" label="Extract barcodes from read names of BAM records using regular expressions" help="`(..:..:..:..):\w+$` extracts `bd:69:Y6:10` from `A01535:24:HW2MMDSX2:2:1359:8513:3458:bd:69:Y6:10:TGATAGGTT``"/> </when> </conditional> <param argument="umi_tag" type="text" value="" optional="true" label="Extract UMI from TAG fields of BAM records"/> <param argument="umi_regex" type="text" value="" optional="true" label="Extract UMI from read names of BAM records using regular expressions"/> <expand macro="param_shift"/> <param argument="min_mapq" type="integer" min="0" value="30" label="Filter the reads based on MAPQ"/> <expand macro="param_chunk_size" size="50000000"/> </when> <when value="pp.import_data"> <param argument="fragment_file" type="data" format="interval" label="Fragment file, optionally compressed with gzip or zstd"/> <param argument="chrom_sizes" type="data" format="tabular" label="A tabular file containing chromosome names and sizes"/> <param argument="min_num_fragments" type="integer" value="200" label="Number of unique fragments threshold used to filter cells"/> <param argument="sorted_by_barcode" type="boolean" truevalue="True" falsevalue="False" checked="true" label="Whether the fragment file has been sorted by cell barcodes"/> <param argument="whitelist" type="data" format="txt" optional="True" label="Whitelist file with a list of barcodes" help="Each line must contain a valid barcode. 
When provided, only barcodes in the whitelist will be retained."/> <param argument="chrM" type="text" value="chrM, M" label="A list of chromosome names that are considered mitochondrial DNA"> <expand macro="sanitize_query"/> </param> <param argument="shift_left" type="integer" value="0" label="Insertion site correction for the left end" help="Note this has no effect on single-end reads"/> <param argument="shift_right" type="integer" value="0" label="Insertion site correction for the right end" help="Note this has no effect on single-end reads"/> <expand macro="param_chunk_size" size="2000"/> </when> <when value="pp.add_tile_matrix"> <expand macro="inputs_anndata"/> <param argument="bin_size" type="integer" value="500" label="The size of consecutive genomic regions used to record the counts"/> <expand macro="param_chunk_size" size="500"/> <param argument="exclude_chroms" type="text" value="chrM, chrY, M, Y" optional="true" label="A list of chromosomes to exclude"> <expand macro="sanitize_query"/> </param> <expand macro="min_max_frag_size"/> <!--expand macro="param_counting_strategy"/--> <param argument="count_frag_as_reads" type="boolean" truevalue="True" falsevalue="False" checked="true" label="Whether to count fragments as reads"/> </when> <when value="pp.make_gene_matrix"> <expand macro="inputs_anndata"/> <param argument="gene_anno" type="data" format="gtf,gff3" label="GTF/GFF file containing the gene annotation"/> <expand macro="param_chunk_size" size="500"/> <param argument="use_x" type="boolean" truevalue="True" falsevalue="False" checked="false" label="If True, use the matrix stored in .X as raw counts"/> <param argument="id_type" type="select" label="Id type, 'gene' or 'transcript'"> <option value="gene" selected="true">gene</option> <option value="transcript">transcript</option> </param> <param argument="transcript_name_key" type="text" value="transcript_name" label="The key of the transcript name in the gene annotation file"/> <param argument="transcript_id_key" type="text" value="transcript_id" label="The key of the transcript id in the gene annotation file"/> <param argument="gene_name_key" type="text" value="gene_name" label="The key of the gene name in the gene annotation file"/> <param argument="gene_id_key" type="text" value="gene_id" label="The key of the gene id in the gene annotation file"/> <expand macro="min_max_frag_size"/> <!--expand macro="param_counting_strategy"/--> <param argument="count_frag_as_reads" type="boolean" truevalue="True" falsevalue="False" checked="true" label="Whether to count fragments as reads"/> </when> <when value="pp.filter_cells"> <expand macro="inputs_anndata"/> <param argument="min_counts" type="integer" value="1000" label="Minimum number of counts required for a cell to pass filtering"/> <param argument="min_tsse" type="float" value="5.0" label="Minimum TSS enrichment score required for a cell to pass filtering"/> <param argument="max_counts" type="integer" value="" optional="true" label="Maximum number of counts allowed for a cell to pass filtering"/> <param argument="max_tsse" type="float" value="" optional="true" label="Maximum TSS enrichment score allowed for a cell to pass filtering"/> </when> <when value="pp.select_features"> <expand macro="inputs_anndata"/> <param argument="n_features" type="integer" min="1" value="500000" label="Number of features to keep"/> <param argument="filter_lower_quantile" type="float" min="0" value="0.005" label="Lower quantile of the feature count distribution to filter out"/> <param
argument="filter_upper_quantile" type="float" min="0" value="0.005" label="Upper quantile of the feature count distribution to filter out"/> <param argument="whitelist" type="data" format="bed" optional="true" label="A user provided bed file containing genome-wide whitelist regions"/> <param argument="blacklist" type="data" format="bed" optional="true" label="A user provided bed file containing genome-wide blacklist regions"/> <param argument="max_iter" type="integer" value="1" label="If greater than 1, this function will perform iterative clustering and feature selection"/> </when> <when value="pp.scrublet"> <expand macro="inputs_anndata"/> <param argument="features" type="text" value="" optional="true" label=" Boolean index mask, where True means that the feature is kept, and False means the feature is removed."/> <param argument="n_comps" type="integer" value="15" label="Number of components" help="15 is usually sufficient. The algorithm is not sensitive to this parameter"/> <param argument="sim_doublet_ratio" type="float" value="2.0" label="Number of doublets to simulate relative to the number of observed cells"/> <param argument="expected_doublet_rate" type="float" value="0.1" label="Expected doublet rate"/> <param argument="n_neighbors" type="integer" value="" optional="true" label="Number of neighbors used to construct the KNN graph of observed cells and simulated doublets"/> <param argument="use_approx_neighbors" type="boolean" truevalue="True" falsevalue="False" checked="false" label="Whether to use approximate search"/> <param argument="random_state" type="integer" value="0" label="Random state"/> </when> <when value="pp.filter_doublets"> <expand macro="inputs_anndata"/> <param argument="probability_threshold" type="float" value="0.5" label="Threshold for doublet probability"/> <param argument="score_threshold" type="float" value="" optional="true" label="Threshold for doublet score"/> </when> <when value="pp.mnc_correct"> <expand macro="inputs_anndata"/> <param argument="batch" type="text" value="batch" label="Batch labels for cells"> <expand macro="sanitize_query"/> </param> <param argument="n_neighbors" type="integer" value="5" label="Number of mutual nearest neighbors"/> <param argument="n_clusters" type="integer" value="40" label="Number of clusters"/> <param argument="n_iter" type="integer" value="1" label="Number of iterations"/> <expand macro="params_data_integration"/> </when> <when value="pp.harmony"> <expand macro="inputs_anndata"/> <param argument="batch" type="text" value="batch" label="Batch labels for cells"> <expand macro="sanitize_query"/> </param> <expand macro="params_data_integration"/> </when> <when value="pp.scanorama_integrate"> <expand macro="inputs_anndata"/> <param argument="batch" type="text" value="batch" label="Batch labels for cells"> <expand macro="sanitize_query"/> </param> <param argument="n_neighbors" type="integer" value="20" label="Number of mutual nearest neighbors"/> <expand macro="params_data_integration"/> </when> <when value="metrics.frag_size_distr"> <!-- TODO move this to plotting --> <expand macro="inputs_anndata"/> <param argument="max_recorded_size" type="integer" min="1" value="1000" label="The maximum fragment size to record in the result"/> <param argument="add_key" type="text" value="frag_size_distr" label="Key used to store the result in `adata.uns`"/> </when> <when value="metrics.tsse"> <!-- TODO move this to plotting --> <expand macro="inputs_anndata"/> <param argument="gene_anno" type="data" format="gtf,gff3" label="GTF/GFF 
file containing the gene annotation"/> </when> </conditional> <expand macro="inputs_common_advanced"/> </inputs> <outputs> <data name="fragments_out" format="interval" label="${tool.name} (${method.method}) on ${on_string}: Fragment file"> <filter>method['method'] == 'pp.make_fragment_file'</filter> </data> <data name="anndata_out" format="h5ad" from_work_dir="anndata.h5ad" label="${tool.name} (${method.method}) on ${on_string}: Annotated data matrix"> <filter>method['method'] != 'pp.make_fragment_file'</filter> </data> <data name="hidden_output" format="txt" label="Log file"> <filter>advanced_common['show_log']</filter> </data> </outputs> <tests> <test expect_num_outputs="1"> <!-- pp.make_fragment_file --> <conditional name="method"> <param name="method" value="pp.make_fragment_file"/> <param name="bam_file" location="https://zenodo.org/records/11260316/files/pbmc_500_chr21_subsample.bam"/> <param name="is_paired" value="true"/> <conditional name="barcode"> <param name="extract_type" value="from_tag"/> <param name="barcode_tag" value="CB"/> </conditional> <param name="shift_left" value="4"/> <param name="shift_right" value="-5"/> <param name="min_mapq" value="10"/> <param name="chunk_size" value="50000000"/> </conditional> <output name="fragments_out" location="https://zenodo.org/records/11260316/files/pp.make_fragment_file.pbmc_500_chr21.tsv.gz" ftype="interval" compare="sim_size" delta_frac="0.1"/> </test> <test expect_num_outputs="2"> <!-- pp.pp.import_data --> <conditional name="method"> <param name="method" value="pp.import_data"/> <param name="fragment_file" location="https://zenodo.org/records/11260316/files/pbmc_500_chr21.tsv.gz"/> <param name="chrom_sizes" location="https://zenodo.org/records/11260316/files/chr21_size.tabular"/> <param name="min_num_fragments" value="1"/> <param name="sorted_by_barcode" value="False"/> <param name="shift_left" value="0"/> <param name="chrM" value="chrM, M"/> <param name="shift_right" value="0"/> <param name="chunk_size" value="1000"/> </conditional> <section name="advanced_common"> <param name="show_log" value="true"/> </section> <output name="hidden_output"> <assert_contents> <has_text_matching expression="sa.pp.import_data"/> <has_text_matching expression="min_num_fragments = 1"/> <has_text_matching expression="sorted_by_barcode = False"/> <has_text_matching expression="shift_left = 0"/> <has_text_matching expression="chrM = \['chrM', 'M'\]"/> <has_text_matching expression="shift_right = 0"/> <has_text_matching expression="chunk_size = 1000"/> </assert_contents> </output> <output name="anndata_out" location="https://zenodo.org/records/11260316/files/pp.import_data.pbmc_500_chr21.h5ad" ftype="h5ad" compare="sim_size" delta_frac="0.1"/> </test> <test expect_num_outputs="2"> <!-- pp.make_gene_matrix --> <conditional name="method"> <param name="method" value="pp.make_gene_matrix"/> <param name="adata" location="https://zenodo.org/records/11260316/files/tl.leiden.modularity.pbmc_500_chr21.h5ad"/> <param name="gene_anno" location="https://zenodo.org/records/11260316/files/chr21.gff3.gz"/> <param name="chunk_size" value="500"/> <param name="use_x" value="False"/> <param name="id_type" value="gene"/> <param name="transcript_name_key" value="transcript_name"/> <param name="transcript_id_key" value="transcript_id"/> <param name="gene_name_key" value="gene_name"/> <param name="gene_id_key" value="gene_id"/> <param name="count_frag_as_reads" value="True"/> </conditional> <section name="advanced_common"> <param name="show_log" value="true" /> </section> 
<output name="hidden_output"> <assert_contents> <has_text_matching expression="sa.pp.make_gene_matrix"/> <has_text_matching expression="chunk_size = 500"/> <has_text_matching expression="use_x = False"/> <has_text_matching expression="id_type = 'gene'"/> <has_text_matching expression="transcript_name_key = 'transcript_name'"/> <has_text_matching expression="transcript_id_key = 'transcript_id'"/> <has_text_matching expression="gene_name_key = 'gene_name'"/> <has_text_matching expression="gene_id_key = 'gene_id'"/> <has_text_matching expression="count_frag_as_reads = True"/> </assert_contents> </output> <output name="anndata_out" location="https://zenodo.org/records/12548681/files/pp.make_gene_matrix.pbmc_500_chr21.h5ad" ftype="h5ad" compare="sim_size" delta_frac="0.1" /> </test> <test expect_num_outputs="2"> <!-- metrics.tsse --> <conditional name="method"> <param name="method" value="metrics.tsse"/> <param name="adata" location="https://zenodo.org/records/11260316/files/pp.import_data.pbmc_500_chr21.h5ad"/> <param name="gene_anno" location="https://zenodo.org/records/11260316/files/chr21.gff3.gz"/> </conditional> <section name="advanced_common"> <param name="show_log" value="true" /> </section> <output name="hidden_output"> <assert_contents> <has_text_matching expression="sa.metrics.tsse"/> </assert_contents> </output> <output name="anndata_out" location="https://zenodo.org/records/11260316/files/metrics.tsse.pbmc_500_chr21.h5ad" ftype="h5ad" compare="sim_size" delta_frac="0.1" /> </test> <test expect_num_outputs="2"> <!-- pp.filter_cells --> <conditional name="method"> <param name="method" value="pp.filter_cells"/> <param name="adata" location="https://zenodo.org/records/11260316/files/metrics.tsse.pbmc_500_chr21.h5ad"/> <param name="min_counts" value="200"/> <param name="min_tsse" value="5"/> <param name="max_counts" value="10000"/> </conditional> <section name="advanced_common"> <param name="show_log" value="true" /> </section> <output name="hidden_output"> <assert_contents> <has_text_matching expression="sa.pp.filter_cells"/> <has_text_matching expression="min_counts = 200"/> <has_text_matching expression="min_tsse = 5"/> <has_text_matching expression="max_counts = 10000"/> </assert_contents> </output> <output name="anndata_out" location="https://zenodo.org/records/11260316/files/pp.filter_cells.pbmc_500_chr21.h5ad" ftype="h5ad" compare="sim_size" delta_frac="0.1" /> </test> <test expect_num_outputs="2"> <!-- pp.add_tile_matrix --> <conditional name="method"> <param name="method" value="pp.add_tile_matrix"/> <param name="adata" location="https://zenodo.org/records/11260316/files/pp.filter_cells.pbmc_500_chr21.h5ad"/> <param name="bin_size" value="5000"/> <param name="chunk_size" value="500"/> <param name="exclude_chroms" value="chr1, chr2, chr3, chr4, chr5, chr6, chr7, chr8, chr9, chr10, chr11, chr12, chr13, chr14, chr15, chr16, chr17, chr18, chr19, chr20, chr22, chrX, chrY"/> <param name="count_frag_as_reads" value="True"/> </conditional> <section name="advanced_common"> <param name="show_log" value="true" /> </section> <output name="hidden_output"> <assert_contents> <has_text_matching expression="sa.pp.add_tile_matrix"/> <has_text_matching expression="bin_size = 5000"/> <has_text_matching expression="chunk_size = 500"/> <has_text_matching expression="exclude_chroms = \['chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8', 'chr9', 'chr10', 'chr11', 'chr12', 'chr13', 'chr14', 'chr15', 'chr16', 'chr17', 'chr18', 'chr19', 'chr20', 'chr22', 'chrX', 'chrY'\]"/> <has_text_matching 
expression="count_frag_as_reads = True"/> </assert_contents> </output> <output name="anndata_out" ftype="h5ad" compare="sim_size" delta_frac="0.1" location="https://zenodo.org/records/11260316/files/pp.add_tile_matrix.pbmc_500_chr21.h5ad"/> </test> <test expect_num_outputs="2"> <!-- pp.select_features --> <conditional name="method"> <param name="method" value="pp.select_features"/> <param name="adata" location="https://zenodo.org/records/11260316/files/pp.add_tile_matrix.pbmc_500_chr21.h5ad"/> <param name="n_features" value="15000"/> </conditional> <section name="advanced_common"> <param name="show_log" value="true" /> </section> <output name="hidden_output"> <assert_contents> <has_text_matching expression="sa.pp.select_features"/> <has_text_matching expression="n_features = 15000"/> </assert_contents> </output> <output name="anndata_out" ftype="h5ad" compare="sim_size" delta_frac="0.1" location="https://zenodo.org/records/11260316/files/pp.select_features.pbmc_500_chr21.h5ad"/> </test> <test expect_num_outputs="2"> <!-- pp.scrublet --> <conditional name="method"> <param name="method" value="pp.scrublet"/> <param name="adata" location="https://zenodo.org/records/11260316/files/pp.select_features.pbmc_500_chr21.h5ad"/> <param name="n_comps" value="15"/> <param name="sim_doublet_ratio" value="2.0"/> <param name="expected_doublet_rate" value="0.1"/> <param name="random_state" value="0"/> </conditional> <section name="advanced_common"> <param name="show_log" value="true" /> </section> <output name="hidden_output"> <assert_contents> <has_text_matching expression="sa.pp.scrublet"/> <has_text_matching expression="n_comps = 15"/> <has_text_matching expression="sim_doublet_ratio = 2.0"/> <has_text_matching expression="expected_doublet_rate = 0.1"/> <has_text_matching expression="random_state = 0"/> </assert_contents> </output> <output name="anndata_out" ftype="h5ad" compare="sim_size" delta_frac="0.1" location="https://zenodo.org/records/11260316/files/pp.scrublet.pbmc_500_chr21.h5ad"/> </test> <test expect_num_outputs="2"> <!-- pp.filter_doublets --> <conditional name="method"> <param name="method" value="pp.filter_doublets"/> <param name="adata" location="https://zenodo.org/records/11260316/files/pp.scrublet.pbmc_500_chr21.h5ad"/> <param name="probability_threshold" value="0.1"/> </conditional> <section name="advanced_common"> <param name="show_log" value="true" /> </section> <output name="hidden_output"> <assert_contents> <has_text_matching expression="sa.pp.filter_doublets"/> </assert_contents> </output> <output name="anndata_out" ftype="h5ad" compare="sim_size" delta_frac="0.1" location="https://zenodo.org/records/11260316/files/pp.filter_doublets.pbmc_500_chr21.h5ad"/> </test> <test expect_num_outputs="2"> <!-- pp.mnc_correct --> <conditional name="method"> <param name="method" value="pp.mnc_correct"/> <param name="adata" location="https://zenodo.org/records/11260316/files/pbmc_500_chr21.batch.h5ad"/> <param name="batch" value="batch"/> <param name="n_neighbors" value="3"/> <param name="n_clusters" value="10"/> <param name="use_rep" value="X_spectral"/> </conditional> <section name="advanced_common"> <param name="show_log" value="true" /> </section> <output name="hidden_output"> <assert_contents> <has_text_matching expression="sa.pp.mnc_correct"/> <has_text_matching expression="batch = 'batch'"/> <has_text_matching expression="n_neighbors = 3"/> <has_text_matching expression="n_clusters = 10"/> <has_text_matching expression="batch = 'batch'"/> <has_text_matching expression="use_rep = 
'X_spectral'"/> </assert_contents> </output> <output name="anndata_out" ftype="h5ad" compare="sim_size" delta_frac="0.1" location="https://zenodo.org/records/11260316/files/pp.mnc_correct.pbmc_500_chr21.h5ad"/> </test> <test expect_num_outputs="2"> <!-- pp.harmony --> <conditional name="method"> <param name="method" value="pp.harmony"/> <param name="adata" location="https://zenodo.org/records/11260316/files/pbmc_500_chr21.batch.h5ad"/> <param name="batch" value="batch"/> <param name="use_rep" value="X_spectral"/> </conditional> <section name="advanced_common"> <param name="show_log" value="true" /> </section> <output name="hidden_output"> <assert_contents> <has_text_matching expression="sa.pp.harmony"/> <has_text_matching expression="batch = 'batch'"/> <has_text_matching expression="use_rep = 'X_spectral'"/> </assert_contents> </output> <output name="anndata_out" ftype="h5ad" compare="sim_size" delta_frac="0.1" location="https://zenodo.org/records/11260316/files/pp.harmony.pbmc_500_chr21.h5ad"/> </test> <test expect_num_outputs="2"> <!-- pp.scanorama_integrate --> <conditional name="method"> <param name="method" value="pp.scanorama_integrate"/> <param name="adata" location="https://zenodo.org/records/11260316/files/pbmc_500_chr21.batch.h5ad"/> <param name="batch" value="batch"/> <param name="use_rep" value="X_spectral"/> </conditional> <section name="advanced_common"> <param name="show_log" value="true" /> </section> <output name="hidden_output"> <assert_contents> <has_text_matching expression="sa.pp.scanorama_integrate"/> <has_text_matching expression="batch = 'batch'"/> <has_text_matching expression="use_rep = 'X_spectral'"/> </assert_contents> </output> <output name="anndata_out" ftype="h5ad" compare="sim_size" delta_frac="0.1" location="https://zenodo.org/records/11260316/files/pp.scanorama_integrate.pbmc_500_chr21.h5ad"/> </test> <test expect_num_outputs="2"> <!-- metrics.frag_size_distr --> <conditional name="method"> <param name="method" value="metrics.frag_size_distr"/> <param name="adata" location="https://zenodo.org/records/11260316/files/pp.import_data.pbmc_500_chr21.h5ad"/> <param name="max_recorded_size" value="500"/> <param name="add_key" value="frag_size_distr"/> </conditional> <section name="advanced_common"> <param name="show_log" value="true" /> </section> <output name="hidden_output"> <assert_contents> <has_text_matching expression="sa.metrics.frag_size_distr"/> <has_text_matching expression="add_key = 'frag_size_distr'"/> </assert_contents> </output> <output name="anndata_out" location="https://zenodo.org/records/11260316/files/metrics.frag_size_distr.pbmc_500_chr21.h5ad" ftype="h5ad" compare="sim_size" delta_frac="0.1" /> </test> </tests> <help><![CDATA[
Convert a BAM file to a fragment file, using `pp.make_fragment_file`
====================================================================

Convert a BAM file to a fragment file by performing the following steps:

- Filtering: remove reads that are unmapped, are not primary alignments, have MAPQ below the `min_mapq` threshold (default 30), fail platform/vendor quality checks, or are optical duplicates. For paired-end sequencing, it also removes reads that are not properly aligned.
- Deduplication: sort the reads by cell barcode and remove duplicated reads for each unique cell barcode.
- Output: convert BAM records to fragments (if paired-end) or single-end reads.

The BAM file does not need to be sorted or filtered.
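For orientation, the standalone SnapATAC2 call that this tool option assembles looks roughly like the sketch below; the file names are placeholders and the remaining values mirror this tool's defaults::

    import snapatac2 as sa

    # Write a gzip-compressed fragment file from a barcoded BAM file
    # (both paths are placeholders).
    sa.pp.make_fragment_file(
        bam_file="sample.bam",
        output_file="fragments.tsv.gz",
        is_paired=True,
        barcode_tag="CB",
        min_mapq=30,
        compression="gzip",
    )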
More details on the `SnapATAC2 documentation <https://kzhang.org/SnapATAC2/api/_autosummary/snapatac2.pp.make_fragment_file.html>`__

Import data fragment files and compute basic QC metrics, using `pp.import_data`
===============================================================================

A fragment refers to the sequence data originating from a distinct location in the genome. In single-end sequencing, one read equates to a fragment; in paired-end sequencing, a fragment is defined by a pair of reads. This function reads, stores, and processes input files with fragment data and computes a range of basic quality control (QC) metrics, including the total number of unique fragments, duplication rates, and the percentage of mitochondrial DNA detected.

How fragments are stored depends on the sequencing approach. For single-end sequencing, fragments are found in `.obsm['fragment_single']`; for paired-end sequencing, they are located in `.obsm['fragment_paired']`.

More details on the `SnapATAC2 documentation <https://kzhang.org/SnapATAC2/api/_autosummary/snapatac2.pp.import_data.html>`__

Generate cell by bin count matrix, using `pp.add_tile_matrix`
=============================================================

This function generates a cell by bin count matrix and adds it to the AnnData object. `import_data` must be run first in order to use this function.

More details on the `SnapATAC2 documentation <https://kzhang.org/SnapATAC2/api/_autosummary/snapatac2.pp.add_tile_matrix.html>`__

Generate cell by gene activity matrix, using `pp.make_gene_matrix`
==================================================================

Generate a cell by gene activity matrix by counting the Tn5 insertions in gene body regions. The result is stored in a new file and a new AnnData object is created. `import_data` must be run first in order to use this function.

More details on the `SnapATAC2 documentation <https://kzhang.org/SnapATAC2/api/_autosummary/snapatac2.pp.make_gene_matrix.html>`__

Filter cell outliers based on counts and numbers of genes expressed, using `pp.filter_cells`
============================================================================================

For instance, only keep cells with at least `min_counts` counts or a TSS enrichment score of at least `min_tsse`. This filters measurement outliers, i.e. “unreliable” observations.

More details on the `SnapATAC2 documentation <https://kzhang.org/SnapATAC2/api/_autosummary/snapatac2.pp.filter_cells.html>`__

Perform feature selection, using `pp.select_features`
=====================================================

Perform feature selection by selecting the most accessible features across all cells, unless `max_iter` > 1, in which case iterative clustering and feature selection are performed.

More details on the `SnapATAC2 documentation <https://kzhang.org/SnapATAC2/api/_autosummary/snapatac2.pp.select_features.html>`__
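Outside of Galaxy, the matrix-building and filtering steps above are typically chained on the AnnData object produced by `pp.import_data`, in the same order as this tool's tests. A rough sketch, where file names are placeholders and the thresholds are illustrative rather than recommendations::

    import snapatac2 as sa

    # Open the AnnData produced by pp.import_data in backed mode
    # ("anndata.h5ad" is a placeholder path).
    adata = sa.read("anndata.h5ad")

    sa.metrics.tsse(adata, gene_anno="annotation.gff3.gz")  # placeholder GFF3 file
    sa.pp.filter_cells(adata, min_counts=1000, min_tsse=5.0)
    sa.pp.add_tile_matrix(adata, bin_size=500)
    sa.pp.select_features(adata, n_features=250000)

    adata.close()  # in backed mode, changes are saved to the backing file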
Compute probability of being a doublet using the scrublet algorithm, using `pp.scrublet`
========================================================================================

This function identifies doublets by generating simulated doublets from randomly paired chromatin accessibility profiles of individual cells. The simulated doublets are then embedded alongside the original cells using the spectral embedding algorithm in this package. A k-nearest-neighbor classifier is trained to distinguish between the simulated doublets and the authentic cells. This trained classifier produces a “doublet score” for each cell. The doublet scores are then converted into probabilities using a Gaussian mixture model.

More details on the `SnapATAC2 documentation <https://kzhang.org/SnapATAC2/api/_autosummary/snapatac2.pp.scrublet.html>`__

Remove doublets according to the doublet probability or doublet score, using `pp.filter_doublets`
=================================================================================================

The user can choose to remove doublets by either the doublet probability or the doublet score. `scrublet` must be run first in order to use this function.

More details on the `SnapATAC2 documentation <https://kzhang.org/SnapATAC2/api/_autosummary/snapatac2.pp.filter_doublets.html>`__

A modified MNN-Correct algorithm based on cluster centroid, using `pp.mnc_correct`
==================================================================================

A modified MNN-Correct algorithm based on cluster centroids.

More details on the `SnapATAC2 documentation <https://kzhang.org/SnapATAC2/api/_autosummary/snapatac2.pp.mnc_correct.html>`__

Use harmonypy to integrate different experiments, using `pp.harmony`
====================================================================

Harmony is an algorithm for integrating single-cell data from multiple experiments. This function uses the Python port of Harmony, `harmonypy`, to integrate single-cell data stored in an AnnData object. It should be run after performing dimension reduction.

More details on the `SnapATAC2 documentation <https://kzhang.org/SnapATAC2/api/_autosummary/snapatac2.pp.harmony.html>`__

Use Scanorama to integrate different experiments, using `pp.scanorama_integrate`
================================================================================

Scanorama is an algorithm for integrating single-cell data from multiple experiments stored in an AnnData object. This function should be run after performing `tl.spectral` but before computing the neighbor graph.

More details on the `SnapATAC2 documentation <https://kzhang.org/SnapATAC2/api/_autosummary/snapatac2.pp.scanorama_integrate.html>`__

Compute the fragment size distribution of the dataset, using `metrics.frag_size_distr`
=======================================================================================

This function computes the fragment size distribution of the dataset. Note that it does not operate at the single-cell level. The result is stored in a vector where each element represents the number of fragments and the index represents the fragment length. The first position of the vector is reserved for fragments with a size larger than the `max_recorded_size` parameter. `import_data` must be run first in order to use this function.
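The recorded distribution can then be inspected from `.uns` under the chosen `add_key`. A minimal sketch, assuming an AnnData file produced by `pp.import_data` (the path is a placeholder)::

    import snapatac2 as sa

    # AnnData produced by pp.import_data (placeholder path)
    adata = sa.read("anndata.h5ad")

    sa.metrics.frag_size_distr(adata, max_recorded_size=1000, add_key="frag_size_distr")

    # Element i holds the number of fragments of length i; element 0 counts
    # fragments longer than max_recorded_size.
    distr = adata.uns["frag_size_distr"]

    adata.close()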
More details on the `SnapATAC2 documentation <https://kzhang.org/SnapATAC2/api/_autosummary/snapatac2.metrics.frag_size_distr.html>`__

Compute the TSS enrichment score (TSSe) for each cell, using `metrics.tsse`
===========================================================================

Compute the TSS enrichment score (TSSe) for each cell. `import_data` must be run first in order to use this function.

More details on the `SnapATAC2 documentation <https://kzhang.org/SnapATAC2/api/_autosummary/snapatac2.metrics.tsse.html>`__
]]></help> <expand macro="citations"/> </tool>