Repository: ebi-gxa / scanpy_run_pca
File: scanpy-run-pca.xml @ 14:f7ce9ed8a4f3 (draft)
"planemo upload for repository https://github.com/ebi-gene-expression-group/container-galaxy-sc-tertiary/tree/develop/tools/tertiary-analysis/scanpy commit 2db9e8cecb8d373874318e53ec2d4d7a64dd1b24-dirty"
| author   | ebi-gxa                         |
|----------|---------------------------------|
| date     | Fri, 11 Sep 2020 11:03:41 +0000 |
| parents  | 3e7ede8e1c94                    |
| children | 988edd5fa902                    |
<?xml version="1.0" encoding="utf-8"?> <tool id="scanpy_run_pca" name="Scanpy RunPCA" version="@TOOL_VERSION@+galaxy0" profile="@PROFILE@"> <description>for dimensionality reduction</description> <macros> <import>scanpy_macros2.xml</import> </macros> <expand macro="requirements"/> <command detect_errors="exit_code"><![CDATA[ ln -s '${input_obj_file}' input.h5 && PYTHONIOENCODING=utf-8 scanpy-run-pca --n-comps '${n_pcs}' #if $run_mode.chunked --chunked --chunk-size '${run_mode.chunk_size}' #else ${run_mode.zero_center} #if $run_mode.svd_solver --svd-solver '${run_mode.svd_solver}' #end if #if $run_mode.random_seed is not None --random-state '${run_mode.random_seed}' #end if #end if #if $extra_outputs and "embeddings" in str($extra_outputs).split(','): --export-embedding embeddings.tsv #end if @INPUT_OPTS@ @OUTPUT_OPTS@ ]]></command> <inputs> <expand macro="input_object_params"/> <expand macro="output_object_params"/> <param name="n_pcs" argument="--n-comps" type="integer" min="2" value="50" label="Number of PCs to produce"/> <conditional name="run_mode"> <param name="chunked" argument="--chunked" type="boolean" checked="false" label="Perform incremental PCA by chunks"/> <when value="true"> <param name="chunk_size" argument="--chunk-size" type="integer" value="0" label="Chunk size"/> </when> <when value="false"> <param name="zero_center" argument="--zero-center" type="boolean" truevalue="" falsevalue="--no-zero-center" checked="true" label="Zero center data before scaling"/> <param name="svd_solver" argument="--svd-solver" type="select" optional="true" label="SVD solver"> <option value="arpack">ARPACK</option> <option value="randomized">Randomised</option> </param> <param name="random_seed" argument="--random-state" type="integer" value="0" label="random seed for numpy random number generator"/> </when> </conditional> <param name="extra_outputs" type="select" multiple="true" display="checkboxes" optional="true" label="Export extra output"> <option value="embeddings">PCA embeddings</option> </param> </inputs> <outputs> <expand macro="output_data_obj" description="PCA object"/> <data name="output_embed" format="tabular" from_work_dir="embeddings.tsv" label="${tool.name} on ${on_string}: PCA embeddings"> <filter>extra_outputs and 'embeddings' in extra_outputs.split(',')</filter> </data> </outputs> <tests> <test> <param name="input_obj_file" value="scale_data.h5"/> <param name="input_format" value="anndata"/> <param name="output_format" value="anndata"/> <param name="extra_outputs" value="embeddings"/> <param name="n_pcs" value="50"/> <param name="zero_center" value="true"/> <param name="svd_solver" value="arpack"/> <param name="random_seed" value="1"/> <param name="chunked" value="false"/> <output name="output_h5" file="run_pca.h5" ftype="h5" compare="sim_size"/> <output name="output_embed" file="run_pca.embeddings.tsv" ftype="tabular" compare="sim_size" > <assert_contents> <has_n_columns n="51"/> </assert_contents> </output> </test> </tests> <help><![CDATA[ ================================================================================ Dimensionality reduction by PCA (principal component analysis) (`scanpy.pp.pca`) ================================================================================ It uses the implementation of *scikit-learn*. @HELP@ @VERSION_HISTORY@ ]]></help> <expand macro="citations"/> </tool>
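For orientation: the <command> block above assembles a call to the scanpy-run-pca entry point (the scanpy-scripts CLI that these wrappers drive), which in turn runs scanpy.pp.pca on the input AnnData object and optionally writes an embeddings table. The Python below is a rough, hand-written sketch of what a default run amounts to, not the code the wrapper executes; the file names and the exact embeddings export format are illustrative assumptions.

# Approximate equivalent of a default run of this tool (50 PCs, zero-centred,
# ARPACK solver, fixed random state). File names are hypothetical.
import pandas as pd
import scanpy as sc

adata = sc.read_h5ad("input.h5")  # AnnData input, e.g. a scaled expression matrix

sc.pp.pca(
    adata,
    n_comps=50,           # --n-comps
    zero_center=True,     # False corresponds to --no-zero-center
    svd_solver="arpack",  # --svd-solver
    random_state=0,       # --random-state
)

# The optional "PCA embeddings" output corresponds to adata.obsm["X_pca"]:
# one row per cell, one column per PC, plus the cell-name index, which is
# why the test above asserts 51 columns for 50 PCs.
pd.DataFrame(adata.obsm["X_pca"], index=adata.obs_names).to_csv(
    "embeddings.tsv", sep="\t"
)

adata.write("output.h5")  # handled by @OUTPUT_OPTS@ in the actual wrapper

One design note on the run_mode conditional: when --chunked is selected the wrapper deliberately drops --zero-center, --svd-solver and --random-state, because scanpy's chunked mode performs incremental PCA, which (per the scanpy documentation) zero-centres automatically and ignores the solver and random-seed settings. That is why those parameters only appear in the <when value="false"> branch.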