<tool id="ludwig_evaluate" name="Ludwig Evaluate" version="@VERSION@" profile="@PROFILE@">
    <description>loads a pretrained model and evaluates its performance by comparing its predictions with ground truth</description>
    <macros>
        <import>ludwig_macros.xml</import>
    </macros>
    <expand macro="python_requirements_gpu" />
    <expand macro="macro_stdio" />
    <version_command>echo "@VERSION@"</version_command>
    <command>
        <![CDATA[
            #import re
            mkdir -p outputs &&
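            ## stage the dataset under a sanitized copy of its element identifier so the path is shell-safe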
            #if $dataset
            #set $sanitized_dataset = re.sub('[^\w\-_\.]', '_', $dataset.element_identifier.strip()) 
            ln -sf '$dataset' "./${sanitized_dataset}";
            #end if
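            ## optional raw data (e.g. image files referenced by the dataset) is unpacked into the working directory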
            #if $raw_data
                unzip -o -q '$raw_data' -d ./;
            #end if
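            ## run the wrapper script, forwarding the user-selected options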
            python '$__tool_directory__/ludwig_evaluate.py'
                #if $model_path
                --model_path '$model_path.extra_files_path'
                #end if
                #if $dataset
                --dataset "./${sanitized_dataset}"
                #end if
                #if $disable_parallel_threads
                --disable_parallel_threads
                #end if
                ## forward the batch size; assumes ludwig_evaluate.py accepts
                ## --batch_size like the underlying Ludwig CLI
                #if $batch_size
                --batch_size '$batch_size'
                #end if

                --output_directory "./outputs"
                --data_format '$data_format'
                --split '$split'
                --backend local
                --skip_save_unprocessed_output &&

            mkdir -p '$output_report.extra_files_path' &&
            cp outputs/*.json outputs/*.parquet '$output_report.extra_files_path' &&

            echo "Evaluation is Done!"
        ]]>
    </command>
    <configfiles>
        <inputs name="inputs" />
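        <!-- serializes all tool parameters to a JSON file, exposed to the template as $inputs -->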
    </configfiles>
    <inputs>
        <param name="model_path" type="data" format="ludwig_model" label="Load the pretrained model" />
        <param name="dataset" type="data" format="tabular,csv,h5,json,txt" label="Input dataset" />
        <param name="data_format" type="select" label="Data format">
            <option value="auto" selected="true">auto</option>
            <option value="tsv">tsv</option>
            <option value="csv">csv</option>
            <option value="h5">h5</option>
            <option value="json">json</option>
        </param>
        <param name="split" type="select" label="Select the split portion to test the model on">
            <option value="training">training</option>
            <option value="validation">validation</option>
            <option value="test">test</option>
            <option value="full" selected="true">full</option>
        </param>
        <param name="batch_size" type="integer" value="128" optional="true" label="Batch size" min="1" max="4096"/>
        <param name="disable_parallel_threads" type="boolean" checked="false" label="Disable parallel threads for reproducibility" />
        <param name="raw_data" type="data" format="zip" optional="true" label="Raw data" help="Optional. A ZIP archive of raw files (e.g. images) referenced by the dataset; it is unpacked into the working directory."/>
    </inputs>       
    <outputs>
        <collection type="list" name="output_pred_csv" label="${tool.name} predictions CSV/JSON on ${on_string}" >
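            <!-- discover JSON stats, the CSV converted from predictions.parquet, and any PNG plots written to outputs/ -->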
            <discover_datasets pattern="(?P&lt;designation&gt;predictions_parquet\.csv)" format="csv" directory="outputs" />
            <discover_datasets pattern="(?P&lt;designation&gt;.+)\.json" format="json" directory="outputs" />
            <discover_datasets pattern="(?P&lt;designation&gt;.+)\.png" format="png" directory="outputs" />
        </collection>
        <data format="html" name="output_report" from_work_dir="outputs/ludwig_evaluate_report.html" label="${tool.name} report on ${on_string}" />
    </outputs>
    <tests>
        <test expect_num_outputs="2">
            <param name="model_path" value="" ftype="ludwig_model">
                <composite_data value="temp_model01/model_hyperparameters.json" />
                <composite_data value="temp_model01/model_weights" />
                <composite_data value="temp_model01/training_set_metadata.json" />
                <composite_data value="temp_model01/training_progress.json" />
            </param>
            <param name="dataset" value="temperature_la.csv" ftype="csv" />
            <param name="split" value="test" />
            <output name="output_report" ftype="html">
                <assert_contents>
                    <has_text text="Evaluate" />
                </assert_contents>
            </output>

            <output_collection name="output_pred_csv">
                <element name="predictions_parquet.csv">
                    <assert_contents>
                        <has_n_columns n="1" />
                    </assert_contents>
                </element>
            </output_collection>
        </test>
    </tests>
    <help>
        <![CDATA[
**What it does**
This tool runs `ludwig evaluate`: it loads a pretrained Ludwig model and evaluates its performance by comparing its predictions with the ground truth in the supplied dataset.


**Input**

- A trained Ludwig model.
- A dataset to evaluate the model on.


**Output**

- An HTML evaluation report.
- A collection of prediction results (statistics as JSON, predictions as CSV, and any generated plots as PNG).
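
For reference, the wrapper builds an invocation roughly equivalent to the Ludwig CLI call sketched below (a sketch with placeholder paths, not the exact command Galaxy runs)::

    ludwig evaluate \
        --model_path /path/to/trained_model \
        --dataset dataset.csv \
        --split test \
        --output_directory outputs \
        --skip_save_unprocessed_output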


        ]]>
    </help>
    <expand macro="macro_citations" />
</tool>