Mercurial > repos > ecology > sam3_semantic_segmentation
view sam3_semantic_segmentation.xml @ 2:809f339deae2 draft default tip
planemo upload for repository https://github.com/galaxyecology/tools-ecology/tree/master/tools/Sam3 commit 7b696f5e3039fc7f6f1b8ceb3d5262e230d0ab57
| author | ecology |
|---|---|
| date | Tue, 10 Mar 2026 10:42:43 +0000 |
| parents | 7c41c02b63b5 |
| children |
line wrap: on
line source
<tool id="sam3_semantic_segmentation" name="SAM3 Semantic Segmentation" version="1.0.0+galaxy0" profile="25.1">
    <description>SAM3 performs text-prompted semantic segmentation on images or videos.</description>
    <requirements>
        <container type="docker">quay.io/arthur_barreau/sam3_tool:1.0.0</container>
    </requirements>
    <required_files>
        <include path="sam3_semantic_segmentation.py"/>
    </required_files>
    <command detect_errors="exit_code"><![CDATA[
mkdir data_files &&
#set $name_file = ''
#if $input.input_kind == "image"
    #for $indata in $input.source
        ln -s '$indata' 'data_files/${indata.element_identifier}' &&
        #set $name_file += $indata.element_identifier + " "
    #end for
#else
    ln -s '$input.source' 'data_files/${input.source.element_identifier}' &&
    #set $name_file = $input.source.element_identifier + " "
#end if
python '$__tool_directory__/sam3_semantic_segmentation.py'
    --model '$sam3_models'
    --prompts '$text_prompt'
    --conf '$conf'
    --vid_stride '$vid_stride'
    --outdir outputs
## Guard the flag: with optional="true" an empty multi-select renders as the
## literal string "None", which would otherwise be passed verbatim to argparse.
#if str($outputs_format) not in ('None', '')
    --outputs '$outputs_format'
#end if
    --name_file '$name_file'
    ]]></command>
    <inputs>
        <param name="sam3_models" label="Model data" type="select" help="Contact the administrator of our Galaxy instance if you miss model data">
            <options from_data_table="huggingface">
                <filter type="static_value" column="5" value="1"/>
            </options>
            <validator message="No model annotation is available for SAM3" type="no_options"/>
        </param>
        <conditional name="input">
            <param name="input_kind" type="select" label="Input type" help="Choose either multiple images or a single video (not both)">
                <option value="image">One or more images</option>
                <option value="video">One video</option>
            </param>
            <when value="image">
                <param name="source" type="data" format="jpg,png" multiple="true" label="Input images"/>
            </when>
            <when value="video">
                <param name="source" type="data" format="mp4,avi,mov" multiple="false" label="Input video"/>
            </when>
        </conditional>
        <param name="text_prompt" type="text" label="Text prompt">
            <!-- The script has nothing to segment without a prompt; reject empty input up front. -->
            <validator type="empty_field" message="Please provide at least one text prompt"/>
            <help><![CDATA[ Comma-separated list of objects or classes to detect and segment in your images/videos. <br/> <u>Example for Wildlife:</u> <code>elephant, giraffe, lion</code> <br/> <u>Best practices:</u> Use clear, specific terms in English. Be consistent with singular/plural forms. ]]></help>
        </param>
        <param name="conf" type="float" value="0.25" min="0.01" max="1.0" label="Confidence threshold"/>
        <param name="vid_stride" type="integer" value="5" min="1" max="300" label="Video frame stride" help="For video input: process one frame every N frames."/>
        <param name="outputs_format" type="select" multiple="true" optional="true" label="Output formats">
            <option value="coco">COCO</option>
            <option value="yolo_bbox">YOLO bounding boxes</option>
            <option value="yolo_seg">YOLO segmentation</option>
        </param>
    </inputs>
    <outputs>
        <data name="Annotations_coco" format="json" from_work_dir="./outputs/annotations.json" label="Annotation COCO">
            <filter>outputs_format and "coco" in outputs_format</filter>
        </data>
        <!-- Always produced (no filter); labelled for consistency with the other outputs. -->
        <collection name="Outputs_annotated" type="list" label="Annotated outputs">
            <discover_datasets pattern="__name_and_ext__" directory="outputs/outputs_annotated"/>
        </collection>
        <collection name="Yolo_Bbox_Image" type="list" label="YOLO Bbox Images">
            <filter>outputs_format and 'yolo_bbox' in outputs_format</filter>
            <discover_datasets pattern="__name_and_ext__" directory="outputs/yolo_bbox/images"/>
        </collection>
        <collection name="Yolo_Bbox_Label" type="list" label="YOLO Bbox Labels">
            <filter>outputs_format and 'yolo_bbox' in outputs_format</filter>
            <discover_datasets pattern="__name_and_ext__" directory="outputs/yolo_bbox/labels"/>
        </collection>
        <collection name="Yolo_Seg_Image" type="list" label="YOLO Seg Images">
            <filter>outputs_format and 'yolo_seg' in outputs_format</filter>
            <discover_datasets pattern="__name_and_ext__" directory="outputs/yolo_seg/images"/>
        </collection>
        <collection name="Yolo_Seg_Label" type="list" label="YOLO Seg Labels">
            <filter>outputs_format and 'yolo_seg' in outputs_format</filter>
            <discover_datasets pattern="__name_and_ext__" directory="outputs/yolo_seg/labels"/>
        </collection>
    </outputs>
    <tests>
        <!-- Negative test: an unknown model value must make the wrapped script exit 1. -->
        <test expect_exit_code="1" expect_failure="true">
            <param name="sam3_models" value="unknown"/>
            <conditional name="input">
                <param name="input_kind" value="image"/>
                <param name="source" value="5827603936_3f1d5d715c_z.jpg"/>
            </conditional>
            <param name="text_prompt" value="elephant"/>
            <param name="conf" value="0.25"/>
            <param name="outputs_format" value="coco,yolo_bbox"/>
            <assert_stdout>
                <has_text text="Invalid model!"/>
            </assert_stdout>
        </test>
    </tests>
    <help><![CDATA[
===================================================
SAM3 – Text-Prompted Semantic Segmentation
===================================================

SAM3 allows semantic segmentation of objects in images and videos using natural language prompts.

Typical use cases include:

- Wildlife or object segmentation in videos
- Biomedical image segmentation (cells, tissues)
- Environmental or ecological video analysis

-------------------
How it works
-------------------

1. Provide an image or video
2. Enter a text prompt describing the object to segment
3. SAM3 generates segmentation masks and overlays
4. Results are returned as images or videos

-------------------
Notes
-------------------

- For large videos, increase the frame stride to reduce processing time
    ]]></help>
    <creator>
        <person name="Arthur Barreau" email="arthurbarreau.ab@gmail.com"/>
        <organization name="MOOREV" url="https://moorev.fr/"/>
        <organization name="PNDB" url="https://www.pndb.fr/pages/accueil/"/>
    </creator>
    <citations>
        <citation type="bibtex">
            @article{sam3,
              title={SAM3: Segment Anything in Images and Videos with Language Prompts},
              author={Ultralytics},
              year={2024}
            }
        </citation>
    </citations>
</tool>
