# modrec-workflow/.riahub/workflows/workflow.yaml
#
# RIA Hub CI workflow: on push/PR to main, verify GPU availability, install
# Python deps, then run the model pipeline (train -> ONNX export -> ORT
# conversion), uploading each stage's output as an artifact.
name: RIA Hub Workflow Demo

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  ria-demo:
    # GPU-equipped runner label (RIA Hub runner pool).
    runs-on: ubuntu-latest-2080

    steps:
      # Diagnostic only: report whether the runner actually exposes a GPU.
      - name: Print GPU information
        run: |
          if command -v nvidia-smi &> /dev/null; then
            echo "✅ NVIDIA GPU is available"
            nvidia-smi
          else
            echo "⚠️ No NVIDIA GPU found"
          fi

      - name: Checkout code
        uses: actions/checkout@v4
        with:
          # LFS needed so large dataset/model files are fetched, not pointers.
          lfs: true

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      # Step 1 is disabled; data/dataset/** is expected to come from the
      # LFS checkout. NOTE(review): if it is absent from the repo, the
      # "Upload Dataset Artifacts" step below will have nothing to upload —
      # re-enable this step or drop that upload.
      # - name: 1. Build HDF5 Dataset
      #   run: |
      #     mkdir -p data/dataset
      #     PYTHONPATH=. python data/scripts/produce_dataset.py
      #     echo "datasets produced successfully"
      #   shell: bash

      - name: Upload Dataset Artifacts
        uses: actions/upload-artifact@v3
        with:
          name: ria-dataset
          path: data/dataset/**

      - name: 2. Train Model
        env:
          # Disable NNPACK kernels in PyTorch (string values are the safe
          # canonical form for env vars).
          NO_NNPACK: "1"
          PYTORCH_NO_NNPACK: "1"
        run: |
          # NOTE(review): 2>/dev/null silences NNPACK warnings but also hides
          # real errors from the log; consider removing once training is stable.
          PYTHONPATH=. python data/training/train.py 2>/dev/null
          echo "training model"

      - name: Upload Checkpoints
        uses: actions/upload-artifact@v3
        with:
          name: ria-checkpoints
          path: checkpoint_files/inference_recognition_model.ckpt

      - name: 3. Convert to ONNX file
        run: |
          PYTHONPATH=. python convert_to_onnx.py
          echo "building inference app"

      - name: Upload ONNX file
        uses: actions/upload-artifact@v3
        with:
          name: ria-demo-onnx
          path: onnx_files/inference_recognition_model.onnx

      - name: 4. Convert to ORT file
        # Paths are workspace-relative (the previous absolute /onnx_files and
        # /ort_files pointed outside the checkout), and the trailing dangling
        # backslash after --output has been removed.
        # NOTE(review): verify flag names against the installed onnxruntime —
        # newer releases take a positional model path plus --output_dir.
        run: |
          mkdir -p ort_files
          python -m onnxruntime.tools.convert_onnx_models_to_ort \
            --input onnx_files/inference_recognition_model.onnx \
            --output ort_files/inference_recognition_model.ort

      - name: Upload ORT file
        uses: actions/upload-artifact@v3
        with:
          name: ria-demo-ort
          path: ort_files/inference_recognition_model.ort