---
# CI workflow: build a modulation-recognition dataset, train a model, then
# export/profile it as ONNX and convert to ORT format, uploading an artifact
# at each stage. Triggered on pushes and pull requests targeting main.
name: RIA Hub Workflow Demo

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  ria-demo:
    # NOTE(review): non-standard runner label — presumably a self-hosted
    # GPU (RTX 2080) runner registered with RIA Hub; confirm it exists.
    runs-on: ubuntu-latest-2080
    steps:
      # Diagnostic only: report whether a CUDA-capable GPU is visible.
      # Does not fail the job when no GPU is found.
      - name: Print GPU information
        run: |
          if command -v nvidia-smi &> /dev/null; then
            echo "✅ NVIDIA GPU is available"
            nvidia-smi
          else
            echo "⚠️ No NVIDIA GPU found"
          fi

      # lfs: true is required so Git LFS-tracked data files are pulled.
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          lfs: true

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          # Quoted so YAML does not parse 3.10 as the float 3.1.
          python-version: "3.10"

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      # Stage 1: generate the HDF5 dataset under data/dataset/.
      # PYTHONPATH=. lets the script import project-local packages.
      - name: 1. Build HDF5 Dataset
        run: |
          mkdir -p data/dataset
          PYTHONPATH=. python data/scripts/produce_dataset.py
          echo "datasets produced successfully"
        shell: bash

      # NOTE(review): upload-artifact is pinned at v3 throughout — kept as-is
      # because RIA Hub (Gitea-compatible) runners may not support v4; verify
      # before upgrading.
      - name: Upload Dataset Artifacts
        uses: actions/upload-artifact@v3
        with:
          name: ria-dataset
          path: data/dataset/**

      # Stage 2: train the model. NNPACK is disabled via env — presumably to
      # avoid PyTorch NNPACK init failures on this runner; confirm still needed.
      - name: 2. Train Model
        env:
          NO_NNPACK: 1
          PYTORCH_NO_NNPACK: 1
        run: |
          PYTHONPATH=. python data/training/train.py
          echo "training model"

      - name: Upload Checkpoints
        uses: actions/upload-artifact@v3
        with:
          name: ria-checkpoints
          path: checkpoint_files/inference_recognition_model.ckpt

      # Stage 3: export the trained checkpoint to ONNX (onnx_files/).
      - name: 3. Convert to ONNX file
        run: |
          PYTHONPATH=. python onnx_scripts/convert_to_onnx.py
          echo "building inference app"

      - name: Upload ONNX file
        uses: actions/upload-artifact@v3
        with:
          name: ria-demo-onnx
          path: onnx_files/inference_recognition_model.onnx

      # Stage 4: run onnxruntime profiling; emits onnxruntime_profile_*.json.
      - name: 4. Profile ONNX model
        run: |
          PYTHONPATH=. python onnx_scripts/profile_onnx.py

      - name: Upload JSON profiling data
        uses: actions/upload-artifact@v3
        with:
          name: profile-data
          # Quoted: a plain scalar starting with * would be read as an alias.
          path: '**/onnxruntime_profile_*.json'

      # Stage 5: convert the ONNX model to ORT format.
      # Fixed: was misnumbered "4." (duplicate) and used a hard-coded absolute
      # path (/workspace/qoherent/modrec-workflow/...) that only worked on one
      # runner layout; the relative path matches the upload step above.
      - name: 5. Convert to ORT file
        run: |
          python -m onnxruntime.tools.convert_onnx_models_to_ort \
            onnx_files/inference_recognition_model.onnx \
            --output_dir ort_files \
            --optimization_style Fixed \
            --target_platform amd64

      - name: Upload ORT file
        uses: actions/upload-artifact@v3
        with:
          name: ria-demo-ort
          path: ort_files/inference_recognition_model.ort