Compare commits: 77179d38f3...53d0552fd4 (5 commits)

Commits: 53d0552fd4, 372de4d1c4, 1267833806, c9e996bac8, c12ba88b78
.riahub/workflows/workflow.yaml:

@@ -1,4 +1,4 @@
-name: RIA Hub Workflow Demo
+name: Modulation Recognition Demo

on:
  push:
@@ -11,9 +11,6 @@ on:
jobs:
  ria-demo:
    runs-on: ubuntu-latest-2080
-    env:
-      RIAGIT_USERNAME: ${{ secrets.USERNAME }}
-      RIAGIT_TOKEN: ${{ secrets.TOKEN }}
    steps:
      - name: Print GPU information
        run: |
@@ -24,7 +21,7 @@ jobs:
            echo "⚠️ No NVIDIA GPU found"
          fi

-      - name: Checkout code
+      - name: Checkout project code
        uses: actions/checkout@v4
        with:
          lfs: true
@@ -42,13 +39,10 @@ jobs:
            utils \
            -r requirements.txt

      - name: 1. Generate Recordings
        run: |
          mkdir -p data/recordings
-          PYTHONPATH=. python scripts/dataset_building/data_gen.py --output-dir data/recordings
-          echo "recordings produced successfully"
+          PYTHONPATH=. python scripts/dataset_manager/data_gen.py --output-dir data/recordings

      - name: ⬆️ Upload recordings
        uses: actions/upload-artifact@v3
@@ -59,11 +53,10 @@ jobs:
      - name: 2. Build HDF5 Dataset
        run: |
          mkdir -p data/dataset
-          PYTHONPATH=. python scripts/dataset_building/produce_dataset.py
-          echo "datasets produced successfully"
+          PYTHONPATH=. python scripts/dataset_manager/produce_dataset.py
        shell: bash

-      - name: 📤 Upload Dataset
+      - name: ⬆️ Upload Dataset
        uses: actions/upload-artifact@v3
        with:
          name: dataset
@@ -75,34 +68,30 @@ jobs:
          PYTORCH_NO_NNPACK: 1
        run: |
          mkdir -p checkpoint_files
-          PYTHONPATH=. python scripts/training/train.py 2>/dev/null
-          echo "training model"
+          PYTHONPATH=. python scripts/model_builder/train.py 2>/dev/null

      - name: 4. Plot Model
        env:
          NO_NNPACK: 1
          PYTORCH_NO_NNPACK: 1
        run: |
-          PYTHONPATH=. python scripts/training/plot_data.py 2>/dev/null
+          PYTHONPATH=. python scripts/model_builder/plot_data.py 2>/dev/null

-      - name: Upload Checkpoints
+      - name: ⬆️ Upload Checkpoints
        uses: actions/upload-artifact@v3
        with:
          name: checkpoints
          path: checkpoint_files/*

-      - name: 5. Convert to ONNX file
+      - name: 5. Export model to ONNX graph
        env:
          NO_NNPACK: 1
          PYTORCH_NO_NNPACK: 1
        run: |
          mkdir -p onnx_files
-          MKL_DISABLE_FAST_MM=1 PYTHONPATH=. python scripts/onnx/convert_to_onnx.py 2>/dev/null
-          echo "building inference app"
+          MKL_DISABLE_FAST_MM=1 PYTHONPATH=. python scripts/application_packager/convert_to_onnx.py 2>/dev/null

-      - name: Upload ONNX file
+      - name: ⬆️ Upload ONNX file
        uses: actions/upload-artifact@v3
        with:
          name: onnx-file
@@ -110,21 +99,20 @@ jobs:

      - name: 6. Profile ONNX model
        run: |
-          PYTHONPATH=. python scripts/onnx/profile_onnx.py
+          PYTHONPATH=. python scripts/application_packager/profile_onnx.py

-      - name: Upload JSON profiling data
+      - name: ⬆️ Upload JSON trace
        uses: actions/upload-artifact@v3
        with:
          name: profile-data
          path: '**/onnxruntime_profile_*.json'

-      - name: 7. Convert to ORT file
+      - name: 7. Convert ONNX graph to an ORT file
        run: |
-          PYTHONPATH=. python scripts/ort/convert_to_ort.py
+          PYTHONPATH=. python scripts/application_packager/convert_to_ort.py

-      - name: Upload ORT file
+      - name: ⬆️ Upload ORT file
        uses: actions/upload-artifact@v3
        with:
          name: ort-file
          path: ort_files/inference_recognition_model.ort
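The workflow above assumes a GPU-enabled runner (`ubuntu-latest-2080`); its first step prints GPU information from the shell. For reference, a minimal sketch of the same check done from Python with PyTorch (illustrative only, not a file in this repository):

```python
# Illustrative only: report GPU visibility from Python with PyTorch,
# mirroring the workflow's "Print GPU information" step.
import torch

if torch.cuda.is_available():
    print(f"GPU found: {torch.cuda.get_device_name(0)}")
else:
    print("⚠️ No NVIDIA GPU found")
```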
README.md (163 changed lines):

@@ -1,8 +1,7 @@
# Modulation Recognition Demo

-RIA Hub Workflows is an automation platform built into RIA Hub. This project contains an example machine learning
-workflow for the problem of signal modulation classification. It also serves as an excellent introduction to
-RIA Hub Workflows.
+RIA Hub Workflows is an automation platform integrated into RIA Hub. This project provides an example machine learning
+workflow for signal modulation classification, offering a practical introduction to RIA Hub Workflows.

## 📡 The machine learning development workflow
@@ -10,24 +9,24 @@ RIA Hub Workflows.
The development of intelligent radio solutions involves multiple steps:

1. First, we need to prepare a machine learning-ready dataset. This involves signal synthesis or capture, followed by
-dataset curation to extract and qualify training examples. Finally, we need to perform any required data preprocessing
-—such as augmentation—and split the dataset into training and test sets.
+dataset curation to extract and qualify examples. Finally, we need to perform any required data preprocessing—such as
+augmentation—and split the dataset into training and test sets.

-2. Secondly, we need to design and train a model. This is often an iterative process and can leverage techniques like
-Neural Architecture Search (NAS) and hyperparameter optimization to automate finding a suitable model structure and
-optimal hyperparameter configuration, respectively.
+2. Secondly, we need to design and train a model. This is typically an iterative process, often accelerated using
+techniques such as Neural Architecture Search (NAS) and hyperparameter optimization (HPO), which help automate the
+discovery of an effective model structure and optimal hyperparameter settings.

3. Once a machine learning model has been trained and validated, the next step is to build an inference application.
This step transforms the model from a research artifact into a practical tool capable of making predictions in
-real-world conditions. Building an inference application typically involves several substeps including model
+real-world conditions. Building an inference application typically involves several steps, including model
optimization, packaging and integration, and monitoring and logging.

-This is a lot of work, and much of it involves tedious software development and repetitive tasks like setting up and
-configuring infrastructure. What's more? There is a shortage of domain expertise in ML and MLOps for radio. That's
-where we come in. RIA Hub offers a no- and low-code solution for the end-to-end development of intelligent radio
-systems, allowing for a sharper focus on innovation.
+This is a lot of work, and much of it involves tedious software development and repetitive tasks, like setting up and
+configuring infrastructure. What's more? There is a shortage of domain expertise in ML and MLOps for radio. That's
+where we come in. RIA Hub offers a no-code and low-code solution for automating the end-to-end development of
+intelligent radio systems.

## ▶️ RIA Hub Workflows
@@ -36,24 +35,33 @@ One of the core principles of RIA Hub is Workflows, which allow users to run jobs

You can create workflows in one of two ways:
- Writing YAML and placing it in the special `.riahub/workflows/` directory in your repository.

- Using RIA Hub's built-in tools for Dataset Management, Model Building, and Application Development, which will
automatically generate the YAML workflow file(s) for you.

Workflows can be configured to run automatically on push and pull request events. You can monitor and manage running
workflows in the 'Workflows' tab in your repository.

+Workflows require a _runner_, which retrieves job definitions from RIA Hub, executes them in isolated containers, and
+reports the results back to RIA Hub. The next section outlines the convenience and advantages of using Qoherent-hosted
+runners. The workflow configuration defines the specifications and settings of the available job containers.
+
+The best part? RIA Hub Workflows are built on [Gitea Actions](https://docs.gitea.com/usage/actions/overview) (similar to [GitHub Actions](https://github.com/features/actions)), providing a
+familiar syntax and allowing you to leverage a wide range of third-party Actions.

## ⚙️ Qoherent-hosted runners

-Qoherent-hosted runners are job containers that Qoherent provides and manages to run your workflows and jobs in RIA Hub
-Workflows.
+Qoherent-hosted runners are workflow runners that Qoherent provides and manages to run your workflows and jobs in
+RIA Hub Workflows.

-Why use GitHub-hosted runners?
-- Easy to set up and start running workflows quickly, without the need to set up your own infrastructure.
-- Qoherent maintains runners equipped with access to common hardware and tools for radio ML development, including
+Why use Qoherent-hosted runners?
+- Start running workflows right away, without the need to set up your own infrastructure.
+- Qoherent maintains runners equipped with access to hardware and tools common for radio ML development, including
SDR testbeds and common embedded targets.

-If you want to learn more about the runners we have available, please feel free to reach out. We can also provide
+If you want to learn more about the runners we have available, [contact us](https://www.qoherent.ai/contact/) directly. We can also provide
custom runners equipped with specific radio hardware and RAN software upon request.

Want to register your own runner? No problem! Please refer to the RIA Hub documentation for more details.
@@ -61,6 +69,18 @@ Want to register your own runner? No problem! Please refer to the RIA Hub documentation for more details.

## 🔍 Modulation Recognition

+In radio, the modulation scheme refers to the method used to encode information onto a carrier signal. Common schemes
+such as BPSK, QPSK, and QAM vary the amplitude, phase, or frequency of the signal in structured ways to represent
+digital data. These schemes are fundamental to nearly all wireless communication systems, enabling efficient and
+reliable transmission over different channels and under various noise conditions.
+
+Machine learning-based modulation classification helps identify which modulation scheme is being used, especially
+in scenarios where prior knowledge of the signal format is unavailable or unreliable. Traditional methods often rely
+on expert-designed features and rule-based algorithms, which can struggle in real-world environments with multipath,
+interference, or hardware impairments. In contrast, ML-based approaches can learn complex patterns directly from
+raw signal data, offering higher robustness and adaptability. This is particularly valuable in applications like
+cognitive radio, spectrum monitoring, electronic warfare, and autonomous communication systems, where accurate and
+fast modulation recognition is critical.

## 🚀 Getting started
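The new Modulation Recognition section added above describes how schemes such as QPSK encode bits as structured phase changes. As a concrete illustration, a small NumPy sketch of QPSK mapping with additive noise at a fixed SNR (names and shapes are illustrative; the project's own generator lives in `scripts/dataset_manager/data_gen.py` and is not shown here):

```python
import numpy as np

rng = np.random.default_rng(42)

# QPSK: each pair of bits selects one of four phases on the unit circle.
bits = rng.integers(0, 2, size=2048).reshape(-1, 2)
symbols = ((1 - 2 * bits[:, 0]) + 1j * (1 - 2 * bits[:, 1])) / np.sqrt(2)

# Impair with additive white Gaussian noise at 6 dB SNR, echoing the
# config's SNR sweep (snr_start/snr_stop/snr_step).
snr_db = 6
noise_power = 10 ** (-snr_db / 10)
noise = np.sqrt(noise_power / 2) * (
    rng.standard_normal(symbols.shape) + 1j * rng.standard_normal(symbols.shape)
)
received = symbols + noise
```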
|
@ -69,44 +89,61 @@ Want to register your own runner? No problem! Please refer to the RIA Hub docume
|
||||||
|
|
||||||
|
|
||||||
2. Enable Workflows (*Settings → Advanced Settings → Enable Repository Actions*).
|
2. Enable Workflows (*Settings → Advanced Settings → Enable Repository Actions*).
|
||||||
|
_TODO: Remove this point once default units have been updated to include actions in forks_
|
||||||
|
|
||||||
|
|
||||||
3. Check for available runners. The runner management tab can found at the top of the 'Workflows' tab. If no runners
|
3. Check for available runners. The runner management tab can found at the top of the 'Workflows' tab in your
|
||||||
are available, you'll need to register one before proceeding.
|
repository. If no runners are available, you'll need to register one before proceeding.
|
||||||
|
|
||||||
|
|
||||||
4. Clone down the project. For example:
|
4. Configure Git API credentials, if not suitable credentials are already set. This is required for accessing Utils
|
||||||
|
in the job container. This requires three steps:
|
||||||
|
|
||||||
|
- Create a personal access token with the following permissions: `read:packages` (*User Settings → Applications → Manage Access Tokens*).
|
||||||
|
|
||||||
|
- Create a Workflow Variable `RIAHUB_USER` with your RIA Hub username (*Repo Settings → Actions → Variables Management*)
|
||||||
|
|
||||||
|
- Create a Workflow Secret `RIAHUB_TOKEN` with the token created above (*Repo Settings → Actions → Secrets Management*)
|
||||||
|
|
||||||
|
_TODO: Remove this point once the Utils wheel file has been added to this project._
|
||||||
|
|
||||||
|
|
||||||
|
5. Clone down the project. For example:
|
||||||
```commandline
|
```commandline
|
||||||
git clone https://git.riahub.ai/user/modrec-workflow.git
|
git clone https://git.riahub.ai/user/modrec-workflow.git
|
||||||
cd modrec-workflow
|
cd modrec-workflow
|
||||||
```
|
```
|
||||||
|
|
||||||
5. Set the workflow runner in `.riahub/workflows/workflow.yaml`. The runner is set on line 13:
|
6. Set the workflow runner in `.riahub/workflows/workflow.yaml`. The runner is set on line 13:
|
||||||
```yaml
|
```yaml
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest-2080
|
||||||
```
|
```
|
||||||
**Note:** We recommend running this demo on a GPU-enabled runner. If a GPU runner is not available, you can still run
|
**Note:** We recommend running this demo on a GPU-enabled runner. If a GPU runner is not available, you can still run
|
||||||
the workflow, but we suggest reducing the number of training epochs to keep runtime reasonable.
|
the workflow, but we suggest reducing the number of training epochs to keep runtime reasonable.
|
||||||
|
|
||||||
|
|
||||||
6. (Optional) Configure the workflow. All parameters—including file paths, model architecture, and training
|
7. (Optional) Configure the workflow. All parameters—including file paths, model architecture, and training
|
||||||
settings—are set in `conf/app.yaml`. Want to jump right in? The default configuration is suitable for getting started.
|
settings—are set in `conf/app.yaml`. Want to jump right in? No problem, the default configuration is suitable.
|
||||||
|
|
||||||
|
|
||||||
7. Push changes. This will start the workflow automatically.
|
8. Push changes. This will automatically trigger the workflow. You can monitor workflow progress under the 'Workflows'
|
||||||
|
tab in the repository.
|
||||||
|
|
||||||
|
|
||||||
8. Inspect the workflow output. You can expand and collapse individual steps to view their terminal output. A check
|
9. Inspect the workflow output. You can expand and collapse individual steps to view terminal output. A check
|
||||||
mark indicates that the step completed successfully.
|
mark indicates that the step completed successfully.
|
||||||
|
|
||||||
|
|
||||||
9. Inspect the workflow artifacts. Additional information on workflow artifacts can be found in the next section.
|
10. Inspect the workflow artifacts. Additional information on workflow artifacts can be found in the next section.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## Workflow artifacts
|
## Workflow artifacts
|
||||||
|
|
||||||
The example generates several workflow artifacts, including:
|
This workflow generates several artifacts, including:
|
||||||
|
|
||||||
|
- `recordings`: Folder of synthetic signal recordings.
|
||||||
|
|
||||||
|
|
||||||
- `dataset`: The training and validation datasets: `train.h5` and `val.h5`, respectively.
|
- `dataset`: The training and validation datasets: `train.h5` and `val.h5`, respectively.
|
||||||
|
|
||||||
|
|
||||||
|
@ -121,18 +158,22 @@ stages of training.
|
||||||
by [ONNX Runtime](https://onnxruntime.ai/) for more efficient loading and execution.)
|
by [ONNX Runtime](https://onnxruntime.ai/) for more efficient loading and execution.)
|
||||||
|
|
||||||
|
|
||||||
- `profile-data`: Model execution traces, in JSON format.
|
- `profile-data`: Model execution traces, in JSON format. See the section below for instructions on how to inspect the
|
||||||
|
trace using Perfetto.
|
||||||
|
|
||||||
|
|
||||||
- `recordings`: Folder of synthesised signal recordings.
|
## 📊 Inspecting the model trace using Perfetto
|
||||||
|
|
||||||
|
|
||||||
|
[Perfetto](https://ui.perfetto.dev/) is an open-source trace visualization tool developed by Google. It provides a powerful web-based
|
||||||
|
interface for inspecting model execution traces. Perfetto is especially useful for identifying bottlenecks.
|
||||||
|
|
||||||
|
To inspect model trace, navigate to Perfetto. Select *Navigation → Open trace file*, and choose the JSON trace file
|
||||||
|
includes in the `profile-data` artifact.
|
||||||
|
|
||||||
|
|
||||||
## 🤝 Contribution
|
## 🤝 Contribution
|
||||||
|
|
||||||
We welcome contributions from the community! Whether it's an enhancement, bug fix, or new how-to guide, your
|
We welcome contributions from the community! Whether it's an enhancement, bug fix, or new tutorial, your
|
||||||
input is valuable. To get started, please [contact us](https://www.qoherent.ai/contact/) directly, we're looking forward to collaborating with
|
input is valuable. To get started, please [contact us](https://www.qoherent.ai/contact/) directly, we're looking forward to collaborating with
|
||||||
you. 🚀
|
you. 🚀
|
||||||
|
|
||||||
|
@ -158,57 +199,3 @@ This example is **free and open-source**, released under [AGPLv3](https://www.gn
|
||||||
|
|
||||||
Alternative licensing options are available. Alternative licensing options are available. Please [contact us](https://www.qoherent.ai/contact/)
|
Alternative licensing options are available. Alternative licensing options are available. Please [contact us](https://www.qoherent.ai/contact/)
|
||||||
for further details.
|
for further details.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
### Configure GitHub Secrets
|
|
||||||
|
|
||||||
Before running the pipeline, add the following repository secrets in GitHub (Settings → Secrets and variables → Actions):
|
|
||||||
|
|
||||||
- **RIAHUB_USER**: Your RIA Hub username.
|
|
||||||
- **RIAHUB_TOKEN**: RIA Hub access token with `read:packages` scope (from your RIA Hub account **Settings → Access Tokens**).
|
|
||||||
- **CLONER_TOKEN**: Personal access token for `stark_cloner_bot` with `read_repository` scope (from your on-prem Git server user settings).
|
|
||||||
|
|
||||||
Once secrets are configured, you can run the pipeline:
|
|
||||||
|
|
||||||
|
|
||||||
3.
|
|
||||||
|
|
||||||
|
|
||||||
## How to View the JSON Trace File
|
|
||||||
|
|
||||||
- Captures a full trace of model training and inference performance for profiling and debugging
|
|
||||||
- Useful for identifying performance bottlenecks, optimizing resource usage, and tracking metadata
|
|
||||||
-
|
|
||||||
Access this [link](https://ui.perfetto.dev/)
|
|
||||||
Click on Open Trace File -> Select your specific JSON trace file
|
|
||||||
Explore detailed visualizations of performance metrics, timelines, and resource usage to diagnose bottlenecks and optimize your workflow.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## Submiting Issues
|
|
||||||
Found a bug or have a feature request?
|
|
||||||
Please submit an issue via the GitHub Issues page.
|
|
||||||
When reporting bugs, include:
|
|
||||||
Steps to reproduce
|
|
||||||
- Error logs and screenshots (if applicable)
|
|
||||||
- Your app.yaml configuration (if relevant)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## Developer Details
|
|
||||||
Coding Guidelines:
|
|
||||||
Follow PEP 8 for Python code style.
|
|
||||||
Include type annotations for all public functions and methods.
|
|
||||||
Write clear docstrings for modules, classes, and functions.
|
|
||||||
Use descriptive commit messages and reference issue numbers when relevant.
|
|
||||||
Contributing
|
|
||||||
All contributions must be reviewed via pull requests.
|
|
||||||
Run all tests and ensure code passes lint checks before submission.
|
conf/app.yaml:

@@ -1,20 +1,16 @@
-general:
-  # Run mode. Options are 'prod' or 'dev'.
-  run_mode: prod
-
dataset:
-  # Number of slices you want to split each recording into
-  num_slices: 8
+  # Seed for the random number generator, used for signal generation
+  seed: 42

-  # Training/val split between the 2 data sets
-  train_split: 0.8
-  val_split: 0.2
+  # Number of samples per recording
+  recording_length: 1024

-  # Used to initialize a random number generator.
-  seed: 25
-
-  # Multiple modulations to contain in the dataset
-  modulation_types: [bpsk, qpsk, qam16, qam64]
+  # List of signal modulation schemes to include in the dataset
+  modulation_types:
+    - bpsk
+    - qpsk
+    - qam16
+    - qam64

  # Rolloff factor for pulse shaping filter (0 < beta <= 1)
  beta: 0.3
@@ -23,20 +19,18 @@ dataset:
  sps: 4

  # SNR sweep range: start, stop (exclusive), and step (in dB)
-  snr_start: -6  # Start value of SNR sweep (in dB)
-  snr_stop: 13   # Stop value (exclusive) of SNR sweep (in dB)
-  snr_step: 3    # Step size for SNR sweep (in dB)
+  snr_start: -6
+  snr_stop: 13
+  snr_step: 3

-  # Number of iterations (samples) per modulation and SNR combination
+  # Number of iterations (signal recordings) per modulation and SNR combination
  num_iterations: 3

-  # Number of samples per generated recording
-  recording_length: 1024
-
-  # Settings for each modulation scheme
-  # Keys must match entries in `modulation_types`
-  # - `num_bits_per_symbol`: how many bits each symbol encodes (e.g., 1 for BPSK, 4 for 16-QAM)
-  # - `constellation_type`: type of modulation (e.g., "psk", "qam", "fsk", "ofdm")
+  # Modulation scheme settings; keys must match the `modulation_types` list above
+  # Each entry includes:
+  # - num_bits_per_symbol: bits encoded per symbol (e.g., 1 for BPSK, 4 for 16-QAM)
+  # - constellation_type: modulation category (e.g., "psk", "qam", "fsk", "ofdm")
+  # TODO: Combine entries for 'modulation_types' and 'modulation_settings'
  modulation_settings:
    bpsk:
      num_bits_per_symbol: 1
@@ -51,20 +45,25 @@ dataset:
      num_bits_per_symbol: 6
      constellation_type: qam

+  # Number of slices to cut from each recording
+  num_slices: 8
+
+  # Training and validation split ratios; must sum to 1
+  train_split: 0.8
+  val_split: 0.2

training:
-  # Number of training samples processed together before the model updates its weights
+  # Number of training examples processed together before the model updates its weights
  batch_size: 256

  # Number of complete passes through the training dataset during training
  epochs: 5

-  # Learning rate: how much weights are updated after every batch
-  # Suggested range for fine-tuning: 1e-6 to 1e-4
+  # Learning rate: step size for weight updates after each batch
+  # Recommended range for fine-tuning: 1e-6 to 1e-4
  learning_rate: 1e-4

-  # Whether to use GPU acceleration for training (if available)
+  # Enable GPU acceleration for training if available
  use_gpu: true

  # Dropout rate for individual neurons/layers (probability of dropping out a unit)
@@ -73,13 +72,12 @@ training:
  # Drop path rate: probability of dropping entire residual paths (stochastic depth)
  drop_path_rate: 0.2

-  # Weight decay (L2 regularization) to help prevent overfitting
+  # Weight decay (L2 regularization) coefficient to help prevent overfitting
  wd: 0.01


app:
-  # Optimization style for ORT conversion. Options: 'Fixed', 'None'
-  optimization_style: Fixed
+  # Optimization style for ORT conversion; options: 'Fixed', 'None'
+  optimization_style: "Fixed"

-  # Target platform architecture. Common options: 'amd64', 'arm64'
-  target_platform: amd64
+  # Target platform architecture; common options: 'amd64', 'arm64'
+  target_platform: "amd64"
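The repository reads this file through `helpers/app_settings.get_app_settings()`, whose implementation does not appear in this diff. A minimal sketch of what such a loader might do, assuming plain PyYAML and the structure shown above:

```python
# Sketch only: the real helpers/app_settings.py is not shown in this diff,
# so the loader below is an assumption based on conf/app.yaml's structure.
import yaml

def load_app_settings(path: str = "conf/app.yaml") -> dict:
    """Parse the workflow configuration YAML into a plain dict."""
    with open(path) as f:
        return yaml.safe_load(f)

settings = load_app_settings()
print(settings["dataset"]["modulation_types"])  # ['bpsk', 'qpsk', 'qam16', 'qam64']
print(settings["training"]["epochs"])           # 5
```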
scripts/application_packager/convert_to_onnx.py:

@@ -2,8 +2,8 @@ import os

import numpy as np
import torch
+from scripts.training.mobilenetv3 import RFClassifier, mobilenetv3

-from scripts.training.mobilenetv3 import mobilenetv3, RFClassifier
from helpers.app_settings import get_app_settings


@@ -12,8 +12,8 @@ def convert_to_onnx(ckpt_path: str, fp16: bool = False) -> None:
    Convert a PyTorch model to ONNX format.

    Parameters:
-        output_path (str): The path to save the converted ONNX model.
-        fp16 (bool): 16 float point percision
+        ckpt_path (str): Path to the model checkpoint to convert.
+        fp16 (bool): Whether to export with 16-bit floating-point precision.
    """
    settings = get_app_settings()

@@ -68,8 +68,6 @@ def convert_to_onnx(ckpt_path: str, fp16: bool = False) -> None:

if __name__ == "__main__":
-    settings = get_app_settings()
-
    model_checkpoint = "inference_recognition_model.ckpt"

    print("Converting to ONNX...")
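At its core, `convert_to_onnx.py` presumably wraps PyTorch's standard exporter. A hedged sketch of that call follows; the `(1, 2, 1024)` dummy-input shape is an assumption based on `recording_length: 1024` and I/Q channels, not a value taken from the file:

```python
# Sketch of a standard torch.onnx export; checkpoint loading and the
# settings plumbing from the real script are omitted.
import torch

def export_to_onnx(model: torch.nn.Module, output_path: str) -> None:
    model.eval()
    dummy = torch.randn(1, 2, 1024)  # assumed (batch, I/Q channels, samples)
    torch.onnx.export(
        model,
        dummy,
        output_path,
        input_names=["input"],
        output_names=["logits"],
        opset_version=17,
    )
```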
scripts/application_packager/convert_to_ort.py:

@@ -1,4 +1,5 @@
import subprocess
+
from helpers.app_settings import get_app_settings

settings = get_app_settings()
scripts/application_packager/profile_onnx.py:

@@ -1,9 +1,9 @@
-import onnxruntime as ort
-import numpy as np
-from helpers.app_settings import get_app_settings
+import json
import os
import time
-import json
+
+import numpy as np
+import onnxruntime as ort


def profile_onnx_model(
@@ -84,6 +84,5 @@ def profile_onnx_model(

if __name__ == "__main__":
-    settings = get_app_settings()
    output_path = os.path.join("onnx_files", "inference_recognition_model.onnx")
    profile_onnx_model(output_path)
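The traces uploaded as `profile-data` come from ONNX Runtime's built-in profiler. A minimal sketch of how such a trace is produced (the model path matches the workflow; the input name and shape are assumptions):

```python
# Sketch: enable ONNX Runtime profiling, run one inference, and retrieve
# the onnxruntime_profile_*.json trace that the workflow uploads.
import numpy as np
import onnxruntime as ort

opts = ort.SessionOptions()
opts.enable_profiling = True

session = ort.InferenceSession("onnx_files/inference_recognition_model.onnx", opts)
dummy = np.random.randn(1, 2, 1024).astype(np.float32)  # assumed input shape
session.run(None, {session.get_inputs()[0].name: dummy})

trace_path = session.end_profiling()  # path to the JSON trace
print("Trace written to", trace_path)
```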
scripts/dataset_manager/data_gen.py:

@@ -1,7 +1,9 @@
-from utils.data import Recording
-import numpy as np
-from utils.signal import block_generator
import argparse
+
+import numpy as np
+from utils.data import Recording
+from utils.signal import block_generator

from helpers.app_settings import get_app_settings

settings = get_app_settings().dataset
scripts/dataset_manager/produce_dataset.py:

@@ -1,7 +1,11 @@
-import os, h5py, numpy as np
+import os
from typing import List
-from utils.io import from_npy
+
+import h5py
+import numpy as np
from split_dataset import split, split_recording
+from utils.io import from_npy

from helpers.app_settings import DataSetConfig, get_app_settings

meta_dtype = np.dtype(
@@ -46,8 +50,6 @@ def write_hdf5_file(records: List, output_path: str, dataset_name: str = "data")
    )

    first_rec, _ = records[0]  # records[0] is a tuple of (data, md)
-    sample = first_rec
-    shape, dtype = sample.shape, sample.dtype

    with h5py.File(output_path, "w") as hf:
        data_arr = np.stack([rec[0] for rec in records])
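The `write_hdf5_file` change above drops two unused locals; the surviving pattern stacks the per-recording arrays into a single dataset. A minimal sketch of that pattern (field names and shapes are illustrative, not the file's actual metadata dtype):

```python
# Sketch of the stack-and-store HDF5 pattern used by write_hdf5_file.
import h5py
import numpy as np

records = [(np.random.randn(2, 1024).astype(np.float32), {"modulation": "bpsk"})
           for _ in range(4)]  # (data, metadata) tuples, as in the script

with h5py.File("example.h5", "w") as hf:
    data_arr = np.stack([rec[0] for rec in records])  # shape (N, 2, 1024)
    hf.create_dataset("data", data=data_arr)
```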
split_dataset.py:

@@ -1,6 +1,7 @@
import random
from collections import defaultdict
-from typing import List, Tuple, Dict
+from typing import Dict, List, Tuple
+
import numpy as np
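`split_dataset.py` provides the `split` and `split_recording` helpers imported by `produce_dataset.py`; only its import ordering changes here. As a hedged sketch, a ratio-based `split` consistent with `train_split: 0.8` / `val_split: 0.2` could look like this (the actual implementation is not shown in this diff):

```python
# Sketch only: a shuffle-and-cut split matching the configured ratios.
import random
from typing import List, Tuple

def split(records: List, train_ratio: float = 0.8) -> Tuple[List, List]:
    shuffled = records[:]  # avoid mutating the caller's list
    random.shuffle(shuffled)
    cut = int(len(shuffled) * train_ratio)
    return shuffled[:cut], shuffled[cut:]
```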
cm_plotter.py:

@@ -1,5 +1,6 @@
-import numpy as np
from typing import Optional
+
+import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import confusion_matrix
scripts/training/mobilenetv3.py:

@@ -1,8 +1,8 @@
-import numpy as np
-import torch
-import timm
-from torch import nn
import lightning as L
+import numpy as np
+import timm
+import torch
+from torch import nn

sizes = [
    "mobilenetv3_large_075",
@@ -24,11 +24,9 @@ class SqueezeExcite(nn.Module):
    def __init__(
        self,
        in_chs,
-        se_ratio=0.25,
        reduced_base_chs=None,
        act_layer=nn.SiLU,
        gate_fn=torch.sigmoid,
-        divisor=1,
        **_,
    ):
        super(SqueezeExcite, self).__init__()
@@ -77,13 +75,6 @@ class GBN(torch.nn.Module):
        self.act = act

    def forward(self, x):
-        # chunks = x.chunk(int(np.ceil(x.shape[0] / self.virtual_batch_size)), 0)
-        # res = [self.bn(x_) for x_ in chunks]
-        # return self.drop(self.act(torch.cat(res, dim=0)))
-        # x = self.bn(x)
-        # x = self.act(x)
-        # x = self.drop(x)
-        # return x
        return self.drop(self.act(self.bn(x)))
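`mobilenetv3.py` builds its backbones from the `timm` model names in the `sizes` list. A hedged sketch of instantiating one of them; `in_chans=2` (I/Q) and `num_classes=4` are assumptions drawn from the dataset config, not values read from this file:

```python
# Sketch: create one of the listed MobileNetV3 variants with timm.
import timm

model = timm.create_model(
    "mobilenetv3_large_075",  # first entry in the file's `sizes` list
    pretrained=False,
    in_chans=2,      # assumed: I and Q channels
    num_classes=4,   # assumed: bpsk, qpsk, qam16, qam64
)
```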
scripts/training/modulation_dataset.py:

@@ -1,10 +1,12 @@
-import sys, os
+import os
+import sys

sys.path.insert(0, os.path.abspath("../.."))  # or ".." if needed
+import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
-import h5py
from helpers.app_settings import get_app_settings

settings = get_app_settings()
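`ModulationH5Dataset`, used by both training and plotting, reads examples from the HDF5 files built earlier; its body is unchanged here. As an orientation aid only, a minimal sketch of an HDF5-backed PyTorch dataset (key names and layout are assumptions):

```python
# Sketch of an HDF5-backed Dataset in the spirit of ModulationH5Dataset.
import h5py
import torch
from torch.utils.data import Dataset

class H5SliceDataset(Dataset):
    def __init__(self, path: str, data_key: str = "data"):
        self.path, self.data_key = path, data_key
        with h5py.File(path, "r") as hf:
            self.length = hf[data_key].shape[0]

    def __len__(self) -> int:
        return self.length

    def __getitem__(self, idx: int) -> torch.Tensor:
        # Open per item so the dataset plays well with DataLoader workers.
        with h5py.File(self.path, "r") as hf:
            return torch.from_numpy(hf[self.data_key][idx])
```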
scripts/model_builder/plot_data.py:

@@ -1,16 +1,17 @@
import os
-import torch
import numpy as np
+import torch
from sklearn.metrics import classification_report

os.environ["NNPACK"] = "0"
-from matplotlib import pyplot as plt
-from scripts.training.mobilenetv3 import mobilenetv3, RFClassifier
-from helpers.app_settings import get_app_settings
from cm_plotter import plot_confusion_matrix
+from matplotlib import pyplot as plt
+from scripts.training.mobilenetv3 import RFClassifier, mobilenetv3
from scripts.training.modulation_dataset import ModulationH5Dataset

+from helpers.app_settings import get_app_settings


def load_validation_data():
    val_dataset = ModulationH5Dataset(
@@ -141,5 +142,4 @@ def plot_confusion_matrix_with_counts(

if __name__ == "__main__":
    settings = get_app_settings()
-    ckpt_path = os.path.join("checkpoint_files", "inference_recognition_model.ckpt")
-    evaluate_checkpoint(ckpt_path)
+    evaluate_checkpoint(os.path.join("checkpoint_files", "inference_recognition_model.ckpt"))
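`plot_data.py` evaluates a checkpoint over the validation set and renders a confusion matrix via `cm_plotter`. A tiny sketch of the underlying scikit-learn calls (labels are illustrative):

```python
# Sketch of the evaluation summary plot_data.py builds on.
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix

y_true = np.array([0, 1, 2, 3, 0, 1])  # illustrative ground-truth labels
y_pred = np.array([0, 1, 2, 2, 0, 1])  # illustrative predictions

print(classification_report(y_true, y_pred,
                            target_names=["bpsk", "qpsk", "qam16", "qam64"]))
print(confusion_matrix(y_true, y_pred))
```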
scripts/model_builder/train.py:

@@ -1,14 +1,16 @@
-import sys, os
+import os
+import sys

os.environ["NNPACK"] = "0"
import lightning as L
-from lightning.pytorch.callbacks import ModelCheckpoint
+import mobilenetv3
import torch
import torch.nn.functional as F
import torchmetrics
-from helpers.app_settings import get_app_settings
+from lightning.pytorch.callbacks import ModelCheckpoint
from modulation_dataset import ModulationH5Dataset
-import mobilenetv3
+
+from helpers.app_settings import get_app_settings

script_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.abspath(os.path.join(script_dir, ".."))
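`train.py` drives training with Lightning and saves checkpoints to `checkpoint_files/`, which the workflow uploads. A hedged sketch of that entry point; the monitored metric name and checkpoint filename are assumptions, and `model`, `train_loader`, and `val_loader` stand in for objects the script constructs:

```python
# Sketch of a Lightning training loop consistent with the imports above.
import lightning as L
from lightning.pytorch.callbacks import ModelCheckpoint

checkpoint_cb = ModelCheckpoint(
    dirpath="checkpoint_files",          # matches the workflow's upload path
    filename="inference_recognition_model",
    monitor="val_loss",                  # assumed metric name
)
trainer = L.Trainer(max_epochs=5, callbacks=[checkpoint_cb])  # epochs: 5 in conf/app.yaml
# trainer.fit(model, train_loader, val_loader)  # objects built elsewhere in train.py
```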