Compare commits
3 Commits
main
...
agent-nami
| Author | SHA1 | Date | |
|---|---|---|---|
|
J
|
99447a581a | ||
|
J
|
2f6b5ced18 | ||
|
J
|
eb5b4ce839 |
|
|
@ -159,7 +159,7 @@ Finally, RIA Toolkit OSS can be installed directly from the source code. This ap
|
|||
Once the project is installed, you can import modules, functions, and classes from the Toolkit for use in your Python code. For example, you can use the following import statement to access the `Recording` object:
|
||||
|
||||
```python
|
||||
from ria_toolkit_oss.data import Recording
|
||||
from ria_toolkit_oss.datatypes import Recording
|
||||
```
|
||||
|
||||
Additional usage information is provided in the project documentation: [RIA Toolkit OSS Documentation](https://ria-toolkit-oss.readthedocs.io/).
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@ sys.path.insert(0, os.path.abspath(os.path.join('..', '..')))
|
|||
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
|
||||
|
||||
project = 'ria-toolkit-oss'
|
||||
copyright = '2026, Qoherent Inc'
|
||||
copyright = '2025, Qoherent Inc'
|
||||
author = 'Qoherent Inc.'
|
||||
release = '0.1.5'
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
.. _sdr_examples:
|
||||
.. _examples:
|
||||
|
||||
############
|
||||
SDR Examples
|
||||
|
|
|
|||
|
|
@ -25,7 +25,7 @@ In this example, we initialize the `Blade` SDR, configure it to record a signal
|
|||
|
||||
import time
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.sdr.blade import Blade
|
||||
|
||||
my_radio = Blade()
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ Code
|
|||
|
||||
import numpy as np
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.sdr.blade import Blade
|
||||
|
||||
# Parameters
|
||||
|
|
|
|||
|
|
@ -1027,7 +1027,7 @@ For quick non-CLI use:
|
|||
|
||||
.. code-block:: python
|
||||
|
||||
from ria_toolkit_oss.data import Recording
|
||||
from ria_toolkit_oss.datatypes import Recording
|
||||
from ria_toolkit_oss.io import load_recording, to_sigmf
|
||||
from ria_toolkit_oss.transforms import iq_augmentations, iq_impairments
|
||||
|
||||
|
|
|
|||
|
|
@ -11,15 +11,15 @@ The Radio Dataset Framework provides a software interface to access and manipula
|
|||
the need for users to interface with the source files directly. Instead, users initialize and interact with a Python
|
||||
object, while the complexities of efficient data retrieval and source file manipulation are managed behind the scenes.
|
||||
|
||||
Ria Toolkit OSS includes an abstract class called :py:obj:`ria_toolkit_oss.data.datasets.RadioDataset`, which defines common properties and
|
||||
behaviors for all radio datasets. :py:obj:`ria_toolkit_oss.data.datasets.RadioDataset` can be considered a blueprint for all
|
||||
Ria Toolkit OSS includes an abstract class called :py:obj:`ria_toolkit_oss.datatypes.datasets.RadioDataset`, which defines common properties and
|
||||
behaviors for all radio datasets. :py:obj:`ria_toolkit_oss.datatypes.datasets.RadioDataset` can be considered a blueprint for all
|
||||
other radio dataset classes. This class is then subclassed to define more specific blueprints for different types
|
||||
of radio datasets. For example, :py:obj:`ria_toolkit_oss.data.datasets.IQDataset`, which is tailored for machine learning tasks
|
||||
of radio datasets. For example, :py:obj:`ria_toolkit_oss.datatypes.datasets.IQDataset`, which is tailored for machine learning tasks
|
||||
involving the processing of signals represented as IQ (In-phase and Quadrature) samples.
|
||||
|
||||
Then, in the various project backends, there are concrete dataset classes, which inherit from both Ria Toolkit OSS and the base
|
||||
dataset class from the respective backend. For example, the :py:obj:`TorchIQDataset` class extends both
|
||||
:py:obj:`ria_toolkit_oss.data.datasets.IQDataset` from Ria Toolkit OSS and :py:obj:`torch.ria_toolkit_oss.data.IterableDataset` from
|
||||
:py:obj:`ria_toolkit_oss.datatypes.datasets.IQDataset` from Ria Toolkit OSS and :py:obj:`torch.ria_toolkit_oss.datatypes.IterableDataset` from
|
||||
PyTorch, providing a concrete dataset class tailored for IQ datasets and optimized for the PyTorch backend.
|
||||
|
||||
Dataset initialization
|
||||
|
|
@ -130,7 +130,7 @@ Dataset processing and manipulation
|
|||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
All radio datasets support methods tailored specifically for radio processing. These methods are backend-independent,
|
||||
inherited from the blueprints in Ria Toolkit OSS like :py:obj:`ria_toolkit_oss.data.datasets.RadioDataset`.
|
||||
inherited from the blueprints in Ria Toolkit OSS like :py:obj:`ria_toolkit_oss.datatypes.datasets.RadioDataset`.
|
||||
|
||||
For example, we can trim down the length of the examples from 1,024 to 512 samples, and then augment the dataset:
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
Dataset License SubModule
|
||||
=========================
|
||||
|
||||
.. automodule:: ria_toolkit_oss.data.datasets.license
|
||||
.. automodule:: ria_toolkit_oss.datatypes.datasets.license
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
|
|
|||
|
|
@ -1,11 +1,11 @@
|
|||
Datatypes Package (ria_toolkit_oss.data)
|
||||
Datatypes Package (ria_toolkit_oss.datatypes)
|
||||
=============================================
|
||||
|
||||
.. |br| raw:: html
|
||||
|
||||
<br />
|
||||
|
||||
.. automodule:: ria_toolkit_oss.data
|
||||
.. automodule:: ria_toolkit_oss.datatypes
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
|
@ -13,7 +13,7 @@ Datatypes Package (ria_toolkit_oss.data)
|
|||
Radio Dataset SubPackage
|
||||
------------------------
|
||||
|
||||
.. automodule:: ria_toolkit_oss.data.datasets
|
||||
.. automodule:: ria_toolkit_oss.datatypes.datasets
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
|
|
@ -21,5 +21,5 @@ Radio Dataset SubPackage
|
|||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
Dataset License SubModule <ria_toolkit_oss.data.datasets.license>
|
||||
Dataset License SubModule <ria_toolkit_oss.datatypes.datasets.license>
|
||||
Radio Datasets <radio_datasets>
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ class and function signatures, and doctest examples where available.
|
|||
:maxdepth: 2
|
||||
:caption: Contents:
|
||||
|
||||
Data Package <data/ria_toolkit_oss.data>
|
||||
Datatypes Package <datatypes/ria_toolkit_oss.datatypes>
|
||||
SDR Package <ria_toolkit_oss.sdr>
|
||||
IO Package <ria_toolkit_oss.io>
|
||||
Transforms Package <ria_toolkit_oss.transforms>
|
||||
|
|
|
|||
|
|
@ -1,87 +1,77 @@
|
|||
.. _blade:
|
||||
|
||||
BladeRF
|
||||
=======
|
||||
|
||||
The BladeRF is a versatile software-defined radio (SDR) platform developed by Nuand. It is designed for a wide
|
||||
range of applications, from wireless communication research to field deployments. BladeRF devices are known
|
||||
for their high performance, flexibility, and extensive open-source support, making them suitable for both
|
||||
hobbyists and professionals. The BladeRF is based on the Analog Devices AD9361 RF transceiver, which provides
|
||||
wide frequency coverage and high bandwidth.
|
||||
|
||||
Supported Models
|
||||
----------------
|
||||
|
||||
- **BladeRF 2.0 Micro xA4:** A compact model with a 49 kLE FPGA, ideal for portable applications.
|
||||
- **BladeRF 2.0 Micro xA9:** A higher-end version of the Micro with a 115 kLE FPGA, offering more processing power in a small form factor.
|
||||
|
||||
Key Features
|
||||
------------
|
||||
|
||||
- **Frequency Range:** Typically from 47 MHz to 6 GHz, covering a wide range of wireless communication bands.
|
||||
- **Bandwidth:** Up to 56 MHz, allowing for wideband signal processing.
|
||||
- **FPGA:** Integrated FPGA (varies by model) for real-time processing and custom logic development.
|
||||
- **Connectivity:** USB 3.0 interface for high-speed data transfer, with options for GPIO, SPI, and other I/O.
|
||||
|
||||
Hackability
|
||||
-----------
|
||||
|
||||
- **Expansion:** The BladeRF features GPIO, expansion headers, and add-on boards, allowing users to extend the
|
||||
functionality of the device for specific applications, such as additional RF front ends.
|
||||
- **Frequency and Bandwidth Modification:** Advanced users can modify the BladeRF's settings and firmware to
|
||||
explore different frequency bands and optimize the bandwidth for their specific use cases.
|
||||
|
||||
Limitations
|
||||
-----------
|
||||
|
||||
- The complexity of FPGA development may present a steep learning curve for users unfamiliar with hardware
|
||||
description languages (HDL).
|
||||
- Bandwidth is capped at 56 MHz, which might not be sufficient for ultra-wideband applications.
|
||||
- USB 3.0 connectivity is required for optimal performance; using USB 2.0 will significantly limit data
|
||||
transfer rates.
|
||||
|
||||
Set up instructions (Linux)
|
||||
---------------------------
|
||||
|
||||
No additional Python packages are required for BladeRF beyond the base RIA Toolkit OSS installation.
|
||||
|
||||
1. Install the system library:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo apt install libbladerf-dev
|
||||
|
||||
For a more complete installation including CLI tools and FPGA images, use the Nuand PPA:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo add-apt-repository ppa:nuandllc/bladerf
|
||||
sudo apt-get update
|
||||
sudo apt-get install bladerf libbladerf-dev
|
||||
sudo apt-get install bladerf-fpga-hostedxa4 # Necessary for BladeRF 2.0 Micro xA4
|
||||
|
||||
2. Install udev rules:
|
||||
|
||||
For most users:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo udevadm control --reload
|
||||
sudo udevadm trigger
|
||||
|
||||
For **Radioconda** users, create symlinks from your conda environment instead:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo ln -s $CONDA_PREFIX/lib/udev/rules.d/88-nuand-bladerf1.rules /etc/udev/rules.d/88-radioconda-nuand-bladerf1.rules
|
||||
sudo ln -s $CONDA_PREFIX/lib/udev/rules.d/88-nuand-bladerf2.rules /etc/udev/rules.d/88-radioconda-nuand-bladerf2.rules
|
||||
sudo ln -s $CONDA_PREFIX/lib/udev/rules.d/88-nuand-bootloader.rules /etc/udev/rules.d/88-radioconda-nuand-bootloader.rules
|
||||
sudo udevadm control --reload
|
||||
sudo udevadm trigger
|
||||
|
||||
Further Information
|
||||
-------------------
|
||||
|
||||
- `Official BladeRF Website <https://www.nuand.com/>`_
|
||||
- `BladeRF GitHub Repository <https://github.com/Nuand/bladeRF>`_
|
||||
- `BladeRF Setup with Radioconda <https://github.com/radioconda/radioconda-installer?tab=readme-ov-file#bladerf>`_
|
||||
.. _blade:
|
||||
|
||||
BladeRF
|
||||
=======
|
||||
|
||||
The BladeRF is a versatile software-defined radio (SDR) platform developed by Nuand. It is designed for a wide
|
||||
range of applications, from wireless communication research to field deployments. BladeRF devices are known
|
||||
for their high performance, flexibility, and extensive open-source support, making them suitable for both
|
||||
hobbyists and professionals. The BladeRF is based on the Analog Devices AD9361 RF transceiver, which provides
|
||||
wide frequency coverage and high bandwidth.
|
||||
|
||||
Supported Models
|
||||
----------------
|
||||
|
||||
- **BladeRF 2.0 Micro xA4:** A compact model with a 49 kLE FPGA, ideal for portable applications.
|
||||
- **BladeRF 2.0 Micro xA9:** A higher-end version of the Micro with a 115 kLE FPGA, offering more processing power in a small form factor.
|
||||
|
||||
Key Features
|
||||
------------
|
||||
|
||||
- **Frequency Range:** Typically from 47 MHz to 6 GHz, covering a wide range of wireless communication bands.
|
||||
- **Bandwidth:** Up to 56 MHz, allowing for wideband signal processing.
|
||||
- **FPGA:** Integrated FPGA (varies by model) for real-time processing and custom logic development.
|
||||
- **Connectivity:** USB 3.0 interface for high-speed data transfer, with options for GPIO, SPI, and other I/O.
|
||||
|
||||
Hackability
|
||||
-----------
|
||||
|
||||
- **Expansion:** The BladeRF features GPIO, expansion headers, and add-on boards, allowing users to extend the
|
||||
functionality of the device for specific applications, such as additional RF front ends.
|
||||
- **Frequency and Bandwidth Modification:** Advanced users can modify the BladeRF's settings and firmware to
|
||||
explore different frequency bands and optimize the bandwidth for their specific use cases.
|
||||
|
||||
Limitations
|
||||
-----------
|
||||
|
||||
- The complexity of FPGA development may present a steep learning curve for users unfamiliar with hardware
|
||||
description languages (HDL).
|
||||
- Bandwidth is capped at 56 MHz, which might not be sufficient for ultra-wideband applications.
|
||||
- USB 3.0 connectivity is required for optimal performance; using USB 2.0 will significantly limit data
|
||||
transfer rates.
|
||||
|
||||
Set up instructions (Linux, Radioconda)
|
||||
---------------------------------------
|
||||
|
||||
1. Activate your Radioconda environment.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
conda activate <your-env-name>
|
||||
|
||||
2. Install the base dependencies and drivers (*Easy method*):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo add-apt-repository ppa:nuandllc/bladerf
|
||||
sudo apt-get update
|
||||
sudo apt-get install bladerf
|
||||
sudo apt-get install libbladerf-dev
|
||||
sudo apt-get install bladerf-fpga-hostedxa4 # Necessary for installation of BladeRF 2.0 Micro xA4.
|
||||
|
||||
3. Install a ``udev`` rule by creating a link into your Radioconda installation:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo ln -s $CONDA_PREFIX/lib/udev/rules.d/88-nuand-bladerf1.rules /etc/udev/rules.d/88-radioconda-nuand-bladerf1.rules
|
||||
sudo ln -s $CONDA_PREFIX/lib/udev/rules.d/88-nuand-bladerf2.rules /etc/udev/rules.d/88-radioconda-nuand-bladerf2.rules
|
||||
sudo ln -s $CONDA_PREFIX/lib/udev/rules.d/88-nuand-bootloader.rules /etc/udev/rules.d/88-radioconda-nuand-bootloader.rules
|
||||
sudo udevadm control --reload
|
||||
sudo udevadm trigger
|
||||
|
||||
Further Information
|
||||
-------------------
|
||||
|
||||
- `Official BladeRF Website <https://www.nuand.com/>`_
|
||||
- `BladeRF GitHub Repository <https://github.com/Nuand/bladeRF>`_
|
||||
- `BladeRF Setup with Radioconda <https://github.com/radioconda/radioconda-installer?tab=readme-ov-file#bladerf>`_
|
||||
|
|
|
|||
|
|
@ -1,88 +1,83 @@
|
|||
.. _hackrf:
|
||||
|
||||
HackRF
|
||||
======
|
||||
|
||||
The HackRF One is a portable and affordable software-defined radio developed by Great Scott Gadgets. It is an
|
||||
open source hardware platform that is designed to enable test and development of modern and next generation
|
||||
radio technologies.
|
||||
|
||||
The HackRF is based on the Analog Devices MAX2839 transceiver chip, which supports both transmission and
|
||||
reception of signals across a wide frequency range, combined with a MAX5864 RF front-end chip and a
|
||||
RFFC5072 wideband synthesizer/VCO.
|
||||
|
||||
Supported models
|
||||
----------------
|
||||
|
||||
- **HackRF One:** The standard model with a frequency range of 1 MHz to 6 GHz and a bandwidth of up to 20 MHz.
|
||||
- **Opera Cake for HackRF:** An antenna switching add-on board for HackRF One that is configured with command-line software.
|
||||
|
||||
Key features
|
||||
------------
|
||||
|
||||
- **Frequency Range:** 1 MHz to 6 GHz.
|
||||
- **Bandwidth:** 2 MHz to 20 MHz.
|
||||
- **Connectivity:** USB 2.0 interface with support for power, data, and firmware updates.
|
||||
- **Software Support:** Compatible with GNU Radio, SDR#, and other SDR frameworks.
|
||||
- **Onboard Processing:** ARM-based LPC4320 processor for digital signal processing and interfacing over USB.
|
||||
|
||||
Hackability
|
||||
-----------
|
||||
|
||||
.. todo::
|
||||
|
||||
Add information regarding HackRF hackability
|
||||
|
||||
Limitations
|
||||
-----------
|
||||
|
||||
- Bandwidth is limited to 20 MHz.
|
||||
- USB 2.0 connectivity might limit data transfer rates compared to USB 3.0 or Ethernet-based SDRs.
|
||||
|
||||
Set up instructions (Linux)
|
||||
---------------------------
|
||||
|
||||
HackRF is supported out of the box after installing RIA Toolkit OSS.
|
||||
|
||||
1. Ensure ``libhackrf`` is installed at the system level. On most Ubuntu installations this is already
|
||||
present. If not:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo apt install libhackrf-dev
|
||||
|
||||
2. Install udev rules to allow non-root device access:
|
||||
|
||||
For most users:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo udevadm control --reload
|
||||
sudo udevadm trigger
|
||||
|
||||
For **Radioconda** users, create a symlink from your conda environment instead:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo ln -s $CONDA_PREFIX/lib/udev/rules.d/53-hackrf.rules /etc/udev/rules.d/53-radioconda-hackrf.rules
|
||||
sudo udevadm control --reload
|
||||
sudo udevadm trigger
|
||||
|
||||
Make sure your user account belongs to the ``plugdev`` group in order to access your device:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo usermod -a -G plugdev <user>
|
||||
|
||||
.. note::
|
||||
|
||||
You may have to restart your system for group membership changes to take effect.
|
||||
|
||||
Further information
|
||||
-------------------
|
||||
|
||||
- `Official HackRF Website <https://greatscottgadgets.com/hackrf/>`_
|
||||
- `HackRF Project Documentation <https://hackrf.readthedocs.io/en/latest/>`_
|
||||
- `HackRF Software Installation Guide <https://hackrf.readthedocs.io/en/latest/installing_hackrf_software.html>`_
|
||||
- `HackRF GitHub Repository <https://github.com/greatscottgadgets/hackrf>`_
|
||||
- `HackRF Setup with Radioconda <https://github.com/radioconda/radioconda-installer?tab=readme-ov-file#hackrf>`_
|
||||
.. _hackrf:
|
||||
|
||||
HackRF
|
||||
======
|
||||
|
||||
The HackRF One is a portable and affordable software-defined radio developed by Great Scott Gadgets. It is an
|
||||
open source hardware platform that is designed to enable test and development of modern and next generation
|
||||
radio technologies.
|
||||
|
||||
The HackRF is based on the Analog Devices MAX2839 transceiver chip, which supports both transmission and
|
||||
reception of signals across a wide frequency range, combined with a MAX5864 RF front-end chip and a
|
||||
RFFC5072 wideband synthesizer/VCO.
|
||||
|
||||
Supported models
|
||||
----------------
|
||||
|
||||
- **HackRF One:** The standard model with a frequency range of 1 MHz to 6 GHz and a bandwidth of up to 20 MHz.
|
||||
- **Opera Cake for HackRF:** An antenna switching add-on board for HackRF One that is configured with command-line software.
|
||||
|
||||
Key features
|
||||
------------
|
||||
|
||||
- **Frequency Range:** 1 MHz to 6 GHz.
|
||||
- **Bandwidth:** 2 MHz to 20 MHz.
|
||||
- **Connectivity:** USB 2.0 interface with support for power, data, and firmware updates.
|
||||
- **Software Support:** Compatible with GNU Radio, SDR#, and other SDR frameworks.
|
||||
- **Onboard Processing:** ARM-based LPC4320 processor for digital signal processing and interfacing over USB.
|
||||
|
||||
Hackability
|
||||
-----------
|
||||
|
||||
.. todo::
|
||||
|
||||
Add information regarding HackRF hackability
|
||||
|
||||
Limitations
|
||||
-----------
|
||||
|
||||
- Bandwidth is limited to 20 MHz.
|
||||
- USB 2.0 connectivity might limit data transfer rates compared to USB 3.0 or Ethernet-based SDRs.
|
||||
|
||||
Set up instructions (Linux, Radioconda)
|
||||
---------------------------------------
|
||||
|
||||
1. Activate your Radioconda environment:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
conda activate <your-env-name>
|
||||
|
||||
2. Install the System Package (Ubuntu / Debian):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get install hackrf
|
||||
|
||||
3. Install a ``udev`` rule by creating a link into your Radioconda installation:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo ln -s $CONDA_PREFIX/lib/udev/rules.d/53-hackrf.rules /etc/udev/rules.d/53-radioconda-hackrf.rules
|
||||
sudo udevadm control --reload
|
||||
sudo udevadm trigger
|
||||
|
||||
Make sure your user account belongs to the ``plugdev`` group in order to access your device:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo usermod -a -G plugdev <user>
|
||||
|
||||
.. note::
|
||||
|
||||
You may have to restart your system for group membership changes to take effect.
|
||||
|
||||
Further information
|
||||
-------------------
|
||||
|
||||
- `Official HackRF Website <https://greatscottgadgets.com/hackrf/>`_
|
||||
- `HackRF Project Documentation <https://hackrf.readthedocs.io/en/latest/>`_
|
||||
- `HackRF Software Installation Guide <https://hackrf.readthedocs.io/en/latest/installing_hackrf_software.html>`_
|
||||
- `HackRF GitHub Repository <https://github.com/greatscottgadgets/hackrf>`_
|
||||
- `HackRF Setup with Radioconda <https://github.com/radioconda/radioconda-installer?tab=readme-ov-file#hackrf>`_
|
||||
|
|
|
|||
|
|
@ -1,123 +1,116 @@
|
|||
.. _pluto:
|
||||
|
||||
PlutoSDR
|
||||
========
|
||||
|
||||
The ADALM-PLUTO (PlutoSDR) is a portable and affordable software-defined radio developed by Analog Devices.
|
||||
It is designed for learning, experimenting, and prototyping in the field of wireless communication. The PlutoSDR
|
||||
is popular among students, educators, and hobbyists due to its versatility and ease of use.
|
||||
|
||||
The PlutoSDR is based on the AD9363 transceiver chip, which supports both transmission and reception of signals
|
||||
across a wide frequency range. The device is supported by a robust open-source ecosystem, making it ideal for
|
||||
hands-on learning and rapid prototyping.
|
||||
|
||||
Supported models
|
||||
----------------
|
||||
|
||||
- **ADALM-PLUTO:** The standard model with a frequency range of 325 MHz to 3.8 GHz and a bandwidth of up to 20 MHz.
|
||||
- **Modified ADALM-PLUTO:** Some users modify their PlutoSDR to extend the frequency range to approximately 70 MHz
|
||||
to 6 GHz by applying firmware patches with unqualified RF performance.
|
||||
|
||||
Key features
|
||||
------------
|
||||
|
||||
- **Frequency Range:** 325 MHz to 3.8 GHz (standard), expandable with modifications.
|
||||
- **Bandwidth:** Up to 20 MHz, can be increased to 56 MHz with firmware modifications.
|
||||
- **Connectivity:** USB 2.0 interface with support for power, data, and firmware updates.
|
||||
- **Software Support:** Compatible with GNU Radio, MATLAB, Simulink, and other SDR frameworks.
|
||||
- **Onboard Processing:** Integrated ARM Cortex-A9 processor for custom applications and signal processing.
|
||||
|
||||
Hackability
|
||||
------------
|
||||
|
||||
- **Frequency Range and Bandwidth:** The default frequency range of 325 MHz to 3.8 GHz can be expanded to
|
||||
approximately 70 MHz to 6 GHz, and the bandwidth can be increased from 20 MHz to 56 MHz by modifying
|
||||
the device's firmware.
|
||||
- **2x2 MIMO:** On Rev C models, users can unlock 2x2 MIMO (Multiple Input Multiple Output) functionality by
|
||||
wiring UFL to SMA connectors to the device's PCB, effectively turning the device into a dual-channel SDR.
|
||||
|
||||
Limitations
|
||||
-----------
|
||||
|
||||
- Bandwidth is limited to 20 MHz by default, but can be increased to 56 MHz with modifications, which may
|
||||
affect stability.
|
||||
- USB 2.0 connectivity might limit data transfer rates compared to USB 3.0 or Ethernet-based SDRs.
|
||||
|
||||
Set up instructions (Linux)
|
||||
---------------------------
|
||||
|
||||
The PlutoSDR is supported out of the box after installing RIA Toolkit OSS. The required Python package
|
||||
(``pyadi-iio``) is included in the toolkit's dependencies.
|
||||
|
||||
1. Ensure ``libiio`` is installed at the system level. On most Ubuntu installations this is already present.
|
||||
If not:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo apt install libiio-dev libiio-utils libiio0
|
||||
|
||||
.. note::
|
||||
|
||||
PlutoSDR devices are discoverable over both USB and network (mDNS). Network discovery uses Avahi — if
|
||||
``avahi-daemon`` is not running, network discovery will be skipped but USB discovery still works.
|
||||
|
||||
2. Install a ``udev`` rule to allow non-root device access:
|
||||
|
||||
For most users:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo udevadm control --reload
|
||||
sudo udevadm trigger
|
||||
|
||||
For **Radioconda** users, create a symlink from your conda environment instead:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo ln -s $CONDA_PREFIX/lib/udev/rules.d/90-libiio.rules /etc/udev/rules.d/90-radioconda-libiio.rules
|
||||
sudo udevadm control --reload
|
||||
sudo udevadm trigger
|
||||
|
||||
Once you can communicate with the hardware, you may want to perform the post-install steps detailed on
|
||||
the `PlutoSDR Documentation <https://wiki.analog.com/university/tools/pluto>`_.
|
||||
|
||||
3. (Optional) Building ``libiio`` or ``libad9361-iio`` from source:
|
||||
|
||||
This step is only required if you need a version not available via ``apt``. First install build
|
||||
dependencies:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo apt-get install -y build-essential git libxml2-dev bison flex libcdk5-dev cmake \
|
||||
libusb-1.0-0-dev libavahi-client-dev libavahi-common-dev libaio-dev
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Build libiio from source
|
||||
cd ~
|
||||
git clone --branch v0.23 https://github.com/analogdevicesinc/libiio.git
|
||||
cd libiio
|
||||
mkdir -p build
|
||||
cd build
|
||||
cmake -DPYTHON_BINDINGS=ON ..
|
||||
make -j"$(nproc)"
|
||||
sudo make install
|
||||
sudo ldconfig
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Build libad9361-iio from source
|
||||
cd ~
|
||||
git clone https://github.com/analogdevicesinc/libad9361-iio.git
|
||||
cd libad9361-iio
|
||||
mkdir -p build
|
||||
cd build
|
||||
cmake ..
|
||||
make -j"$(nproc)"
|
||||
sudo make install
|
||||
|
||||
Further information
|
||||
-------------------
|
||||
|
||||
- `PlutoSDR Documentation <https://wiki.analog.com/university/tools/pluto>`_
|
||||
- `PlutoSDR Setup with Radioconda <https://github.com/radioconda/radioconda-installer?tab=readme-ov-file#iio-pluto-sdr>`_
|
||||
.. _pluto:
|
||||
|
||||
PlutoSDR
|
||||
========
|
||||
|
||||
The ADALM-PLUTO (PlutoSDR) is a portable and affordable software-defined radio developed by Analog Devices.
|
||||
It is designed for learning, experimenting, and prototyping in the field of wireless communication. The PlutoSDR
|
||||
is popular among students, educators, and hobbyists due to its versatility and ease of use.
|
||||
|
||||
The PlutoSDR is based on the AD9363 transceiver chip, which supports both transmission and reception of signals
|
||||
across a wide frequency range. The device is supported by a robust open-source ecosystem, making it ideal for
|
||||
hands-on learning and rapid prototyping.
|
||||
|
||||
Supported models
|
||||
----------------
|
||||
|
||||
- **ADALM-PLUTO:** The standard model with a frequency range of 325 MHz to 3.8 GHz and a bandwidth of up to 20 MHz.
|
||||
- **Modified ADALM-PLUTO:** Some users modify their PlutoSDR to extend the frequency range to approximately 70 MHz
|
||||
to 6 GHz by applying firmware patches with unqualified RF performance.
|
||||
|
||||
Key features
|
||||
------------
|
||||
|
||||
- **Frequency Range:** 325 MHz to 3.8 GHz (standard), expandable with modifications.
|
||||
- **Bandwidth:** Up to 20 MHz, can be increased to 56 MHz with firmware modifications.
|
||||
- **Connectivity:** USB 2.0 interface with support for power, data, and firmware updates.
|
||||
- **Software Support:** Compatible with GNU Radio, MATLAB, Simulink, and other SDR frameworks.
|
||||
- **Onboard Processing:** Integrated ARM Cortex-A9 processor for custom applications and signal processing.
|
||||
|
||||
Hackability
|
||||
------------
|
||||
|
||||
- **Frequency Range and Bandwidth:** The default frequency range of 325 MHz to 3.8 GHz can be expanded to
|
||||
approximately 70 MHz to 6 GHz, and the bandwidth can be increased from 20 MHz to 56 MHz by modifying
|
||||
the device's firmware.
|
||||
- **2x2 MIMO:** On Rev C models, users can unlock 2x2 MIMO (Multiple Input Multiple Output) functionality by
|
||||
wiring UFL to SMA connectors to the device's PCB, effectively turning the device into a dual-channel SDR.
|
||||
|
||||
Limitations
|
||||
-----------
|
||||
|
||||
- Bandwidth is limited to 20 MHz by default, but can be increased to 56 MHz with modifications, which may
|
||||
affect stability.
|
||||
- USB 2.0 connectivity might limit data transfer rates compared to USB 3.0 or Ethernet-based SDRs.
|
||||
|
||||
Set up instructions (Linux, Radioconda)
|
||||
---------------------------------------
|
||||
|
||||
1. Activate your Radioconda environment:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
conda activate <your-env-name>
|
||||
|
||||
2. Install system dependencies:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y \
|
||||
build-essential \
|
||||
git \
|
||||
libxml2-dev \
|
||||
bison \
|
||||
flex \
|
||||
libcdk5-dev \
|
||||
cmake \
|
||||
libusb-1.0-0-dev \
|
||||
libavahi-client-dev \
|
||||
libavahi-common-dev \
|
||||
libaio-dev
|
||||
|
||||
3. Install a ``udev`` rule by creating a link into your Radioconda installation:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo ln -s $CONDA_PREFIX/lib/udev/rules.d/90-libiio.rules /etc/udev/rules.d/90-radioconda-libiio.rules
|
||||
sudo udevadm control --reload
|
||||
sudo udevadm trigger
|
||||
|
||||
Once you can communicate with the hardware, you may want to perform the post-install steps detailed on the `PlutoSDR Documentation <https://wiki.analog.com/university/tools/pluto>`_.
|
||||
|
||||
4. (Optional) Building ``libiio`` or ``libad9361-iio`` from source:
|
||||
|
||||
This step is only required if you want the latest version of these libraries not provided in Radioconda.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Build libiio from source
|
||||
cd ~
|
||||
git clone --branch v0.23 https://github.com/analogdevicesinc/libiio.git
|
||||
cd libiio
|
||||
mkdir -p build
|
||||
cd build
|
||||
cmake -DPYTHON_BINDINGS=ON ..
|
||||
make -j"$(nproc)"
|
||||
sudo make install
|
||||
sudo ldconfig
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
# Build libad9361-iio from source
|
||||
cd ~
|
||||
git clone https://github.com/analogdevicesinc/libad9361-iio.git
|
||||
cd libad9361-iio
|
||||
mkdir -p build
|
||||
cd build
|
||||
cmake ..
|
||||
make -j"$(nproc)"
|
||||
sudo make install
|
||||
|
||||
Further information
|
||||
-------------------
|
||||
|
||||
- `PlutoSDR Documentation <https://wiki.analog.com/university/tools/pluto>`_
|
||||
- `PlutoSDR Setup with Radioconda <https://github.com/radioconda/radioconda-installer?tab=readme-ov-file#iio-pluto-sdr>`_
|
||||
|
|
@ -30,111 +30,71 @@ Limitations
|
|||
- Sensitivity and performance can vary depending on the specific model and components.
|
||||
- Requires external software for signal processing and analysis.
|
||||
|
||||
Set up instructions (Linux)
|
||||
---------------------------
|
||||
Set up instructions (Linux, Radioconda)
|
||||
---------------------------------------
|
||||
|
||||
1. If you previously had RTL-SDR drivers installed, purge them first:
|
||||
1. Activate your Radioconda environment:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
conda activate <your-env-name>
|
||||
|
||||
2. Purge drivers:
|
||||
|
||||
If you already have other drivers installed, purge them from your system.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo apt purge ^librtlsdr
|
||||
sudo rm -rvf /usr/lib/librtlsdr*
|
||||
sudo rm -rvf /usr/include/rtl-sdr*
|
||||
sudo rm -rvf /usr/local/lib/librtlsdr*
|
||||
sudo rm -rvf /usr/local/include/rtl-sdr*
|
||||
sudo rm -rvf /usr/local/include/rtl_*
|
||||
sudo rm -rvf /usr/lib/librtlsdr*
|
||||
sudo rm -rvf /usr/include/rtl-sdr*
|
||||
sudo rm -rvf /usr/local/lib/librtlsdr*
|
||||
sudo rm -rvf /usr/local/include/rtl-sdr*
|
||||
sudo rm -rvf /usr/local/include/rtl_*
|
||||
sudo rm -rvf /usr/local/bin/rtl_*
|
||||
|
||||
2. Install build dependencies:
|
||||
3. Install RTL-SDR Blog drivers:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo apt install libusb-1.0-0-dev git cmake pkg-config build-essential
|
||||
|
||||
3. Build ``librtlsdr`` from source:
|
||||
|
||||
The standard ``librtlsdr`` package available via ``apt`` is missing symbols required by the Python
|
||||
bindings. Build from the **rtl-sdr-blog fork**:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
git clone https://github.com/rtlsdrblog/rtl-sdr-blog.git
|
||||
cd rtl-sdr-blog
|
||||
mkdir build && cd build
|
||||
cmake .. -DINSTALL_UDEV_RULES=ON
|
||||
sudo apt-get install libusb-1.0-0-dev git cmake pkg-config build-essential
|
||||
git clone https://github.com/osmocom/rtl-sdr
|
||||
cd rtl-sdr
|
||||
mkdir build
|
||||
cd build
|
||||
cmake ../ -DINSTALL_UDEV_RULES=ON
|
||||
make
|
||||
sudo make install
|
||||
sudo cp ../rtl-sdr.rules /etc/udev/rules.d/
|
||||
sudo ldconfig
|
||||
|
||||
.. important::
|
||||
|
||||
Do not use the osmocom ``rtl-sdr`` repository or the Ubuntu ``librtlsdr-dev`` apt package. Neither
|
||||
provides the ``rtlsdr_set_dithering`` symbol that the Python bindings require.
|
||||
|
||||
4. Blacklist the kernel DVB driver:
|
||||
|
||||
The kernel DVB-T driver (``dvb_usb_rtl28xxu``) claims the RTL-SDR device and prevents ``librtlsdr``
|
||||
from accessing it.
|
||||
|
||||
For most users:
|
||||
4. Blacklist the DVB-T modules that would otherwise claim the device:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
echo 'blacklist dvb_usb_rtl28xxu' | sudo tee /etc/modprobe.d/blacklist-rtlsdr.conf
|
||||
sudo modprobe -r dvb_usb_rtl28xxu
|
||||
|
||||
For **Radioconda** users, a blacklist configuration is already provided in your conda environment:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo ln -s $CONDA_PREFIX/etc/modprobe.d/rtl-sdr-blacklist.conf /etc/modprobe.d/radioconda-rtl-sdr-blacklist.conf
|
||||
sudo modprobe -r $(cat $CONDA_PREFIX/etc/modprobe.d/rtl-sdr-blacklist.conf | sed -n -e 's/^blacklist //p')
|
||||
|
||||
If ``modprobe -r`` fails with "Module is in use", unplug the RTL-SDR dongle, run the command again,
|
||||
then plug it back in. Alternatively, reboot — the blacklist takes effect on next boot.
|
||||
.. note::
|
||||
|
||||
.. note::
|
||||
In addition to the Radioconda blacklist file, some systems also require
|
||||
manually blacklisting the following DVB-T modules to prevent them from
|
||||
claiming the device:
|
||||
|
||||
Some systems also require blacklisting additional DVB-T modules. Add these entries to your
|
||||
blacklist configuration if needed:
|
||||
- ``dvb_usb_rtl28xxu``
|
||||
- ``rtl2832``
|
||||
- ``rtl2830``
|
||||
|
||||
- ``rtl2832``
|
||||
- ``rtl2830``
|
||||
Add these entries to ``rtlsdr.conf`` (or create the file at
|
||||
``/etc/modprobe.d/rtlsdr.conf``) if they are not already present.
|
||||
|
||||
5. Reload udev rules:
|
||||
|
||||
For most users (rules are installed by the build step above):
|
||||
5. Install a udev rule by creating a link into your radioconda installation:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo udevadm control --reload
|
||||
sudo udevadm trigger
|
||||
|
||||
For **Radioconda** users, create a symlink from your conda environment instead:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo ln -s $CONDA_PREFIX/lib/udev/rules.d/rtl-sdr.rules /etc/udev/rules.d/radioconda-rtl-sdr.rules
|
||||
sudo udevadm control --reload
|
||||
sudo udevadm trigger
|
||||
|
||||
6. Install Python packages:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
pip install pyrtlsdr==0.3.0
|
||||
pip install setuptools==69.5.1
|
||||
|
||||
.. note::
|
||||
|
||||
``pyrtlsdr`` 0.4.0 references a ``rtlsdr_set_dithering`` symbol not present in standard
|
||||
``librtlsdr`` builds. Version 0.3.0 works correctly.
|
||||
|
||||
``pyrtlsdr`` 0.3.0 depends on ``pkg_resources``, which was removed in ``setuptools`` >= 82.
|
||||
Pinning to 69.5.1 ensures ``pkg_resources`` is available.
|
||||
|
||||
Further Information
|
||||
-------------------
|
||||
- `RTL-SDR Official Website <https://www.rtl-sdr.com/>`_
|
||||
- `RTL-SDR Documentation <https://www.rtl-sdr.com/rtl-sdr-quick-start-guide/>`_
|
||||
- `RTL-SDR Documentation <https://www.rtl-sdr.com/rtl-sdr-quick-start-guide/>`_
|
||||
|
|
@ -39,48 +39,18 @@ Limitations
|
|||
Set up instructions (Linux)
|
||||
---------------------------------
|
||||
|
||||
ThinkRF devices require the ``pyrf`` package, which is written in Python 2 syntax and must be patched
|
||||
after installation to work with Python 3.
|
||||
|
||||
.. note::
|
||||
|
||||
``lib2to3`` was fully removed in Python 3.13. ThinkRF support is currently limited to
|
||||
**Python 3.12 and below**.
|
||||
|
||||
1. Install ``lib2to3``:
|
||||
|
||||
On some distributions (including Ubuntu 24.04+), ``lib2to3`` is not included by default:
|
||||
Install PyRF
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo apt install python3-lib2to3
|
||||
pip install 'pyrf>=2.8.0'
|
||||
|
||||
2. Install ``pyrf``:
|
||||
Convert PyRF scripts to Python 3
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
pip install pyrf
|
||||
|
||||
3. Patch ``pyrf`` for Python 3:
|
||||
|
||||
The ``pyrf`` package contains Python 2 syntax throughout (e.g., ``dict.iteritems()``, ``print``
|
||||
statements). Run the following to automatically convert the entire package to Python 3:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
python -c "
|
||||
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
|
||||
import pyrf, os
|
||||
pyrf_path = os.path.dirname(pyrf.__file__)
|
||||
fixers = get_fixers_from_package('lib2to3.fixes')
|
||||
tool = RefactoringTool(fixers)
|
||||
tool.refactor_dir(pyrf_path, write=True)
|
||||
print('Done')
|
||||
"
|
||||
|
||||
.. note::
|
||||
|
||||
This patches the entire ``pyrf`` package in place, which is required for the driver to fully load.
|
||||
cd ../scripts
|
||||
./convert_pyrf_to_python3.sh
|
||||
|
||||
Further Information
|
||||
-------------------
|
||||
|
|
|
|||
|
|
@ -1,155 +1,92 @@
|
|||
.. _usrp:
|
||||
|
||||
USRP
|
||||
====
|
||||
|
||||
The USRP (Universal Software Radio Peripheral) product line is a series of software-defined radios (SDRs)
|
||||
developed by Ettus Research. These devices are widely used in academia, industry, and research for various
|
||||
wireless communication applications, ranging from simple experimentation to complex signal processing tasks.
|
||||
|
||||
USRP devices offer a flexible platform that can be used with various software frameworks, including GNU Radio
|
||||
and the USRP Hardware Driver (UHD). The product line includes both entry-level models for hobbyists and
|
||||
advanced models for professional and research use.
|
||||
|
||||
Supported models
|
||||
----------------
|
||||
|
||||
- **USRP B200/B210:** Compact, single-board, full-duplex, with a wide frequency range.
|
||||
- **USRP N200/N210:** High-performance models with increased bandwidth and connectivity options.
|
||||
- **USRP X300/X310:** High-end models featuring large bandwidth, multiple MIMO channels, and support for GPSDO.
|
||||
- **USRP E310/E320:** Embedded devices with onboard processing capabilities.
|
||||
- **USRP B200mini:** Ultra-compact model for portable and embedded applications.
|
||||
|
||||
Key features
|
||||
------------
|
||||
|
||||
- **Frequency Range:** Typically covers from DC to 6 GHz, depending on the model and daughter boards used.
|
||||
- **Bandwidth:** Varies by model, up to 160 MHz in some high-end versions.
|
||||
- **Connectivity:** Includes USB 3.0, Ethernet, and PCIe interfaces depending on the model.
|
||||
- **Software Support:** Compatible with UHD, GNU Radio, and other SDR frameworks.
|
||||
|
||||
Hackability
|
||||
-----------
|
||||
|
||||
- The UHD library is fully open source and can be modified to meet user intention.
|
||||
- Certain USRP models have "RFNoC" which streamlines the inclusion of custom FPGA processing in a USRP.
|
||||
|
||||
Limitations
|
||||
-----------
|
||||
|
||||
- Some models may have limited bandwidth or processing capabilities.
|
||||
- Compatibility with certain software tools may vary depending on the version of the UHD.
|
||||
- Price range can be a consideration, especially for high-end models.
|
||||
|
||||
Set up instructions (Linux)
|
||||
---------------------------
|
||||
|
||||
USRP devices require the UHD (USRP Hardware Driver) library with Python bindings. There is no pip-installable
|
||||
UHD package — it must either be installed via conda or built from source.
|
||||
|
||||
**Option A: Install via conda (recommended for conda environments)**
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
conda install conda-forge::uhd
|
||||
|
||||
**Option B: Build from source (required for pip/venv environments)**
|
||||
|
||||
The Python bindings must target the same Python version used in your virtual environment.
|
||||
|
||||
1. Install build dependencies:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo apt install cmake build-essential libboost-all-dev libusb-1.0-0-dev \
|
||||
python3-dev python3-numpy libncurses-dev
|
||||
|
||||
2. Install the Mako template library into your virtual environment (used by UHD's build system):
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
pip install mako
|
||||
|
||||
3. Clone and build UHD with your virtual environment activated:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
git clone https://github.com/EttusResearch/uhd.git
|
||||
cd uhd
|
||||
git checkout v4.7.0.0
|
||||
cd host
|
||||
mkdir build && cd build
|
||||
cmake -DENABLE_PYTHON_API=ON -DPYTHON_EXECUTABLE=$(which python3) ..
|
||||
make -j$(nproc)
|
||||
sudo make install
|
||||
sudo ldconfig
|
||||
|
||||
.. important::
|
||||
|
||||
Run the ``cmake`` command with your virtual environment activated so ``$(which python3)`` points
|
||||
to the correct interpreter. Before running ``make``, verify the cmake output includes::
|
||||
|
||||
-- * LibUHD - Python API → must say "Enabling"
|
||||
-- Python interpreter: .../your-venv/bin/python3
|
||||
|
||||
If "LibUHD - Python API" is not listed under enabled components, the Python bindings will not be
|
||||
built. The build typically takes 10–30 minutes.
|
||||
|
||||
4. Copy the Python bindings into your virtual environment if ``import uhd`` fails after installation:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
cp -r ~/uhd/host/build/python/uhd ~/.venv/lib/python3.XX/site-packages/
|
||||
|
||||
Replace ``python3.XX`` with your Python version (e.g., ``python3.12``).
|
||||
|
||||
.. note::
|
||||
|
||||
If you have a pre-existing UHD installation built against a different Python version, you will see
|
||||
a circular import error. The bindings must match the Python version in your virtual environment exactly.
|
||||
|
||||
**After either installation method:**
|
||||
|
||||
1. Download UHD FPGA/firmware images:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
uhd_images_downloader
|
||||
|
||||
2. Verify device access:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
uhd_find_devices
|
||||
|
||||
For USB devices (e.g. B-series), install a ``udev`` rule.
|
||||
|
||||
For most users:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo udevadm control --reload
|
||||
sudo udevadm trigger
|
||||
|
||||
For **Radioconda** users, create a symlink from your conda environment instead:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo ln -s $CONDA_PREFIX/lib/uhd/utils/uhd-usrp.rules /etc/udev/rules.d/radioconda-uhd-usrp.rules
|
||||
sudo udevadm control --reload
|
||||
sudo udevadm trigger
|
||||
|
||||
3. (Optional) Update firmware/FPGA images:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
uhd_usrp_probe
|
||||
|
||||
This will ensure your device is running the latest firmware and FPGA versions.
|
||||
|
||||
Further information
|
||||
-------------------
|
||||
|
||||
- `Official USRP Website <https://www.ettus.com/>`_
|
||||
- `USRP Documentation <https://kb.ettus.com/USRP_Hardware_Driver_and_Interfaces>`_
|
||||
- `USRP Setup with Radioconda <https://github.com/radioconda/radioconda-installer?tab=readme-ov-file#uhd-ettus-usrp>`_
|
||||
.. _usrp:
|
||||
|
||||
USRP
|
||||
====
|
||||
|
||||
The USRP (Universal Software Radio Peripheral) product line is a series of software-defined radios (SDRs)
|
||||
developed by Ettus Research. These devices are widely used in academia, industry, and research for various
|
||||
wireless communication applications, ranging from simple experimentation to complex signal processing tasks.
|
||||
|
||||
USRP devices offer a flexible platform that can be used with various software frameworks, including GNU Radio
|
||||
and the USRP Hardware Driver (UHD). The product line includes both entry-level models for hobbyists and
|
||||
advanced models for professional and research use.
|
||||
|
||||
Supported models
|
||||
----------------
|
||||
|
||||
- **USRP B200/B210:** Compact, single-board, full-duplex, with a wide frequency range.
|
||||
- **USRP N200/N210:** High-performance models with increased bandwidth and connectivity options.
|
||||
- **USRP X300/X310:** High-end models featuring large bandwidth, multiple MIMO channels, and support for GPSDO.
|
||||
- **USRP E310/E320:** Embedded devices with onboard processing capabilities.
|
||||
- **USRP B200mini:** Ultra-compact model for portable and embedded applications.
|
||||
|
||||
Key features
|
||||
------------
|
||||
|
||||
- **Frequency Range:** Typically covers from DC to 6 GHz, depending on the model and daughter boards used.
|
||||
- **Bandwidth:** Varies by model, up to 160 MHz in some high-end versions.
|
||||
- **Connectivity:** Includes USB 3.0, Ethernet, and PCIe interfaces depending on the model.
|
||||
- **Software Support:** Compatible with UHD, GNU Radio, and other SDR frameworks.
|
||||
|
||||
Hackability
|
||||
-----------
|
||||
|
||||
- The UHD library is fully open source and can be modified to meet user intention.
|
||||
- Certain USRP models have "RFNoC" which streamlines the inclusion of custom FPGA processing in a USRP.
|
||||
|
||||
Limitations
|
||||
-----------
|
||||
|
||||
- Some models may have limited bandwidth or processing capabilities.
|
||||
- Compatibility with certain software tools may vary depending on the version of the UHD.
|
||||
- Price range can be a consideration, especially for high-end models.
|
||||
|
||||
Set up instructions (Linux, Radioconda)
|
||||
---------------------------------------
|
||||
|
||||
1. Activate your Radioconda environment:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
conda activate <your-env-name>
|
||||
|
||||
2. Install UHD and Python bindings:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
conda install conda-forge::uhd
|
||||
|
||||
3. Download UHD images:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
uhd_images_downloader
|
||||
|
||||
4. Verify access to your device:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
uhd_find_devices
|
||||
|
||||
For USB devices only (e.g. B series), install a ``udev`` rule by creating a link into your Radioconda installation.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
sudo ln -s $CONDA_PREFIX/lib/uhd/utils/uhd-usrp.rules /etc/udev/rules.d/radioconda-uhd-usrp.rules
|
||||
sudo udevadm control --reload
|
||||
sudo udevadm trigger
|
||||
|
||||
5. (Optional) Update firmware/FPGA images:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
uhd_usrp_probe
|
||||
|
||||
This will ensure your device is running the latest firmware and FPGA versions.
|
||||
|
||||
Further information
|
||||
-------------------
|
||||
|
||||
- `Official USRP Website <https://www.ettus.com/>`_
|
||||
- `USRP Documentation <https://kb.ettus.com/USRP_Hardware_Driver_and_Interfaces>`_
|
||||
- `USRP Setup with Radioconda <https://github.com/radioconda/radioconda-installer?tab=readme-ov-file#uhd-ettus-usrp>`_
|
||||
|
|
|
|||
377
poetry.lock
generated
377
poetry.lock
generated
|
|
@ -1,4 +1,4 @@
|
|||
# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand.
|
||||
# This file is automatically @generated by Poetry 2.3.4 and should not be changed by hand.
|
||||
|
||||
[[package]]
|
||||
name = "alabaster"
|
||||
|
|
@ -242,14 +242,14 @@ files = [
|
|||
|
||||
[[package]]
|
||||
name = "certifi"
|
||||
version = "2026.2.25"
|
||||
version = "2026.4.22"
|
||||
description = "Python package for providing Mozilla's CA Bundle."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["agent", "docs", "test"]
|
||||
files = [
|
||||
{file = "certifi-2026.2.25-py3-none-any.whl", hash = "sha256:027692e4402ad994f1c42e52a4997a9763c646b73e4096e4d5d6db8af1d6f0fa"},
|
||||
{file = "certifi-2026.2.25.tar.gz", hash = "sha256:e887ab5cee78ea814d3472169153c2d12cd43b14bd03329a39a9c6e2e80bfba7"},
|
||||
{file = "certifi-2026.4.22-py3-none-any.whl", hash = "sha256:3cb2210c8f88ba2318d29b0388d1023c8492ff72ecdde4ebdaddbb13a31b1c4a"},
|
||||
{file = "certifi-2026.4.22.tar.gz", hash = "sha256:8d455352a37b71bf76a79caa83a3d6c25afee4a385d632127b6afb3963f1c580"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -491,14 +491,14 @@ files = [
|
|||
|
||||
[[package]]
|
||||
name = "click"
|
||||
version = "8.3.2"
|
||||
version = "8.3.3"
|
||||
description = "Composable command line interface toolkit"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main", "dev", "docs", "server", "test"]
|
||||
files = [
|
||||
{file = "click-8.3.2-py3-none-any.whl", hash = "sha256:1924d2c27c5653561cd2cae4548d1406039cb79b858b747cfea24924bbc1616d"},
|
||||
{file = "click-8.3.2.tar.gz", hash = "sha256:14162b8b3b3550a7d479eafa77dfd3c38d9dc8951f6f69c78913a8f9a7540fd5"},
|
||||
{file = "click-8.3.3-py3-none-any.whl", hash = "sha256:a2bf429bb3033c89fa4936ffb35d5cb471e3719e1f3c8a7c3fff0b8314305613"},
|
||||
{file = "click-8.3.3.tar.gz", hash = "sha256:398329ad4837b2ff7cbe1dd166a4c0f8900c3ca3a218de04466f38f6497f18a2"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
|
|
@ -690,61 +690,61 @@ test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist"
|
|||
|
||||
[[package]]
|
||||
name = "cryptography"
|
||||
version = "46.0.7"
|
||||
version = "47.0.0"
|
||||
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
|
||||
optional = false
|
||||
python-versions = "!=3.9.0,!=3.9.1,>=3.8"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "cryptography-46.0.7-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:ea42cbe97209df307fdc3b155f1b6fa2577c0defa8f1f7d3be7d31d189108ad4"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b36a4695e29fe69215d75960b22577197aca3f7a25b9cf9d165dcfe9d80bc325"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5ad9ef796328c5e3c4ceed237a183f5d41d21150f972455a9d926593a1dcb308"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:73510b83623e080a2c35c62c15298096e2a5dc8d51c3b4e1740211839d0dea77"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:cbd5fb06b62bd0721e1170273d3f4d5a277044c47ca27ee257025146c34cbdd1"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:420b1e4109cc95f0e5700eed79908cef9268265c773d3a66f7af1eef53d409ef"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:24402210aa54baae71d99441d15bb5a1919c195398a87b563df84468160a65de"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:8a469028a86f12eb7d2fe97162d0634026d92a21f3ae0ac87ed1c4a447886c83"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:9694078c5d44c157ef3162e3bf3946510b857df5a3955458381d1c7cfc143ddb"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:42a1e5f98abb6391717978baf9f90dc28a743b7d9be7f0751a6f56a75d14065b"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91bbcb08347344f810cbe49065914fe048949648f6bd5c2519f34619142bbe85"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5d1c02a14ceb9148cc7816249f64f623fbfee39e8c03b3650d842ad3f34d637e"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-win32.whl", hash = "sha256:d23c8ca48e44ee015cd0a54aeccdf9f09004eba9fc96f38c911011d9ff1bd457"},
|
||||
{file = "cryptography-46.0.7-cp311-abi3-win_amd64.whl", hash = "sha256:397655da831414d165029da9bc483bed2fe0e75dde6a1523ec2fe63f3c46046b"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:d151173275e1728cf7839aaa80c34fe550c04ddb27b34f48c232193df8db5842"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:db0f493b9181c7820c8134437eb8b0b4792085d37dbb24da050476ccb664e59c"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ebd6daf519b9f189f85c479427bbd6e9c9037862cf8fe89ee35503bd209ed902"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:b7b412817be92117ec5ed95f880defe9cf18a832e8cafacf0a22337dc1981b4d"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:fbfd0e5f273877695cb93baf14b185f4878128b250cc9f8e617ea0c025dfb022"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:ffca7aa1d00cf7d6469b988c581598f2259e46215e0140af408966a24cf086ce"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:60627cf07e0d9274338521205899337c5d18249db56865f943cbe753aa96f40f"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:80406c3065e2c55d7f49a9550fe0c49b3f12e5bfff5dedb727e319e1afb9bf99"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:c5b1ccd1239f48b7151a65bc6dd54bcfcc15e028c8ac126d3fada09db0e07ef1"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:d5f7520159cd9c2154eb61eb67548ca05c5774d39e9c2c4339fd793fe7d097b2"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:fcd8eac50d9138c1d7fc53a653ba60a2bee81a505f9f8850b6b2888555a45d0e"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:65814c60f8cc400c63131584e3e1fad01235edba2614b61fbfbfa954082db0ee"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-win32.whl", hash = "sha256:fdd1736fed309b4300346f88f74cd120c27c56852c3838cab416e7a166f67298"},
|
||||
{file = "cryptography-46.0.7-cp314-cp314t-win_amd64.whl", hash = "sha256:e06acf3c99be55aa3b516397fe42f5855597f430add9c17fa46bf2e0fb34c9bb"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:462ad5cb1c148a22b2e3bcc5ad52504dff325d17daf5df8d88c17dda1f75f2a4"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:84d4cced91f0f159a7ddacad249cc077e63195c36aac40b4150e7a57e84fffe7"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:128c5edfe5e5938b86b03941e94fac9ee793a94452ad1365c9fc3f4f62216832"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5e51be372b26ef4ba3de3c167cd3d1022934bc838ae9eaad7e644986d2a3d163"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:cdf1a610ef82abb396451862739e3fc93b071c844399e15b90726ef7470eeaf2"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1d25aee46d0c6f1a501adcddb2d2fee4b979381346a78558ed13e50aa8a59067"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:cdfbe22376065ffcf8be74dc9a909f032df19bc58a699456a21712d6e5eabfd0"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:abad9dac36cbf55de6eb49badd4016806b3165d396f64925bf2999bcb67837ba"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:935ce7e3cfdb53e3536119a542b839bb94ec1ad081013e9ab9b7cfd478b05006"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:35719dc79d4730d30f1c2b6474bd6acda36ae2dfae1e3c16f2051f215df33ce0"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:7bbc6ccf49d05ac8f7d7b5e2e2c33830d4fe2061def88210a126d130d7f71a85"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a1529d614f44b863a7b480c6d000fe93b59acee9c82ffa027cfadc77521a9f5e"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-win32.whl", hash = "sha256:f247c8c1a1fb45e12586afbb436ef21ff1e80670b2861a90353d9b025583d246"},
|
||||
{file = "cryptography-46.0.7-cp38-abi3-win_amd64.whl", hash = "sha256:506c4ff91eff4f82bdac7633318a526b1d1309fc07ca76a3ad182cb5b686d6d3"},
|
||||
{file = "cryptography-46.0.7-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:fc9ab8856ae6cf7c9358430e49b368f3108f050031442eaeb6b9d87e4dcf4e4f"},
|
||||
{file = "cryptography-46.0.7-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d3b99c535a9de0adced13d159c5a9cf65c325601aa30f4be08afd680643e9c15"},
|
||||
{file = "cryptography-46.0.7-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d02c738dacda7dc2a74d1b2b3177042009d5cab7c7079db74afc19e56ca1b455"},
|
||||
{file = "cryptography-46.0.7-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:04959522f938493042d595a736e7dbdff6eb6cc2339c11465b3ff89343b65f65"},
|
||||
{file = "cryptography-46.0.7-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:3986ac1dee6def53797289999eabe84798ad7817f3e97779b5061a95b0ee4968"},
|
||||
{file = "cryptography-46.0.7-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:258514877e15963bd43b558917bc9f54cf7cf866c38aa576ebf47a77ddbc43a4"},
|
||||
{file = "cryptography-46.0.7.tar.gz", hash = "sha256:e4cfd68c5f3e0bfdad0d38e023239b96a2fe84146481852dffbcca442c245aa5"},
|
||||
{file = "cryptography-47.0.0-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:160ad728f128972d362e714054f6ba0067cab7fb350c5202a9ae8ae4ce3ef1a0"},
|
||||
{file = "cryptography-47.0.0-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b9a8943e359b7615db1a3ba587994618e094ff3d6fa5a390c73d079ce18b3973"},
|
||||
{file = "cryptography-47.0.0-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f5c15764f261394b22aef6b00252f5195f46f2ca300bec57149474e2538b31f8"},
|
||||
{file = "cryptography-47.0.0-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:9c59ab0e0fa3a180a5a9c59f3a5abe3ef90d474bc56d7fadfbe80359491b615b"},
|
||||
{file = "cryptography-47.0.0-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:34b4358b925a5ea3e14384ca781a2c0ef7ac219b57bb9eacc4457078e2b19f92"},
|
||||
{file = "cryptography-47.0.0-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:0024b87d47ae2399165a6bfb20d24888881eeab83ae2566d62467c5ff0030ce7"},
|
||||
{file = "cryptography-47.0.0-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:1e47422b5557bb82d3fff997e8d92cff4e28b9789576984f08c248d2b3535d93"},
|
||||
{file = "cryptography-47.0.0-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:6f29f36582e6151d9686235e586dd35bb67491f024767d10b842e520dc6a07ac"},
|
||||
{file = "cryptography-47.0.0-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:a9b761f012a943b7de0e828843c5688d0de94a0578d44d6c85a1bae32f87791f"},
|
||||
{file = "cryptography-47.0.0-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4e1de79e047e25d6e9f8cea71c86b4a53aced64134f0f003bbcbf3655fd172c8"},
|
||||
{file = "cryptography-47.0.0-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ef6b3634087f18d2155b1e8ce264e5345a753da2c5fa9815e7d41315c90f8318"},
|
||||
{file = "cryptography-47.0.0-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:11dbb9f50a0f1bb9757b3d8c27c1101780efb8f0bdecfb12439c22a74d64c001"},
|
||||
{file = "cryptography-47.0.0-cp311-abi3-win32.whl", hash = "sha256:7fda2f02c9015db3f42bb8a22324a454516ed10a8c29ca6ece6cdbb5efe2a203"},
|
||||
{file = "cryptography-47.0.0-cp311-abi3-win_amd64.whl", hash = "sha256:f5c3296dab66202f1b18a91fa266be93d6aa0c2806ea3d67762c69f60adc71aa"},
|
||||
{file = "cryptography-47.0.0-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:be12cb6a204f77ed968bcefe68086eb061695b540a3dd05edac507a3111b25f0"},
|
||||
{file = "cryptography-47.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2ebd84adf0728c039a3be2700289378e1c164afc6748df1a5ed456767bef9ba7"},
|
||||
{file = "cryptography-47.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7f68d6fbc7fbbcfb0939fea72c3b96a9f9a6edfc0e1b1d29778a2066030418b1"},
|
||||
{file = "cryptography-47.0.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:6651d32eff255423503aa276739da98c30f26c40cbeffcc6048e0d54ef704c0c"},
|
||||
{file = "cryptography-47.0.0-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:3fb8fa48075fad7193f2e5496135c6a76ac4b2aa5a38433df0a539296b377829"},
|
||||
{file = "cryptography-47.0.0-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:11438c7518132d95f354fa01a4aa2f806d172a061a7bed18cf18cbdacdb204d7"},
|
||||
{file = "cryptography-47.0.0-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:8c1a736bbb3288005796c3f7ccb9453360d7fed483b13b9f468aea5171432923"},
|
||||
{file = "cryptography-47.0.0-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:f1557695e5c2b86e204f6ce9470497848634100787935ab7adc5397c54abd7ab"},
|
||||
{file = "cryptography-47.0.0-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:f9a034b642b960767fb343766ae5ba6ad653f2e890ddd82955aef288ffea8736"},
|
||||
{file = "cryptography-47.0.0-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:b1c76fca783aa7698eb21eb14f9c4aa09452248ee54a627d125025a43f83e7a7"},
|
||||
{file = "cryptography-47.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:4f7722c97826770bab8ae92959a2e7b20a5e9e9bf4deae68fd86c3ca457bab52"},
|
||||
{file = "cryptography-47.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:09f6d7bf6724f8db8b32f11eccf23efc8e759924bc5603800335cf8859a3ddbd"},
|
||||
{file = "cryptography-47.0.0-cp314-cp314t-win32.whl", hash = "sha256:6eebcaf0df1d21ce1f90605c9b432dd2c4f4ab665ac29a40d5e3fc68f51b5e63"},
|
||||
{file = "cryptography-47.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:51c9313e90bd1690ec5a75ed047c27c0b8e6c570029712943d6116ef9a90620b"},
|
||||
{file = "cryptography-47.0.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:14432c8a9bcb37009784f9594a62fae211a2ae9543e96c92b2a8e4c3cd5cd0c4"},
|
||||
{file = "cryptography-47.0.0-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:07efe86201817e7d3c18781ca9770bc0db04e1e48c994be384e4602bc38f8f27"},
|
||||
{file = "cryptography-47.0.0-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2b45761c6ec22b7c726d6a829558777e32d0f1c8be7c3f3480f9c912d5ee8a10"},
|
||||
{file = "cryptography-47.0.0-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:edd4da498015da5b9f26d38d3bfc2e90257bfa9cbed1f6767c282a0025ae649b"},
|
||||
{file = "cryptography-47.0.0-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:9af828c0d5a65c70ec729cd7495a4bf1a67ecb66417b8f02ff125ab8a6326a74"},
|
||||
{file = "cryptography-47.0.0-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:256d07c78a04d6b276f5df935a9923275f53bd1522f214447fdf365494e2d515"},
|
||||
{file = "cryptography-47.0.0-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:5d0e362ff51041b0c0d219cc7d6924d7b8996f57ce5712bdcef71eb3c65a59cc"},
|
||||
{file = "cryptography-47.0.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:1581aef4219f7ca2849d0250edaa3866212fb74bf5667284f46aa92f9e65c1ca"},
|
||||
{file = "cryptography-47.0.0-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:a49a3eb5341b9503fa3000a9a0db033161db90d47285291f53c2a9d2cd1b7f76"},
|
||||
{file = "cryptography-47.0.0-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:2207a498b03275d0051589e326b79d4cf59985c99031b05bb292ac52631c37fe"},
|
||||
{file = "cryptography-47.0.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:7a02675e2fabd0c0fc04c868b8781863cbf1967691543c22f5470500ff840b31"},
|
||||
{file = "cryptography-47.0.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80887c5cbd1774683cb126f0ab4184567f080071d5acf62205acb354b4b753b7"},
|
||||
{file = "cryptography-47.0.0-cp38-abi3-win32.whl", hash = "sha256:ed67ea4e0cfb5faa5bc7ecb6e2b8838f3807a03758eec239d6c21c8769355310"},
|
||||
{file = "cryptography-47.0.0-cp38-abi3-win_amd64.whl", hash = "sha256:835d2d7f47cdc53b3224e90810fb1d36ca94ea29cc1801fb4c1bc43876735769"},
|
||||
{file = "cryptography-47.0.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f1207974a904e005f762869996cf620e9bf79ecb4622f148550bb48e0eb35a7"},
|
||||
{file = "cryptography-47.0.0-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:1a405c08857258c11016777e11c02bacbe7ef596faf259305d282272a3a05cbe"},
|
||||
{file = "cryptography-47.0.0-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:20fdbe3e38fb67c385d233c89371fa27f9909f6ebca1cecc20c13518dae65475"},
|
||||
{file = "cryptography-47.0.0-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:f7db373287273d8af1414cf95dc4118b13ffdc62be521997b0f2b270771fef50"},
|
||||
{file = "cryptography-47.0.0-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:9fe6b7c64926c765f9dff301f9c1b867febcda5768868ca084e18589113732ab"},
|
||||
{file = "cryptography-47.0.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:cffbba3392df0fa8629bb7f43454ee2925059ee158e23c54620b9063912b86c8"},
|
||||
{file = "cryptography-47.0.0.tar.gz", hash = "sha256:9f8e55fe4e63613a5e1cc5819030f27b97742d720203a087802ce4ce9ceb52bb"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
|
|
@ -752,14 +752,7 @@ cffi = {version = ">=2.0.0", markers = "python_full_version >= \"3.9.0\" and pla
|
|||
typing-extensions = {version = ">=4.13.2", markers = "python_full_version < \"3.11.0\""}
|
||||
|
||||
[package.extras]
|
||||
docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs", "sphinx-rtd-theme (>=3.0.0)"]
|
||||
docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"]
|
||||
nox = ["nox[uv] (>=2024.4.15)"]
|
||||
pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
|
||||
sdist = ["build (>=1.0.0)"]
|
||||
ssh = ["bcrypt (>=3.1.5)"]
|
||||
test = ["certifi (>=2024)", "cryptography-vectors (==46.0.7)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
|
||||
test-randomorder = ["pytest-randomly"]
|
||||
|
||||
[[package]]
|
||||
name = "cycler"
|
||||
|
|
@ -850,14 +843,14 @@ test = ["pytest (>=6)"]
|
|||
|
||||
[[package]]
|
||||
name = "fastapi"
|
||||
version = "0.136.0"
|
||||
version = "0.136.1"
|
||||
description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["server", "test"]
|
||||
files = [
|
||||
{file = "fastapi-0.136.0-py3-none-any.whl", hash = "sha256:8793d44ec7378e2be07f8a013cf7f7aa47d6327d0dfe9804862688ec4541a6b4"},
|
||||
{file = "fastapi-0.136.0.tar.gz", hash = "sha256:cf08e067cc66e106e102d9ba659463abfac245200752f8a5b7b1e813de4ff73e"},
|
||||
{file = "fastapi-0.136.1-py3-none-any.whl", hash = "sha256:a6e9d7eeada96c93a4d69cb03836b44fa34e2854accb7244a1ece36cd4781c3f"},
|
||||
{file = "fastapi-0.136.1.tar.gz", hash = "sha256:7af665ad7acfa0a3baf8983d393b6b471b9da10ede59c60045f49fbc89a0fa7f"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
|
|
@ -1161,18 +1154,18 @@ zstd = ["zstandard (>=0.18.0)"]
|
|||
|
||||
[[package]]
|
||||
name = "idna"
|
||||
version = "3.11"
|
||||
version = "3.13"
|
||||
description = "Internationalized Domain Names in Applications (IDNA)"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["agent", "docs", "server", "test"]
|
||||
files = [
|
||||
{file = "idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea"},
|
||||
{file = "idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902"},
|
||||
{file = "idna-3.13-py3-none-any.whl", hash = "sha256:892ea0cde124a99ce773decba204c5552b69c3c67ffd5f232eb7696135bc8bb3"},
|
||||
{file = "idna-3.13.tar.gz", hash = "sha256:585ea8fe5d69b9181ec1afba340451fba6ba764af97026f92a91d4eef164a242"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
|
||||
all = ["mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
|
||||
|
||||
[[package]]
|
||||
name = "imagesize"
|
||||
|
|
@ -1271,7 +1264,7 @@ files = [
|
|||
|
||||
[package.dependencies]
|
||||
attrs = ">=22.2.0"
|
||||
jsonschema-specifications = ">=2023.03.6"
|
||||
jsonschema-specifications = ">=2023.3.6"
|
||||
referencing = ">=0.28.4"
|
||||
rpds-py = ">=0.25.0"
|
||||
|
||||
|
|
@ -1522,67 +1515,67 @@ files = [
|
|||
|
||||
[[package]]
|
||||
name = "matplotlib"
|
||||
version = "3.10.8"
|
||||
version = "3.10.9"
|
||||
description = "Python plotting package"
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "matplotlib-3.10.8-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:00270d217d6b20d14b584c521f810d60c5c78406dc289859776550df837dcda7"},
|
||||
{file = "matplotlib-3.10.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37b3c1cc42aa184b3f738cfa18c1c1d72fd496d85467a6cf7b807936d39aa656"},
|
||||
{file = "matplotlib-3.10.8-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ee40c27c795bda6a5292e9cff9890189d32f7e3a0bf04e0e3c9430c4a00c37df"},
|
||||
{file = "matplotlib-3.10.8-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a48f2b74020919552ea25d222d5cc6af9ca3f4eb43a93e14d068457f545c2a17"},
|
||||
{file = "matplotlib-3.10.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f254d118d14a7f99d616271d6c3c27922c092dac11112670b157798b89bf4933"},
|
||||
{file = "matplotlib-3.10.8-cp310-cp310-win_amd64.whl", hash = "sha256:f9b587c9c7274c1613a30afabf65a272114cd6cdbe67b3406f818c79d7ab2e2a"},
|
||||
{file = "matplotlib-3.10.8-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6be43b667360fef5c754dda5d25a32e6307a03c204f3c0fc5468b78fa87b4160"},
|
||||
{file = "matplotlib-3.10.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2b336e2d91a3d7006864e0990c83b216fcdca64b5a6484912902cef87313d78"},
|
||||
{file = "matplotlib-3.10.8-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:efb30e3baaea72ce5928e32bab719ab4770099079d66726a62b11b1ef7273be4"},
|
||||
{file = "matplotlib-3.10.8-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d56a1efd5bfd61486c8bc968fa18734464556f0fb8e51690f4ac25d85cbbbbc2"},
|
||||
{file = "matplotlib-3.10.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:238b7ce5717600615c895050239ec955d91f321c209dd110db988500558e70d6"},
|
||||
{file = "matplotlib-3.10.8-cp311-cp311-win_amd64.whl", hash = "sha256:18821ace09c763ec93aef5eeff087ee493a24051936d7b9ebcad9662f66501f9"},
|
||||
{file = "matplotlib-3.10.8-cp311-cp311-win_arm64.whl", hash = "sha256:bab485bcf8b1c7d2060b4fcb6fc368a9e6f4cd754c9c2fea281f4be21df394a2"},
|
||||
{file = "matplotlib-3.10.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:64fcc24778ca0404ce0cb7b6b77ae1f4c7231cdd60e6778f999ee05cbd581b9a"},
|
||||
{file = "matplotlib-3.10.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b9a5ca4ac220a0cdd1ba6bcba3608547117d30468fefce49bb26f55c1a3d5c58"},
|
||||
{file = "matplotlib-3.10.8-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3ab4aabc72de4ff77b3ec33a6d78a68227bf1123465887f9905ba79184a1cc04"},
|
||||
{file = "matplotlib-3.10.8-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:24d50994d8c5816ddc35411e50a86ab05f575e2530c02752e02538122613371f"},
|
||||
{file = "matplotlib-3.10.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:99eefd13c0dc3b3c1b4d561c1169e65fe47aab7b8158754d7c084088e2329466"},
|
||||
{file = "matplotlib-3.10.8-cp312-cp312-win_amd64.whl", hash = "sha256:dd80ecb295460a5d9d260df63c43f4afbdd832d725a531f008dad1664f458adf"},
|
||||
{file = "matplotlib-3.10.8-cp312-cp312-win_arm64.whl", hash = "sha256:3c624e43ed56313651bc18a47f838b60d7b8032ed348911c54906b130b20071b"},
|
||||
{file = "matplotlib-3.10.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3f2e409836d7f5ac2f1c013110a4d50b9f7edc26328c108915f9075d7d7a91b6"},
|
||||
{file = "matplotlib-3.10.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:56271f3dac49a88d7fca5060f004d9d22b865f743a12a23b1e937a0be4818ee1"},
|
||||
{file = "matplotlib-3.10.8-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a0a7f52498f72f13d4a25ea70f35f4cb60642b466cbb0a9be951b5bc3f45a486"},
|
||||
{file = "matplotlib-3.10.8-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:646d95230efb9ca614a7a594d4fcacde0ac61d25e37dd51710b36477594963ce"},
|
||||
{file = "matplotlib-3.10.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f89c151aab2e2e23cb3fe0acad1e8b82841fd265379c4cecd0f3fcb34c15e0f6"},
|
||||
{file = "matplotlib-3.10.8-cp313-cp313-win_amd64.whl", hash = "sha256:e8ea3e2d4066083e264e75c829078f9e149fa119d27e19acd503de65e0b13149"},
|
||||
{file = "matplotlib-3.10.8-cp313-cp313-win_arm64.whl", hash = "sha256:c108a1d6fa78a50646029cb6d49808ff0fc1330fda87fa6f6250c6b5369b6645"},
|
||||
{file = "matplotlib-3.10.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:ad3d9833a64cf48cc4300f2b406c3d0f4f4724a91c0bd5640678a6ba7c102077"},
|
||||
{file = "matplotlib-3.10.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:eb3823f11823deade26ce3b9f40dcb4a213da7a670013929f31d5f5ed1055b22"},
|
||||
{file = "matplotlib-3.10.8-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d9050fee89a89ed57b4fb2c1bfac9a3d0c57a0d55aed95949eedbc42070fea39"},
|
||||
{file = "matplotlib-3.10.8-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b44d07310e404ba95f8c25aa5536f154c0a8ec473303535949e52eb71d0a1565"},
|
||||
{file = "matplotlib-3.10.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:0a33deb84c15ede243aead39f77e990469fff93ad1521163305095b77b72ce4a"},
|
||||
{file = "matplotlib-3.10.8-cp313-cp313t-win_amd64.whl", hash = "sha256:3a48a78d2786784cc2413e57397981fb45c79e968d99656706018d6e62e57958"},
|
||||
{file = "matplotlib-3.10.8-cp313-cp313t-win_arm64.whl", hash = "sha256:15d30132718972c2c074cd14638c7f4592bd98719e2308bccea40e0538bc0cb5"},
|
||||
{file = "matplotlib-3.10.8-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b53285e65d4fa4c86399979e956235deb900be5baa7fc1218ea67fbfaeaadd6f"},
|
||||
{file = "matplotlib-3.10.8-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:32f8dce744be5569bebe789e46727946041199030db8aeb2954d26013a0eb26b"},
|
||||
{file = "matplotlib-3.10.8-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4cf267add95b1c88300d96ca837833d4112756045364f5c734a2276038dae27d"},
|
||||
{file = "matplotlib-3.10.8-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2cf5bd12cecf46908f286d7838b2abc6c91cda506c0445b8223a7c19a00df008"},
|
||||
{file = "matplotlib-3.10.8-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:41703cc95688f2516b480f7f339d8851a6035f18e100ee6a32bc0b8536a12a9c"},
|
||||
{file = "matplotlib-3.10.8-cp314-cp314-win_amd64.whl", hash = "sha256:83d282364ea9f3e52363da262ce32a09dfe241e4080dcedda3c0db059d3c1f11"},
|
||||
{file = "matplotlib-3.10.8-cp314-cp314-win_arm64.whl", hash = "sha256:2c1998e92cd5999e295a731bcb2911c75f597d937341f3030cc24ef2733d78a8"},
|
||||
{file = "matplotlib-3.10.8-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:b5a2b97dbdc7d4f353ebf343744f1d1f1cca8aa8bfddb4262fcf4306c3761d50"},
|
||||
{file = "matplotlib-3.10.8-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:3f5c3e4da343bba819f0234186b9004faba952cc420fbc522dc4e103c1985908"},
|
||||
{file = "matplotlib-3.10.8-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f62550b9a30afde8c1c3ae450e5eb547d579dd69b25c2fc7a1c67f934c1717a"},
|
||||
{file = "matplotlib-3.10.8-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:495672de149445ec1b772ff2c9ede9b769e3cb4f0d0aa7fa730d7f59e2d4e1c1"},
|
||||
{file = "matplotlib-3.10.8-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:595ba4d8fe983b88f0eec8c26a241e16d6376fe1979086232f481f8f3f67494c"},
|
||||
{file = "matplotlib-3.10.8-cp314-cp314t-win_amd64.whl", hash = "sha256:25d380fe8b1dc32cf8f0b1b448470a77afb195438bafdf1d858bfb876f3edf7b"},
|
||||
{file = "matplotlib-3.10.8-cp314-cp314t-win_arm64.whl", hash = "sha256:113bb52413ea508ce954a02c10ffd0d565f9c3bc7f2eddc27dfe1731e71c7b5f"},
|
||||
{file = "matplotlib-3.10.8-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:f97aeb209c3d2511443f8797e3e5a569aebb040d4f8bc79aa3ee78a8fb9e3dd8"},
|
||||
{file = "matplotlib-3.10.8-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fb061f596dad3a0f52b60dc6a5dec4a0c300dec41e058a7efe09256188d170b7"},
|
||||
{file = "matplotlib-3.10.8-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:12d90df9183093fcd479f4172ac26b322b1248b15729cb57f42f71f24c7e37a3"},
|
||||
{file = "matplotlib-3.10.8-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:6da7c2ce169267d0d066adcf63758f0604aa6c3eebf67458930f9d9b79ad1db1"},
|
||||
{file = "matplotlib-3.10.8-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9153c3292705be9f9c64498a8872118540c3f4123d1a1c840172edf262c8be4a"},
|
||||
{file = "matplotlib-3.10.8-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1ae029229a57cd1e8fe542485f27e7ca7b23aa9e8944ddb4985d0bc444f1eca2"},
|
||||
{file = "matplotlib-3.10.8.tar.gz", hash = "sha256:2299372c19d56bcd35cf05a2738308758d32b9eaed2371898d8f5bd33f084aa3"},
|
||||
{file = "matplotlib-3.10.9-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77210dce9cb8153dffc967efaae990543392563d5a376d4dd8539bebcb0ed217"},
|
||||
{file = "matplotlib-3.10.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1e7698ac9868428e84d2c967424803b2472ff7167d9d6590d4204ed775343c3b"},
|
||||
{file = "matplotlib-3.10.9-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1aa972116abb4c9d201bf245620b433726cb6856f3bef6a78f776a00f5c92d37"},
|
||||
{file = "matplotlib-3.10.9-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ae2f11957b27ce53497dd4d7b235c4d4f1faf383dfb39d0c5beb833bff883294"},
|
||||
{file = "matplotlib-3.10.9-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b049278ddce116aaa1c1377ebf58adea909132dfce0281cf7e3a1ea9fc2e2c65"},
|
||||
{file = "matplotlib-3.10.9-cp310-cp310-win_amd64.whl", hash = "sha256:82834c3c292d24d3a8aae77cd2d20019de69d692a34a970e4fdb8d33e2ea3dda"},
|
||||
{file = "matplotlib-3.10.9-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:68cfdcede415f7c8f5577b03303dd94526cdb6d11036cecdc205e08733b2d2bb"},
|
||||
{file = "matplotlib-3.10.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfca0129678bd56379db26c52b5d77ed7de314c047492fbdc763aa7501710cfb"},
|
||||
{file = "matplotlib-3.10.9-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8e436d155fa8a3399dc62683f8f5d0e2e50d25d0144a73edd73f82eec8f4abfb"},
|
||||
{file = "matplotlib-3.10.9-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56fc0bd271b00025c6edfdc7c2dcd247372c8e1544971d62e1dc7c17367e8bf9"},
|
||||
{file = "matplotlib-3.10.9-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a5a6104ed666402ba5106d7f36e0e0cdca4e8d7fa4d39708ca88019e2835a2eb"},
|
||||
{file = "matplotlib-3.10.9-cp311-cp311-win_amd64.whl", hash = "sha256:d730e984eddf56974c3e72b6129c7ca462ac38dc624338f4b0b23eb23ecba00f"},
|
||||
{file = "matplotlib-3.10.9-cp311-cp311-win_arm64.whl", hash = "sha256:51bf0ddbdc598e060d46c16b5590708f81a1624cefbaaf62f6a81bf9285b8c80"},
|
||||
{file = "matplotlib-3.10.9-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f0c3c28d9fbcc1fe7a03be236d73430cf6409c41fb2383a7ac52fe932b072cb1"},
|
||||
{file = "matplotlib-3.10.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:41cb28c2bd769aa3e98322c6ab09854cbcc52ab69d2759d681bba3e327b2b320"},
|
||||
{file = "matplotlib-3.10.9-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ae20801130378b82d647ff5047c07316295b68dc054ca6b3c13519d0ea624285"},
|
||||
{file = "matplotlib-3.10.9-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6c63ebcd8b4b169eb2f5c200552ae6b8be8999a005b6b507ed76fb8d7d674fe2"},
|
||||
{file = "matplotlib-3.10.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d75d11c949914165976c621b2324f9ef162af7ebf4b057ddf95dd1dba7e5edcf"},
|
||||
{file = "matplotlib-3.10.9-cp312-cp312-win_amd64.whl", hash = "sha256:d091f9d758b34aaaaa6331d13574bf01891d903b3dec59bfff458ef7551de5d6"},
|
||||
{file = "matplotlib-3.10.9-cp312-cp312-win_arm64.whl", hash = "sha256:10cc5ce06d10231c36f40e875f3c7e8050362a4ee8f0ee5d29a6b3277d57bb42"},
|
||||
{file = "matplotlib-3.10.9-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b580440f1ff81a0e34122051a3dfabb7e4b7f9e380629929bde0eff9af72165f"},
|
||||
{file = "matplotlib-3.10.9-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b1b745c489cd1a77a0dc1120a05dc87af9798faebc913601feb8c73d89bf2d1e"},
|
||||
{file = "matplotlib-3.10.9-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8f3bcac1ca5ed000a6f4337d47ba67dfddf37ed6a46c15fd7f014997f7bf865f"},
|
||||
{file = "matplotlib-3.10.9-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7a8d66a55def891c33147ba3ba9bfcabf0b526a43764c818acbb4525e5ed0838"},
|
||||
{file = "matplotlib-3.10.9-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d843374407c4017a6403b59c6c81606773d136f3259d5b6da3131bc814542cc2"},
|
||||
{file = "matplotlib-3.10.9-cp313-cp313-win_amd64.whl", hash = "sha256:f4399f64b3e94cd500195490972ae1ee81170df1636fa15364d157d5bdd7b921"},
|
||||
{file = "matplotlib-3.10.9-cp313-cp313-win_arm64.whl", hash = "sha256:ba7b3b8ef09eab7df0e86e9ae086faa433efbfbdb46afcb3aa16aabf779469a8"},
|
||||
{file = "matplotlib-3.10.9-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:09218df8a93712bd6ea133e83a153c755448cf7868316c531cffcc43f69d1cc9"},
|
||||
{file = "matplotlib-3.10.9-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:82368699727bfb7b0182e1aa13082e3c08e092fa1a25d3e1fd92405bff96f6d4"},
|
||||
{file = "matplotlib-3.10.9-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3225f4e1edcb8c86c884ddf79ebe20ecd0a67d30188f279897554ccd8fded4dc"},
|
||||
{file = "matplotlib-3.10.9-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de2445a0c6690d21b7eb6ce071cebad6d40a2e9bdf10d039074a96ba19797b99"},
|
||||
{file = "matplotlib-3.10.9-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:b2b9516251cb89ff618d757daec0e2ed1bf21248013844a853d87ef85ab3081d"},
|
||||
{file = "matplotlib-3.10.9-cp313-cp313t-win_amd64.whl", hash = "sha256:e9fae004b941b23ff2edcf1567a857ed77bafc8086ffa258190462328434faf8"},
|
||||
{file = "matplotlib-3.10.9-cp313-cp313t-win_arm64.whl", hash = "sha256:6b63d9c7c769b88ab81e10dc86e4e0607cf56817b9f9e6cf24b2a5f1693b8e38"},
|
||||
{file = "matplotlib-3.10.9-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:172db52c9e683f5d12eaf57f0f54834190e12581fe1cc2a19595a8f5acb4e77d"},
|
||||
{file = "matplotlib-3.10.9-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:97e35e8d39ccc85859095e01a53847432ba9a53ddf7986f7a54a11b73d0e143f"},
|
||||
{file = "matplotlib-3.10.9-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aba1615dabe83188e19d4f75a253c6a08423e04c1425e64039f800050a69de6b"},
|
||||
{file = "matplotlib-3.10.9-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34cf8167e023ad956c15f36302911d5406bd99a9862c1a8499ea6f7c0e015dc2"},
|
||||
{file = "matplotlib-3.10.9-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:59476c6d29d612b8e9bb6ce8c5b631be6ba8f9e3a2421f22a02b192c7dd28716"},
|
||||
{file = "matplotlib-3.10.9-cp314-cp314-win_amd64.whl", hash = "sha256:336b9acc64d309063126edcdaca00db9373af3c476bb94388fe9c5a53ad13e6f"},
|
||||
{file = "matplotlib-3.10.9-cp314-cp314-win_arm64.whl", hash = "sha256:2dc9477819ffd78ad12a20df1d9d6a6bd4fec6aaa9072681465fddca052f1456"},
|
||||
{file = "matplotlib-3.10.9-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:da4e09638420548f31c354032a6250e473c68e5a4e96899b4844cf39ddea23fe"},
|
||||
{file = "matplotlib-3.10.9-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:345f6f68ecc8da0ca56fad2ea08fde1a115eda530079eca185d50a7bc3e146c6"},
|
||||
{file = "matplotlib-3.10.9-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4edcfbd8565339aa62f1cd4012f7180926fdbe71850f7b0d3c379c175cd6b66c"},
|
||||
{file = "matplotlib-3.10.9-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6be157fe17fc37cb95ac1d7374cf717ce9259616edec911a78d9d26dae8522d4"},
|
||||
{file = "matplotlib-3.10.9-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:4e42042d54db34fda4e95a7bd3e5789c2a995d2dad3eb8850232ee534092fbbf"},
|
||||
{file = "matplotlib-3.10.9-cp314-cp314t-win_amd64.whl", hash = "sha256:c27df8b3848f32a83d1767566595e43cfaa4460380974da06f4279a7ec143c39"},
|
||||
{file = "matplotlib-3.10.9-cp314-cp314t-win_arm64.whl", hash = "sha256:a49f1eadc84ca85fd72fa4e89e70e61bf86452df6f971af04b12c60761a0772c"},
|
||||
{file = "matplotlib-3.10.9-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1872fb212a05b729e649754a72d5da61d03e0554d76e80303b6f83d1d2c0552b"},
|
||||
{file = "matplotlib-3.10.9-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:985f2238880e2e69093f588f5fe2e46771747febf0649f3cf7f7b7480875317f"},
|
||||
{file = "matplotlib-3.10.9-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6640f75af2c6148293caa0a2b39dd806a492dd66c8a8b04035813e33d0fd2585"},
|
||||
{file = "matplotlib-3.10.9-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:42fb814efabe95c06c1994d8ab5a8385f43a249e23badd3ba931d4308e5bca20"},
|
||||
{file = "matplotlib-3.10.9-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:f76e640a5268850bfda54b5131b1b1941cc685e42c5fa98ed9f2d64038308cba"},
|
||||
{file = "matplotlib-3.10.9-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3fc0364dfbe1d07f6d15c5ebd0c5bf89e126916e5a8667dd4a7a6e84c36653d4"},
|
||||
{file = "matplotlib-3.10.9.tar.gz", hash = "sha256:fd66508e8c6877d98e586654b608a0456db8d7e8a546eb1e2600efd957302358"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
|
|
@ -1597,7 +1590,7 @@ pyparsing = ">=3"
|
|||
python-dateutil = ">=2.7"
|
||||
|
||||
[package.extras]
|
||||
dev = ["meson-python (>=0.13.1,<0.17.0)", "pybind11 (>=2.13.2,!=2.13.3)", "setuptools (>=64)", "setuptools_scm (>=7)"]
|
||||
dev = ["meson-python (>=0.13.1,<0.17.0)", "pybind11 (>=2.13.2,!=2.13.3)", "setuptools (>=64)", "setuptools_scm (>=7,<10)"]
|
||||
|
||||
[[package]]
|
||||
name = "mccabe"
|
||||
|
|
@ -1611,25 +1604,6 @@ files = [
|
|||
{file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mpmath"
|
||||
version = "1.3.0"
|
||||
description = "Python library for arbitrary-precision floating-point arithmetic"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
groups = ["server", "test"]
|
||||
markers = "python_version >= \"3.11\""
|
||||
files = [
|
||||
{file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"},
|
||||
{file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"]
|
||||
docs = ["sphinx"]
|
||||
gmpy = ["gmpy2 (>=2.1.0a4) ; platform_python_implementation != \"PyPy\""]
|
||||
tests = ["pytest (>=4.6)"]
|
||||
|
||||
[[package]]
|
||||
name = "mypy-extensions"
|
||||
version = "1.1.0"
|
||||
|
|
@ -1717,37 +1691,37 @@ markers = {server = "python_version >= \"3.11\"", test = "python_version >= \"3.
|
|||
|
||||
[[package]]
|
||||
name = "onnxruntime"
|
||||
version = "1.24.4"
|
||||
version = "1.25.0"
|
||||
description = "ONNX Runtime is a runtime accelerator for Machine Learning models"
|
||||
optional = false
|
||||
python-versions = ">=3.11"
|
||||
groups = ["server", "test"]
|
||||
markers = "python_version >= \"3.11\""
|
||||
files = [
|
||||
{file = "onnxruntime-1.24.4-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:0bdfce8e9a6497cec584aab407b71bf697dac5e1b7b7974adc50bf7533bdb3a2"},
|
||||
{file = "onnxruntime-1.24.4-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:046ff290045a387676941a02a8ae5c3ebec6b4f551ae228711968c4a69d8f6b7"},
|
||||
{file = "onnxruntime-1.24.4-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e54ad52e61d2d4618dcff8fa1480ac66b24ee2eab73331322db1049f11ccf330"},
|
||||
{file = "onnxruntime-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b43b63eb24a2bc8fc77a09be67587a570967a412cccb837b6245ccb546691153"},
|
||||
{file = "onnxruntime-1.24.4-cp311-cp311-win_arm64.whl", hash = "sha256:e26478356dba25631fb3f20112e345f8e8bf62c499bb497e8a559f7d69cf7e7b"},
|
||||
{file = "onnxruntime-1.24.4-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:cad1c2b3f455c55678ab2a8caa51fb420c25e6e3cf10f4c23653cdabedc8de78"},
|
||||
{file = "onnxruntime-1.24.4-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1a5c5a544b22f90859c88617ecb30e161ee3349fcc73878854f43d77f00558b5"},
|
||||
{file = "onnxruntime-1.24.4-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0d640eb9f3782689b55cfa715094474cd5662f2f137be6a6f847a594b6e9705c"},
|
||||
{file = "onnxruntime-1.24.4-cp312-cp312-win_amd64.whl", hash = "sha256:535b29475ca42b593c45fbb2152fbf1cdf3f287315bf650e6a724a0a1d065cdb"},
|
||||
{file = "onnxruntime-1.24.4-cp312-cp312-win_arm64.whl", hash = "sha256:e6214096e14b7b52e3bee1903dc12dc7ca09cb65e26664668a4620cc5e6f9a90"},
|
||||
{file = "onnxruntime-1.24.4-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e99a48078baaefa2b50fe5836c319499f71f13f76ed32d0211f39109147a49e0"},
|
||||
{file = "onnxruntime-1.24.4-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dc4aaed1e5e1aaacf2343c838a30a7c3ade78f13eeb16817411f929d04040a13"},
|
||||
{file = "onnxruntime-1.24.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e30c972bc02e072911aabb6891453ec73795386c0af2b761b65444b8a4c4745f"},
|
||||
{file = "onnxruntime-1.24.4-cp313-cp313-win_amd64.whl", hash = "sha256:3b6ba8b0181a3aa88edab00eb01424ffc06f42e71095a91186c2249415fcff93"},
|
||||
{file = "onnxruntime-1.24.4-cp313-cp313-win_arm64.whl", hash = "sha256:71d6a5c1821d6e8586a024000ece458db8f2fc0ecd050435d45794827ce81e19"},
|
||||
{file = "onnxruntime-1.24.4-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1700f559c8086d06b2a4d5de51e62cb4ff5e2631822f71a36db8c72383db71ee"},
|
||||
{file = "onnxruntime-1.24.4-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c74e268dc808e61e63784d43f9ddcdaf50a776c2819e8bd1d1b11ef64bf7e36"},
|
||||
{file = "onnxruntime-1.24.4-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:fbff2a248940e3398ae78374c5a839e49a2f39079b488bc64439fa0ec327a3e4"},
|
||||
{file = "onnxruntime-1.24.4-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e2b7969e72d8cb53ffc88ab6d49dd5e75c1c663bda7be7eb0ece192f127343d1"},
|
||||
{file = "onnxruntime-1.24.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14ed1f197fab812b695a5eaddb536c635e58a2fbbe50a517c78f082cc6ce9177"},
|
||||
{file = "onnxruntime-1.24.4-cp314-cp314-win_amd64.whl", hash = "sha256:311e309f573bf3c12aa5723e23823077f83d5e412a18499d4485c7eb41040858"},
|
||||
{file = "onnxruntime-1.24.4-cp314-cp314-win_arm64.whl", hash = "sha256:3f0b910e86b759a4732663ec61fd57ac42ee1b0066f68299de164220b660546d"},
|
||||
{file = "onnxruntime-1.24.4-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aa12ddc54c9c4594073abcaa265cd9681e95fb89dae982a6f508a794ca42e661"},
|
||||
{file = "onnxruntime-1.24.4-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1cc6a518255f012134bc791975a6294806be9a3b20c4a54cca25194c90cf731"},
|
||||
{file = "onnxruntime-1.25.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:a71baa8e0e2f3417106e3a8b2183fd5741875b998041f1a2422a1d0240f302cb"},
|
||||
{file = "onnxruntime-1.25.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:15f220ed2ac4a549c97a31e57f21311add9f13d381f13ed1a52be5b25275038c"},
|
||||
{file = "onnxruntime-1.25.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a349feb15476092372eb045a340cd88e53f7965ce5b489ebf5fea20c0bd49add"},
|
||||
{file = "onnxruntime-1.25.0-cp311-cp311-win_amd64.whl", hash = "sha256:d32eff37efac78c676f1a3b3102863de5e55fbabc18348ec2c1439c18f3e90fa"},
|
||||
{file = "onnxruntime-1.25.0-cp311-cp311-win_arm64.whl", hash = "sha256:fcb074b3c62ffa315e222ad246e46125b046b9b531c719852deda7c89b72ebb7"},
|
||||
{file = "onnxruntime-1.25.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:8ecd3362de3fb496fb3e2d055a95d5acab611cf759a27609c6d99704c9d8f184"},
|
||||
{file = "onnxruntime-1.25.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c99238d20bfa80ac68c7b03c2c936d389189ae40997f78a30d151570d7e18bf"},
|
||||
{file = "onnxruntime-1.25.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:be93baa694ef8e5831fcb7b542da21f502b122918b5b9612d9f02972e043ee01"},
|
||||
{file = "onnxruntime-1.25.0-cp312-cp312-win_amd64.whl", hash = "sha256:9596040c1f7d247bbfab5d4db1e7651c790235e48e460c7d445ec81687d5a182"},
|
||||
{file = "onnxruntime-1.25.0-cp312-cp312-win_arm64.whl", hash = "sha256:463aed7f5e4a3ca5a476db7e9bba9164fa26921ef34c37e59b28c4c61e55f266"},
|
||||
{file = "onnxruntime-1.25.0-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:1b3d76cf770afba76859f270679c9ad0b017b9357eb5892e91926943e05ca82c"},
|
||||
{file = "onnxruntime-1.25.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cddb565dfd630550a8817b3d5493ffcfa0fec273b545b2816f2fce53384e1151"},
|
||||
{file = "onnxruntime-1.25.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ade74e651e28b39e6bfd6f576cb9b8a4edfa0916234145154dc891bd55331c22"},
|
||||
{file = "onnxruntime-1.25.0-cp313-cp313-win_amd64.whl", hash = "sha256:9196c32c039c37ce8362cbee0aa3a704679be5f2b6fb3e849fea927c98fe1e5b"},
|
||||
{file = "onnxruntime-1.25.0-cp313-cp313-win_arm64.whl", hash = "sha256:b3e52dc2208dec6f61ef118dff04610927e9a18d99e019a828799b23cc9cdea4"},
|
||||
{file = "onnxruntime-1.25.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de8548d8fe8fd58ca841178051d535d6f378efae14a4b4eb336617d80540fb41"},
|
||||
{file = "onnxruntime-1.25.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4edec672d09e34b9e83ad09c44454ce97627388f32858b1d59fe01d091ff54b5"},
|
||||
{file = "onnxruntime-1.25.0-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:38f27febd2ff034a600a8bdbea34b1f7c961a2dab6bcb5351e70548fea456161"},
|
||||
{file = "onnxruntime-1.25.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6e0ae389ed1647f11c1b501ba1cef1e2c7453002f626136ace214c9c46153ee4"},
|
||||
{file = "onnxruntime-1.25.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7ca32d38173c0f58699ca9dc9e867de74d2c2ab7d1c2d969f862ee8633370b77"},
|
||||
{file = "onnxruntime-1.25.0-cp314-cp314-win_amd64.whl", hash = "sha256:a2829e29621db7a4bcd457e6d0f3e4f541fb274c7127e7d2e1a5b46c70572672"},
|
||||
{file = "onnxruntime-1.25.0-cp314-cp314-win_arm64.whl", hash = "sha256:2bed9b35568b3ecf8ab34dc832d37216e47947e86508a0fd6b75e4c19d7ba907"},
|
||||
{file = "onnxruntime-1.25.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:00548a16e8f0d52cb1c67ef50177e5e2be848ccffc6db60010ee37faaccbbb6f"},
|
||||
{file = "onnxruntime-1.25.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a071a0740388e0ffad081c583761f37837b113bde3d03dc70790ed6cf4f4de0b"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
|
|
@ -1755,18 +1729,21 @@ flatbuffers = "*"
|
|||
numpy = ">=1.21.6"
|
||||
packaging = "*"
|
||||
protobuf = "*"
|
||||
sympy = "*"
|
||||
|
||||
[package.extras]
|
||||
quantization = ["ml_dtypes"]
|
||||
symbolic = ["sympy"]
|
||||
|
||||
[[package]]
|
||||
name = "packaging"
|
||||
version = "26.1"
|
||||
version = "26.2"
|
||||
description = "Core utilities for Python packages"
|
||||
optional = false
|
||||
python-versions = ">=3.8"
|
||||
groups = ["main", "dev", "docs", "server", "test"]
|
||||
files = [
|
||||
{file = "packaging-26.1-py3-none-any.whl", hash = "sha256:5d9c0669c6285e491e0ced2eee587eaf67b670d94a19e94e3984a481aba6802f"},
|
||||
{file = "packaging-26.1.tar.gz", hash = "sha256:f042152b681c4bfac5cae2742a55e103d27ab2ec0f3d88037136b6bfe7c9c5de"},
|
||||
{file = "packaging-26.2-py3-none-any.whl", hash = "sha256:5fc45236b9446107ff2415ce77c807cee2862cb6fac22b8a73826d0693b0980e"},
|
||||
{file = "packaging-26.2.tar.gz", hash = "sha256:ff452ff5a3e828ce110190feff1178bb1f2ea2281fa2075aadb987c2fb221661"},
|
||||
]
|
||||
markers = {server = "python_version >= \"3.11\""}
|
||||
|
||||
|
|
@ -1893,21 +1870,20 @@ gssapi = ["gssapi (>=1.4.1) ; platform_system != \"Windows\"", "pyasn1 (>=0.1.7)
|
|||
|
||||
[[package]]
|
||||
name = "pathspec"
|
||||
version = "1.0.4"
|
||||
version = "1.1.1"
|
||||
description = "Utility library for gitignore style pattern matching of file paths."
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["dev"]
|
||||
files = [
|
||||
{file = "pathspec-1.0.4-py3-none-any.whl", hash = "sha256:fb6ae2fd4e7c921a165808a552060e722767cfa526f99ca5156ed2ce45a5c723"},
|
||||
{file = "pathspec-1.0.4.tar.gz", hash = "sha256:0210e2ae8a21a9137c0d470578cb0e595af87edaa6ebf12ff176f14a02e0e645"},
|
||||
{file = "pathspec-1.1.1-py3-none-any.whl", hash = "sha256:a00ce642f577bf7f473932318056212bc4f8bfdf53128c78bbd5af0b9b20b189"},
|
||||
{file = "pathspec-1.1.1.tar.gz", hash = "sha256:17db5ecd524104a120e173814c90367a96a98d07c45b2e10c2f3919fff91bf5a"},
|
||||
]
|
||||
|
||||
[package.extras]
|
||||
hyperscan = ["hyperscan (>=0.7)"]
|
||||
optional = ["typing-extensions (>=4)"]
|
||||
re2 = ["google-re2 (>=1.1)"]
|
||||
tests = ["pytest (>=9)", "typing-extensions (>=4.15)"]
|
||||
|
||||
[[package]]
|
||||
name = "pillow"
|
||||
|
|
@ -2974,14 +2950,14 @@ test = ["Cython", "array-api-strict (>=2.0,<2.1.1)", "asv", "gmpy2", "hypothesis
|
|||
|
||||
[[package]]
|
||||
name = "sigmf"
|
||||
version = "1.8.0"
|
||||
version = "1.9.0"
|
||||
description = "Easily interact with Signal Metadata Format (SigMF) recordings."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "sigmf-1.8.0-py3-none-any.whl", hash = "sha256:f233ab04344fa3e42170926a646f7e53edd7edc65fcda42eb3d7efaf8a2e8263"},
|
||||
{file = "sigmf-1.8.0.tar.gz", hash = "sha256:91e10cb046499639e5f961d66a24c17a33ff76fc98df892eab0953cc9d659a50"},
|
||||
{file = "sigmf-1.9.0-py3-none-any.whl", hash = "sha256:902e694894e61f8cdb75b0d69ae8c407f82f35435c3c5e4c1b586b313f77b89b"},
|
||||
{file = "sigmf-1.9.0.tar.gz", hash = "sha256:95e4b28156b2182035ecca5f5852108fb3cdef5f20b0cd48919bb0fc5f293d0e"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
|
|
@ -3229,25 +3205,6 @@ typing-extensions = {version = ">=4.10.0", markers = "python_version < \"3.13\""
|
|||
[package.extras]
|
||||
full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"]
|
||||
|
||||
[[package]]
|
||||
name = "sympy"
|
||||
version = "1.14.0"
|
||||
description = "Computer algebra system (CAS) in Python"
|
||||
optional = false
|
||||
python-versions = ">=3.9"
|
||||
groups = ["server", "test"]
|
||||
markers = "python_version >= \"3.11\""
|
||||
files = [
|
||||
{file = "sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5"},
|
||||
{file = "sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
mpmath = ">=1.1.0,<1.4"
|
||||
|
||||
[package.extras]
|
||||
dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "tomli"
|
||||
version = "2.4.1"
|
||||
|
|
@ -3389,14 +3346,14 @@ typing-extensions = ">=4.12.0"
|
|||
|
||||
[[package]]
|
||||
name = "tzdata"
|
||||
version = "2026.1"
|
||||
version = "2026.2"
|
||||
description = "Provider of IANA time zone data"
|
||||
optional = false
|
||||
python-versions = ">=2"
|
||||
groups = ["main"]
|
||||
files = [
|
||||
{file = "tzdata-2026.1-py2.py3-none-any.whl", hash = "sha256:4b1d2be7ac37ceafd7327b961aa3a54e467efbdb563a23655fbfe0d39cfc42a9"},
|
||||
{file = "tzdata-2026.1.tar.gz", hash = "sha256:67658a1903c75917309e753fdc349ac0efd8c27db7a0cb406a25be4840f87f98"},
|
||||
{file = "tzdata-2026.2-py2.py3-none-any.whl", hash = "sha256:bbe9af844f658da81a5f95019480da3a89415801f6cc966806612cc7169bffe7"},
|
||||
{file = "tzdata-2026.2.tar.gz", hash = "sha256:9173fde7d80d9018e02a662e168e5a2d04f87c41ea174b139fbef642eda62d10"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -3419,14 +3376,14 @@ zstd = ["backports-zstd (>=1.0.0) ; python_version < \"3.14\""]
|
|||
|
||||
[[package]]
|
||||
name = "uvicorn"
|
||||
version = "0.44.0"
|
||||
version = "0.46.0"
|
||||
description = "The lightning-fast ASGI server."
|
||||
optional = false
|
||||
python-versions = ">=3.10"
|
||||
groups = ["docs", "server", "test"]
|
||||
files = [
|
||||
{file = "uvicorn-0.44.0-py3-none-any.whl", hash = "sha256:ce937c99a2cc70279556967274414c087888e8cec9f9c94644dfca11bd3ced89"},
|
||||
{file = "uvicorn-0.44.0.tar.gz", hash = "sha256:6c942071b68f07e178264b9152f1f16dfac5da85880c4ce06366a96d70d4f31e"},
|
||||
{file = "uvicorn-0.46.0-py3-none-any.whl", hash = "sha256:bbebbcbed972d162afca128605223022bedd345b7bc7855ce66deb31487a9048"},
|
||||
{file = "uvicorn-0.46.0.tar.gz", hash = "sha256:fb9da0926999cc6cb22dc7cd71a94a632f078e6ae47ff683c5c420750fb7413d"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
|
|
@ -3749,4 +3706,4 @@ files = [
|
|||
[metadata]
|
||||
lock-version = "2.1"
|
||||
python-versions = ">=3.10"
|
||||
content-hash = "66c9adf647316db90f963da05e8a83574378bfa4db2c69ce751446b5ee7c408c"
|
||||
content-hash = "ffde300b2fc93161d2279a6e2b899bc988d3b5eb3833135821830affc9a5fb62"
|
||||
|
|
|
|||
|
|
@ -50,7 +50,7 @@ dependencies = [
|
|||
"pyyaml (>=6.0.3,<7.0.0)",
|
||||
"click (>=8.1.0,<9.0.0)",
|
||||
"matplotlib (>=3.8.0,<4.0.0)",
|
||||
"paramiko (>=3.5.1)"
|
||||
"paramiko (>=4.0.0)"
|
||||
]
|
||||
|
||||
# [project.optional-dependencies] Commented out to prevent Tox tests from failing
|
||||
|
|
@ -149,11 +149,6 @@ exclude = '''
|
|||
|
||||
[tool.pytest.ini_options]
|
||||
pythonpath = ["src"]
|
||||
filterwarnings = [
|
||||
# FastAPI emits this internally when handling 422 responses; the constant
|
||||
# is not yet renamed in the installed starlette version, so we can't migrate.
|
||||
"ignore:'HTTP_422_UNPROCESSABLE_ENTITY' is deprecated:DeprecationWarning",
|
||||
]
|
||||
|
||||
[tool.isort]
|
||||
profile = "black"
|
||||
|
|
|
|||
|
|
@ -5,8 +5,11 @@ Subcommands:
|
|||
- ``ria-agent run [legacy args]`` — legacy long-poll NodeAgent (unchanged).
|
||||
- ``ria-agent stream`` — new WebSocket-based IQ streamer.
|
||||
- ``ria-agent detect`` — print SDR drivers whose modules import cleanly.
|
||||
- ``ria-agent register --hub URL --api-key KEY`` — register with the hub and
|
||||
save credentials (and optional TX interlocks) to ``~/.ria/agent.json``.
|
||||
- ``ria-agent register --hub URL --api-key KEY`` — register with the hub
|
||||
using a personal registration key (minted from **Settings → RIA Agents**
|
||||
on the hub, shown once at mint time) and save credentials (and optional
|
||||
TX interlocks) to ``~/.ria/agent.json``. The hub also accepts the legacy
|
||||
shared ``[wac] API_KEY`` for back-compat, but that path is deprecated.
|
||||
|
||||
Invoking ``ria-agent`` with no subcommand falls through to the legacy
|
||||
long-poll behavior for back-compatibility with existing deployments.
|
||||
|
|
@ -28,6 +31,57 @@ from .namegen import generate_agent_name
|
|||
_LEGACY_ALIASES = {"--hub", "--key", "--name", "--device", "--insecure", "--log-level", "--config"}
|
||||
|
||||
|
||||
REGISTRATION_REASON_MESSAGES = {
|
||||
"invalid_key": (
|
||||
"Registration key not recognized. Generate a fresh key from "
|
||||
"Settings → RIA Agents on the hub."
|
||||
),
|
||||
"expired": (
|
||||
"This registration key has expired. Generate a new one from "
|
||||
"Settings → RIA Agents on the hub."
|
||||
),
|
||||
"revoked": (
|
||||
"This registration key was revoked. Generate a new one from "
|
||||
"Settings → RIA Agents on the hub."
|
||||
),
|
||||
"already_consumed": (
|
||||
"This single-use registration key has already been used. "
|
||||
"Generate a new one, or mint a reusable key instead."
|
||||
),
|
||||
}
|
||||
|
||||
|
||||
def _explain_registration_failure(status: int, body: bytes) -> str:
|
||||
"""Return a human-readable explanation for a failed register call."""
|
||||
try:
|
||||
parsed = json.loads(body) if body else None
|
||||
except ValueError:
|
||||
parsed = None
|
||||
|
||||
if status == 429:
|
||||
# 429 carries a plain string detail, never a reason code.
|
||||
if isinstance(parsed, dict) and parsed.get("detail"):
|
||||
detail = parsed["detail"]
|
||||
else:
|
||||
detail = body.decode("utf-8", "replace") or "rate limited"
|
||||
return f"Registration rate-limited by the hub: {detail}"
|
||||
|
||||
if not isinstance(parsed, dict):
|
||||
text = body.decode("utf-8", "replace")
|
||||
return f"HTTP {status}: {text or 'no body'}"
|
||||
|
||||
detail = parsed.get("detail")
|
||||
if isinstance(detail, dict):
|
||||
reason = detail.get("reason")
|
||||
if reason in REGISTRATION_REASON_MESSAGES:
|
||||
return REGISTRATION_REASON_MESSAGES[reason]
|
||||
if reason:
|
||||
return f"Registration rejected ({reason})"
|
||||
if isinstance(detail, str) and detail:
|
||||
return f"Registration rejected: {detail}"
|
||||
return f"HTTP {status}: {parsed}"
|
||||
|
||||
|
||||
def _cmd_detect(_args: argparse.Namespace) -> int:
|
||||
devices = available_devices()
|
||||
if not devices:
|
||||
|
|
@ -39,6 +93,7 @@ def _cmd_detect(_args: argparse.Namespace) -> int:
|
|||
|
||||
|
||||
def _cmd_register(args: argparse.Namespace) -> int:
|
||||
import urllib.error
|
||||
import urllib.request
|
||||
|
||||
hub_url = args.hub.rstrip("/")
|
||||
|
|
@ -56,6 +111,14 @@ def _cmd_register(args: argparse.Namespace) -> int:
|
|||
try:
|
||||
with urllib.request.urlopen(req) as resp:
|
||||
data = json.loads(resp.read())
|
||||
except urllib.error.HTTPError as e:
|
||||
try:
|
||||
err_body = e.read()
|
||||
except Exception:
|
||||
err_body = b""
|
||||
msg = _explain_registration_failure(e.code, err_body)
|
||||
print(f"error: registration failed: {msg}", file=sys.stderr)
|
||||
return 1
|
||||
except Exception as e:
|
||||
print(f"error: registration failed: {e}", file=sys.stderr)
|
||||
return 1
|
||||
|
|
@ -80,7 +143,7 @@ def _cmd_register(args: argparse.Namespace) -> int:
|
|||
cfg.tx_allowed_freq_ranges = [[float(lo), float(hi)] for lo, hi in freq_ranges]
|
||||
path = _config.save(cfg)
|
||||
|
||||
print(f"Registered agent: {agent_id}")
|
||||
print(f"Registered agent: ({name})")
|
||||
if cfg.tx_enabled:
|
||||
caps: list[str] = []
|
||||
if cfg.tx_max_gain_db is not None:
|
||||
|
|
@ -141,7 +204,16 @@ def main() -> None:
|
|||
|
||||
p_reg = sub.add_parser("register", help="Register agent with RIA Hub and save credentials")
|
||||
p_reg.add_argument("--hub", required=True, help="RIA Hub URL (e.g. http://whitehorse:3005)")
|
||||
p_reg.add_argument("--api-key", dest="api_key", required=True, help="Hub API key")
|
||||
p_reg.add_argument(
|
||||
"--api-key",
|
||||
dest="api_key",
|
||||
required=True,
|
||||
help=(
|
||||
"Personal registration key from the RIA Agents page on the hub "
|
||||
"(format: ria_reg_...). Shown once when generated; save it then. "
|
||||
"The legacy shared API key is also accepted but deprecated."
|
||||
),
|
||||
)
|
||||
p_reg.add_argument("--name", default=None, help="Human-friendly agent name")
|
||||
p_reg.add_argument("--insecure", action="store_true", help="Skip TLS verification")
|
||||
p_reg.add_argument(
|
||||
|
|
|
|||
|
|
@ -68,7 +68,7 @@ _HEARTBEAT_INTERVAL = 30 # seconds between heartbeats
|
|||
_POLL_TIMEOUT = 30 # server-side long-poll duration
|
||||
_POLL_CLIENT_TIMEOUT = 40 # client read timeout — slightly longer than server
|
||||
_RECONNECT_PAUSE = 5 # seconds to wait after a poll error before retrying
|
||||
_CHUNK_SIZE = 10 * 1024 * 1024 # 10 MB per chunk — fast enough for git-LFS to process within timeout
|
||||
_CHUNK_SIZE = 50 * 1024 * 1024 # 50 MB — well below Cloudflare's 100 MB limit
|
||||
_DIRECT_THRESHOLD = 90 * 1024 * 1024 # files above this use chunked upload
|
||||
_CAPTURE_SAMPLES = 4096 # IQ samples per inference window
|
||||
_IDLE_LABELS = frozenset({"noise", "idle", "no_signal", "unknown_protocol", "background"})
|
||||
|
|
@ -93,24 +93,16 @@ class NodeAgent:
|
|||
name: str,
|
||||
sdr_device: str = "unknown",
|
||||
insecure: bool = False,
|
||||
role: str = "general",
|
||||
session_code: str | None = None,
|
||||
) -> None:
|
||||
self.hub_url = hub_url.rstrip("/")
|
||||
self.api_key = api_key
|
||||
self.name = name
|
||||
self.sdr_device = sdr_device
|
||||
self.insecure = insecure
|
||||
self.role = role
|
||||
self.session_code = session_code
|
||||
|
||||
self.node_id: str | None = None
|
||||
self._stop = threading.Event()
|
||||
|
||||
# ── TX state ────────────────────────────────────────────────────────
|
||||
self._tx_stop = threading.Event()
|
||||
self._tx_thread: threading.Thread | None = None
|
||||
|
||||
# ── Inference state ─────────────────────────────────────────────────
|
||||
# Protected by _inf_lock for cross-thread model swaps.
|
||||
self._inf_lock = threading.Lock()
|
||||
|
|
@ -180,27 +172,19 @@ class NodeAgent:
|
|||
capabilities = ["campaign"]
|
||||
if self._ort_available:
|
||||
capabilities.append("inference")
|
||||
if self.role == "tx":
|
||||
capabilities.append("transmit")
|
||||
payload: dict = {
|
||||
"name": self.name,
|
||||
"sdr_device": self.sdr_device,
|
||||
"ria_toolkit_version": self._ria_version,
|
||||
"capabilities": capabilities,
|
||||
"role": self.role,
|
||||
}
|
||||
if self.session_code:
|
||||
payload["session_code"] = self.session_code
|
||||
resp = self._post("/composer/nodes/register", json=payload, timeout=15)
|
||||
resp = self._post(
|
||||
"/composer/nodes/register",
|
||||
json={
|
||||
"name": self.name,
|
||||
"sdr_device": self.sdr_device,
|
||||
"ria_toolkit_version": self._ria_version,
|
||||
"capabilities": capabilities,
|
||||
},
|
||||
timeout=15,
|
||||
)
|
||||
resp.raise_for_status()
|
||||
self.node_id = resp.json()["node_id"]
|
||||
logger.info(
|
||||
"Registered as %r (node_id=%s, role=%s%s)",
|
||||
self.name,
|
||||
self.node_id,
|
||||
self.role,
|
||||
f", session_code={self.session_code!r}" if self.session_code else "",
|
||||
)
|
||||
logger.info("Registered as %r (node_id=%s)", self.name, self.node_id)
|
||||
|
||||
def _deregister(self) -> None:
|
||||
if not self.node_id:
|
||||
|
|
@ -261,10 +245,9 @@ class NodeAgent:
|
|||
if command == "run_campaign":
|
||||
campaign_id: str = cmd.get("campaign_id") or str(uuid.uuid4())
|
||||
config_dict: dict = cmd.get("payload") or {}
|
||||
skip_local_tx: bool = bool(cmd.get("skip_local_tx", False))
|
||||
threading.Thread(
|
||||
target=self._run_campaign,
|
||||
args=(campaign_id, config_dict, skip_local_tx),
|
||||
args=(campaign_id, config_dict),
|
||||
daemon=True,
|
||||
name=f"campaign-{campaign_id[:8]}",
|
||||
).start()
|
||||
|
|
@ -286,17 +269,6 @@ class NodeAgent:
|
|||
self._stop_inference()
|
||||
elif command == "configure_inference":
|
||||
self._queue_sdr_config(cmd)
|
||||
elif command == "start_transmit":
|
||||
threading.Thread(
|
||||
target=self._start_transmit,
|
||||
args=(cmd,),
|
||||
daemon=True,
|
||||
name="ria-start-tx",
|
||||
).start()
|
||||
elif command == "stop_transmit":
|
||||
self._stop_transmit()
|
||||
elif command == "configure_transmit":
|
||||
logger.info("configure_transmit received — will apply on next step boundary")
|
||||
else:
|
||||
logger.warning("Unknown command %r — ignored", command)
|
||||
|
||||
|
|
@ -304,7 +276,7 @@ class NodeAgent:
|
|||
# Campaign execution
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _run_campaign(self, campaign_id: str, config_dict: dict, skip_local_tx: bool = False) -> None:
|
||||
def _run_campaign(self, campaign_id: str, config_dict: dict) -> None:
|
||||
try:
|
||||
from ria_toolkit_oss.orchestration.campaign import CampaignConfig
|
||||
from ria_toolkit_oss.orchestration.executor import CampaignExecutor
|
||||
|
|
@ -316,10 +288,10 @@ class NodeAgent:
|
|||
)
|
||||
return
|
||||
|
||||
logger.info("Campaign %s starting (skip_local_tx=%s)", campaign_id[:8], skip_local_tx)
|
||||
logger.info("Campaign %s starting", campaign_id[:8])
|
||||
try:
|
||||
config = CampaignConfig.from_dict(config_dict)
|
||||
executor = CampaignExecutor(config, skip_local_tx=skip_local_tx)
|
||||
executor = CampaignExecutor(config)
|
||||
result = executor.run()
|
||||
logger.info("Campaign %s completed — uploading recordings", campaign_id[:8])
|
||||
self._upload_recordings(campaign_id, config, result)
|
||||
|
|
@ -329,58 +301,6 @@ class NodeAgent:
|
|||
logger.error("Campaign %s failed: %s", campaign_id[:8], exc)
|
||||
self._report_campaign_status(campaign_id, "failed", error=str(exc))
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# TX execution
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _start_transmit(self, cmd: dict) -> None:
|
||||
"""Execute a synthetic transmit campaign using TxExecutor.
|
||||
|
||||
The command payload mirrors a TransmitterConfig dict with an optional
|
||||
``schedule`` of steps. Each step synthesises a signal and transmits it
|
||||
via the local SDR in TX mode.
|
||||
"""
|
||||
try:
|
||||
from ria_toolkit_oss.orchestration.tx_executor import TxExecutor
|
||||
except ImportError as exc:
|
||||
logger.error("start_transmit: TxExecutor not available: %s", exc)
|
||||
return
|
||||
|
||||
if self._tx_thread and self._tx_thread.is_alive():
|
||||
logger.warning("start_transmit: TX already running — ignoring duplicate command")
|
||||
return
|
||||
|
||||
self._tx_stop.clear()
|
||||
campaign_id: str = cmd.get("campaign_id") or str(uuid.uuid4())
|
||||
executor = TxExecutor(
|
||||
config=cmd,
|
||||
sdr_device=self.sdr_device,
|
||||
stop_event=self._tx_stop,
|
||||
)
|
||||
self._tx_thread = threading.Thread(
|
||||
target=self._run_tx_campaign,
|
||||
args=(executor, campaign_id),
|
||||
daemon=True,
|
||||
name=f"tx-campaign-{campaign_id[:8]}",
|
||||
)
|
||||
self._tx_thread.start()
|
||||
|
||||
def _run_tx_campaign(self, executor: Any, campaign_id: str) -> None:
|
||||
try:
|
||||
executor.run()
|
||||
logger.info("TX campaign %s completed", campaign_id[:8])
|
||||
self._report_campaign_status(campaign_id, "completed")
|
||||
except Exception as exc:
|
||||
logger.error("TX campaign %s failed: %s", campaign_id[:8], exc)
|
||||
self._report_campaign_status(campaign_id, "failed", error=str(exc))
|
||||
|
||||
def _stop_transmit(self) -> None:
|
||||
"""Signal the TX loop to stop gracefully."""
|
||||
self._tx_stop.set()
|
||||
if self._tx_thread and self._tx_thread.is_alive():
|
||||
self._tx_thread.join(timeout=5.0)
|
||||
logger.info("TX stopped")
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Inference — model loading
|
||||
# ------------------------------------------------------------------
|
||||
|
|
@ -659,18 +579,13 @@ class NodeAgent:
|
|||
base_url = f"{self.hub_url}/datasets/upload"
|
||||
steps = (result.get("steps") if isinstance(result, dict) else getattr(result, "steps", None)) or []
|
||||
|
||||
output_obj = getattr(config, "output", None)
|
||||
folder = getattr(output_obj, "folder", None)
|
||||
campaign_name: str = folder if folder is not None else (getattr(config, "name", None) or "")
|
||||
for step in steps:
|
||||
output_path: str | None = getattr(step, "output_path", None)
|
||||
if not output_path:
|
||||
continue
|
||||
device_id: str = getattr(step, "transmitter_id", "") or ""
|
||||
for fpath in _sigmf_files(output_path):
|
||||
basename = os.path.basename(fpath)
|
||||
path_parts = [p for p in (campaign_name, device_id) if p]
|
||||
filename = "/".join(path_parts + [basename])
|
||||
filename = os.path.basename(fpath)
|
||||
metadata = {
|
||||
"filename": filename,
|
||||
"repo_owner": repo_owner,
|
||||
|
|
@ -756,7 +671,7 @@ class NodeAgent:
|
|||
headers=headers,
|
||||
files={"file": (filename, chunk, "application/octet-stream")},
|
||||
data={**metadata, "upload_id": upload_id, "chunk_index": i, "total_chunks": total_chunks},
|
||||
timeout=(30, None), # 30s connect, no read timeout — server may take minutes on final chunk
|
||||
timeout=120,
|
||||
verify=verify,
|
||||
)
|
||||
if not resp.ok:
|
||||
|
|
@ -933,21 +848,6 @@ def main() -> None:
|
|||
choices=["DEBUG", "INFO", "WARNING", "ERROR"],
|
||||
help="Logging verbosity (default: INFO)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--role",
|
||||
default=None,
|
||||
choices=["general", "rx", "tx"],
|
||||
help=("Node role reported to the hub. " "'tx' enables synthetic transmission commands. " "Default: general"),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--session-code",
|
||||
default=None,
|
||||
metavar="CODE",
|
||||
help=(
|
||||
"3-word session code to pair this TX agent with a waiting campaign, "
|
||||
"e.g. 'amber-peak-transmit'. Supplied by the campaign UI."
|
||||
),
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
|
|
@ -961,8 +861,6 @@ def main() -> None:
|
|||
device = args.device or cfg.get("device", "unknown")
|
||||
insecure = args.insecure if args.insecure is not None else cfg.get("insecure", False)
|
||||
log_level = args.log_level or cfg.get("log_level", "INFO")
|
||||
role = args.role or cfg.get("role", "general")
|
||||
session_code = args.session_code or cfg.get("session_code")
|
||||
|
||||
if not hub:
|
||||
parser.error("--hub is required (or set 'hub' in the config file)")
|
||||
|
|
@ -990,8 +888,6 @@ def main() -> None:
|
|||
name=name,
|
||||
sdr_device=device,
|
||||
insecure=insecure,
|
||||
role=role,
|
||||
session_code=session_code,
|
||||
)
|
||||
agent.run()
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
from ria_toolkit_oss.data.annotation import Annotation
|
||||
from ria_toolkit_oss.datatypes.annotation import Annotation
|
||||
|
||||
# TODO figure out how to transfer labels in the merge case
|
||||
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ from typing import Optional
|
|||
|
||||
import numpy as np
|
||||
|
||||
from ria_toolkit_oss.data import Annotation, Recording
|
||||
from ria_toolkit_oss.datatypes import Annotation, Recording
|
||||
|
||||
|
||||
def annotate_with_cusum(
|
||||
|
|
@ -24,7 +24,7 @@ def annotate_with_cusum(
|
|||
changes between a low and high amplitude.
|
||||
|
||||
:param recording: A ``Recording`` object to annotate.
|
||||
:type recording: ``ria_toolkit_oss.data.Recording``
|
||||
:type recording: ``ria_toolkit_oss.datatypes.Recording``
|
||||
:param label: Label for the detected segments.
|
||||
:type label: str
|
||||
:param window_size: The length (in samples) of the moving average window.
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ from typing import Tuple
|
|||
import numpy as np
|
||||
from scipy.signal import filtfilt
|
||||
|
||||
from ria_toolkit_oss.data import Annotation, Recording
|
||||
from ria_toolkit_oss.datatypes import Annotation, Recording
|
||||
|
||||
|
||||
def detect_signals_energy(
|
||||
|
|
|
|||
|
|
@ -55,7 +55,7 @@ import numpy as np
|
|||
from scipy import ndimage
|
||||
from scipy import signal as scipy_signal
|
||||
|
||||
from ria_toolkit_oss.data import Annotation, Recording
|
||||
from ria_toolkit_oss.datatypes import Annotation, Recording
|
||||
|
||||
|
||||
def find_spectral_components(
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
import numpy as np
|
||||
|
||||
from ria_toolkit_oss.data import Recording
|
||||
from ria_toolkit_oss.datatypes import Recording
|
||||
|
||||
|
||||
def qualify_slice_from_annotations(recording: Recording, slice_length: int):
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
import numpy as np
|
||||
from scipy.signal import butter, lfilter
|
||||
|
||||
from ria_toolkit_oss.data.annotation import Annotation
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.annotation import Annotation
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
|
||||
|
||||
def isolate_signal(recording: Recording, annotation: Annotation) -> Recording:
|
||||
|
|
|
|||
|
|
@ -46,7 +46,7 @@ from typing import Optional
|
|||
|
||||
import numpy as np
|
||||
|
||||
from ria_toolkit_oss.data import Annotation, Recording
|
||||
from ria_toolkit_oss.datatypes import Annotation, Recording
|
||||
|
||||
|
||||
def _find_ranges(indices, max_gap):
|
||||
|
|
|
|||
|
|
@ -1,129 +1,128 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import Any, Optional
|
||||
|
||||
from sigmf import SigMFFile
|
||||
|
||||
|
||||
class Annotation:
|
||||
"""Signal annotations are labels or additional information associated with specific data points or segments within
|
||||
a signal. These annotations could be used for tasks like supervised learning, where the goal is to train a model
|
||||
to recognize patterns or characteristics in the signal associated with these annotations.
|
||||
|
||||
Annotations can be used to label interesting points in your recording.
|
||||
|
||||
:param sample_start: The index of the starting sample of the annotation.
|
||||
:type sample_start: int
|
||||
:param sample_count: The index of the ending sample of the annotation, inclusive.
|
||||
:type sample_count: int
|
||||
:param freq_lower_edge: The lower frequency of the annotation.
|
||||
:type freq_lower_edge: float
|
||||
:param freq_upper_edge: The upper frequency of the annotation.
|
||||
:type freq_upper_edge: float
|
||||
:param label: The label that will be displayed with the bounding box in compatible viewers including IQEngine.
|
||||
Defaults to an emtpy string.
|
||||
:type label: str, optional
|
||||
:param comment: A human-readable comment. Defaults to an empty string.
|
||||
:type comment: str, optional
|
||||
:param detail: A dictionary of user defined annotation-specific metadata. Defaults to None.
|
||||
:type detail: dict, optional
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
sample_start: int,
|
||||
sample_count: int,
|
||||
freq_lower_edge: float,
|
||||
freq_upper_edge: float,
|
||||
label: Optional[str] = "",
|
||||
comment: Optional[str] = "",
|
||||
detail: Optional[dict] = None,
|
||||
):
|
||||
"""Initialize a new Annotation instance."""
|
||||
self.sample_start = int(sample_start)
|
||||
self.sample_count = int(sample_count)
|
||||
self.freq_lower_edge = float(freq_lower_edge)
|
||||
self.freq_upper_edge = float(freq_upper_edge)
|
||||
self.label = str(label)
|
||||
self.comment = str(comment)
|
||||
|
||||
if detail is None:
|
||||
self.detail = {}
|
||||
elif not _is_jsonable(detail):
|
||||
raise ValueError(f"Detail object is not json serializable: {detail}")
|
||||
else:
|
||||
self.detail = detail
|
||||
|
||||
def is_valid(self) -> bool:
|
||||
"""
|
||||
Verify ``sample_count > 0`` and the ``freq_lower_edge < freq_upper_edge``.
|
||||
|
||||
:returns: True if valid, False if not.
|
||||
"""
|
||||
|
||||
return self.sample_count > 0 and self.freq_lower_edge < self.freq_upper_edge
|
||||
|
||||
def overlap(self, other):
|
||||
"""
|
||||
Quantify how much the bounding box in this annotation overlaps with another annotation.
|
||||
|
||||
:param other: The other annotation.
|
||||
:type other: Annotation
|
||||
|
||||
:returns: The area of the overlap in samples*frequency, or 0 if they do not overlap."""
|
||||
|
||||
sample_overlap_start = max(self.sample_start, other.sample_start)
|
||||
sample_overlap_end = min(self.sample_start + self.sample_count, other.sample_start + other.sample_count)
|
||||
|
||||
freq_overlap_start = max(self.freq_lower_edge, other.freq_lower_edge)
|
||||
freq_overlap_end = min(self.freq_upper_edge, other.freq_upper_edge)
|
||||
|
||||
if freq_overlap_start >= freq_overlap_end or sample_overlap_start >= sample_overlap_end:
|
||||
return 0
|
||||
else:
|
||||
return (sample_overlap_end - sample_overlap_start) * (freq_overlap_end - freq_overlap_start)
|
||||
|
||||
def area(self):
|
||||
"""
|
||||
The 'area' of the bounding box, samples*frequency.
|
||||
Useful to quantify annotation size.
|
||||
|
||||
:returns: sample length multiplied by bandwidth."""
|
||||
|
||||
return self.sample_count * (self.freq_upper_edge - self.freq_lower_edge)
|
||||
|
||||
def __eq__(self, other: Annotation) -> bool:
|
||||
return self.__dict__ == other.__dict__
|
||||
|
||||
def to_sigmf_format(self) -> dict:
|
||||
"""
|
||||
Returns a JSON dictionary representation, formatted for saving in a ``.sigmf-meta`` file.
|
||||
"""
|
||||
|
||||
annotation_dict = {SigMFFile.START_INDEX_KEY: self.sample_start, SigMFFile.LENGTH_INDEX_KEY: self.sample_count}
|
||||
|
||||
annotation_dict["metadata"] = {
|
||||
SigMFFile.LABEL_KEY: self.label,
|
||||
SigMFFile.COMMENT_KEY: self.comment,
|
||||
SigMFFile.FHI_KEY: self.freq_upper_edge,
|
||||
SigMFFile.FLO_KEY: self.freq_lower_edge,
|
||||
"ria:detail": self.detail,
|
||||
}
|
||||
|
||||
if _is_jsonable(annotation_dict):
|
||||
return annotation_dict
|
||||
else:
|
||||
raise ValueError("Annotation dictionary was not json serializable.")
|
||||
|
||||
|
||||
def _is_jsonable(x: Any) -> bool:
|
||||
"""
|
||||
:return: True if ``x`` is JSON serializable, False otherwise.
|
||||
:rtype: bool
|
||||
"""
|
||||
try:
|
||||
json.dumps(x)
|
||||
return True
|
||||
except (TypeError, OverflowError):
|
||||
return False
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import Any, Optional
|
||||
|
||||
from sigmf import SigMFFile
|
||||
|
||||
|
||||
class Annotation:
|
||||
"""Signal annotations are labels or additional information associated with specific data points or segments within
|
||||
a signal. These annotations could be used for tasks like supervised learning, where the goal is to train a model
|
||||
to recognize patterns or characteristics in the signal associated with these annotations.
|
||||
|
||||
Annotations can be used to label interesting points in your recording.
|
||||
|
||||
:param sample_start: The index of the starting sample of the annotation.
|
||||
:type sample_start: int
|
||||
:param sample_count: The index of the ending sample of the annotation, inclusive.
|
||||
:type sample_count: int
|
||||
:param freq_lower_edge: The lower frequency of the annotation.
|
||||
:type freq_lower_edge: float
|
||||
:param freq_upper_edge: The upper frequency of the annotation.
|
||||
:type freq_upper_edge: float
|
||||
:param label: The label that will be displayed with the bounding box in compatible viewers including IQEngine.
|
||||
Defaults to an emtpy string.
|
||||
:type label: str, optional
|
||||
:param comment: A human-readable comment. Defaults to an empty string.
|
||||
:type comment: str, optional
|
||||
:param detail: A dictionary of user defined annotation-specific metadata. Defaults to None.
|
||||
:type detail: dict, optional
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
sample_start: int,
|
||||
sample_count: int,
|
||||
freq_lower_edge: float,
|
||||
freq_upper_edge: float,
|
||||
label: Optional[str] = "",
|
||||
comment: Optional[str] = "",
|
||||
detail: Optional[dict] = None,
|
||||
):
|
||||
"""Initialize a new Annotation instance."""
|
||||
self.sample_start = int(sample_start)
|
||||
self.sample_count = int(sample_count)
|
||||
self.freq_lower_edge = float(freq_lower_edge)
|
||||
self.freq_upper_edge = float(freq_upper_edge)
|
||||
self.label = str(label)
|
||||
self.comment = str(comment)
|
||||
|
||||
if detail is None:
|
||||
self.detail = {}
|
||||
elif not _is_jsonable(detail):
|
||||
raise ValueError(f"Detail object is not json serializable: {detail}")
|
||||
else:
|
||||
self.detail = detail
|
||||
|
||||
def is_valid(self) -> bool:
|
||||
"""
|
||||
Check that the annotation sample count is > 0 and the freq_lower_edge<freq_upper_edge.
|
||||
|
||||
:returns: True if valid, False if not.
|
||||
"""
|
||||
|
||||
return self.sample_count > 0 and self.freq_lower_edge < self.freq_upper_edge
|
||||
|
||||
def overlap(self, other):
    """
    Quantify how much the bounding box in this annotation overlaps with another annotation.

    :param other: The other annotation.
    :type other: Annotation

    :returns: The area of the overlap in samples*frequency, or 0 if they do not overlap.
    """
    # Intersect the two boxes along the time (sample) axis...
    t_start = max(self.sample_start, other.sample_start)
    t_end = min(self.sample_start + self.sample_count, other.sample_start + other.sample_count)

    # ...and along the frequency axis.
    f_start = max(self.freq_lower_edge, other.freq_lower_edge)
    f_end = min(self.freq_upper_edge, other.freq_upper_edge)

    # Only a strictly positive extent on both axes counts as an overlap.
    if t_end > t_start and f_end > f_start:
        return (t_end - t_start) * (f_end - f_start)
    return 0
|
||||
|
||||
def area(self):
    """
    The 'area' of the bounding box, samples*frequency.
    Useful to quantify annotation size.

    :returns: sample length multiplied by bandwidth.
    """
    bandwidth = self.freq_upper_edge - self.freq_lower_edge
    return self.sample_count * bandwidth
|
||||
|
||||
def __eq__(self, other: Annotation) -> bool:
    # Two annotations are equal when every stored field matches.
    return vars(self) == vars(other)
|
||||
|
||||
def to_sigmf_format(self):
    """
    Returns a JSON dictionary representing this annotation formatted to be saved in a .sigmf-meta file.
    """
    # Build the whole structure in one literal: core indices at the top level,
    # descriptive fields nested under "metadata".
    sigmf_annotation = {
        SigMFFile.START_INDEX_KEY: self.sample_start,
        SigMFFile.LENGTH_INDEX_KEY: self.sample_count,
        "metadata": {
            SigMFFile.LABEL_KEY: self.label,
            SigMFFile.COMMENT_KEY: self.comment,
            SigMFFile.FHI_KEY: self.freq_upper_edge,
            SigMFFile.FLO_KEY: self.freq_lower_edge,
            "ria:detail": self.detail,
        },
    }

    # Guard against non-serializable user detail slipping through.
    if not _is_jsonable(sigmf_annotation):
        raise ValueError("Annotation dictionary was not json serializable.")
    return sigmf_annotation
|
||||
|
||||
|
||||
def _is_jsonable(x: Any) -> bool:
    """
    :return: True if x is JSON serializable, False otherwise.
    """
    # json.dumps raises TypeError for unsupported types and OverflowError
    # for out-of-range values; anything else should propagate.
    try:
        json.dumps(x)
    except (TypeError, OverflowError):
        return False
    return True
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
8
src/ria_toolkit_oss/datatypes/__init__.py
Normal file
8
src/ria_toolkit_oss/datatypes/__init__.py
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
"""
|
||||
The datatypes package contains abstract data types tailored for radio machine learning.
|
||||
"""
|
||||
|
||||
__all__ = ["Annotation", "Recording"]
|
||||
|
||||
from .annotation import Annotation
|
||||
from .recording import Recording
|
||||
129
src/ria_toolkit_oss/datatypes/annotation.py
Normal file
129
src/ria_toolkit_oss/datatypes/annotation.py
Normal file
|
|
@ -0,0 +1,129 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import Any, Optional
|
||||
|
||||
from sigmf import SigMFFile
|
||||
|
||||
|
||||
class Annotation:
    """Signal annotations are labels or additional information associated with specific data points or segments within
    a signal. These annotations could be used for tasks like supervised learning, where the goal is to train a model
    to recognize patterns or characteristics in the signal associated with these annotations.

    Annotations can be used to label interesting points in your recording.

    :param sample_start: The index of the starting sample of the annotation.
    :type sample_start: int
    :param sample_count: The number of samples in the annotation.
    :type sample_count: int
    :param freq_lower_edge: The lower frequency of the annotation.
    :type freq_lower_edge: float
    :param freq_upper_edge: The upper frequency of the annotation.
    :type freq_upper_edge: float
    :param label: The label that will be displayed with the bounding box in compatible viewers including IQEngine.
        Defaults to an empty string.
    :type label: str, optional
    :param comment: A human-readable comment. Defaults to an empty string.
    :type comment: str, optional
    :param detail: A dictionary of user defined annotation-specific metadata. Defaults to None.
    :type detail: dict, optional
    """

    def __init__(
        self,
        sample_start: int,
        sample_count: int,
        freq_lower_edge: float,
        freq_upper_edge: float,
        label: Optional[str] = "",
        comment: Optional[str] = "",
        detail: Optional[dict] = None,
    ):
        """Initialize a new Annotation instance."""
        # Coerce numeric fields so downstream arithmetic is type-stable.
        self.sample_start = int(sample_start)
        self.sample_count = int(sample_count)
        self.freq_lower_edge = float(freq_lower_edge)
        self.freq_upper_edge = float(freq_upper_edge)
        self.label = str(label)
        self.comment = str(comment)

        # The detail payload must survive a JSON round trip so that
        # to_sigmf_format() can always serialize the annotation.
        if detail is None:
            self.detail = {}
        elif not _is_jsonable(detail):
            raise ValueError(f"Detail object is not json serializable: {detail}")
        else:
            self.detail = detail

    def is_valid(self) -> bool:
        """
        Verify ``sample_count > 0`` and ``freq_lower_edge < freq_upper_edge``.

        :returns: True if valid, False if not.
        """
        return self.sample_count > 0 and self.freq_lower_edge < self.freq_upper_edge

    def overlap(self, other: Annotation) -> float:
        """
        Quantify how much the bounding box in this annotation overlaps with another annotation.

        :param other: The other annotation.
        :type other: Annotation

        :returns: The area of the overlap in samples*frequency, or 0 if they do not overlap.
        """
        # Intersection along the time (sample) axis; end = start + count.
        sample_overlap_start = max(self.sample_start, other.sample_start)
        sample_overlap_end = min(self.sample_start + self.sample_count, other.sample_start + other.sample_count)

        # Intersection along the frequency axis.
        freq_overlap_start = max(self.freq_lower_edge, other.freq_lower_edge)
        freq_overlap_end = min(self.freq_upper_edge, other.freq_upper_edge)

        if freq_overlap_start >= freq_overlap_end or sample_overlap_start >= sample_overlap_end:
            return 0
        return (sample_overlap_end - sample_overlap_start) * (freq_overlap_end - freq_overlap_start)

    def area(self) -> float:
        """
        The 'area' of the bounding box, samples*frequency.
        Useful to quantify annotation size.

        :returns: sample length multiplied by bandwidth.
        """
        return self.sample_count * (self.freq_upper_edge - self.freq_lower_edge)

    def __eq__(self, other: object) -> bool:
        # Compare field-by-field. Returning NotImplemented (rather than
        # touching other.__dict__ unconditionally) lets Python fall back to
        # the other operand instead of raising AttributeError on e.g.
        # ``annotation == 5``.
        if not isinstance(other, Annotation):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def to_sigmf_format(self) -> dict:
        """
        Returns a JSON dictionary representation, formatted for saving in a ``.sigmf-meta`` file.

        :raises ValueError: If the resulting dictionary is not JSON serializable.
        """
        annotation_dict = {SigMFFile.START_INDEX_KEY: self.sample_start, SigMFFile.LENGTH_INDEX_KEY: self.sample_count}

        annotation_dict["metadata"] = {
            SigMFFile.LABEL_KEY: self.label,
            SigMFFile.COMMENT_KEY: self.comment,
            SigMFFile.FHI_KEY: self.freq_upper_edge,
            SigMFFile.FLO_KEY: self.freq_lower_edge,
            "ria:detail": self.detail,
        }

        if _is_jsonable(annotation_dict):
            return annotation_dict
        raise ValueError("Annotation dictionary was not json serializable.")
|
||||
|
||||
|
||||
def _is_jsonable(x: Any) -> bool:
    """
    Report whether ``x`` survives :func:`json.dumps` without error.

    :return: True if ``x`` is JSON serializable, False otherwise.
    :rtype: bool
    """
    try:
        json.dumps(x)
    except (TypeError, OverflowError):
        return False
    else:
        return True
|
||||
|
|
@ -7,8 +7,8 @@ from typing import Any, Optional
|
|||
|
||||
from packaging.version import Version
|
||||
|
||||
from ria_toolkit_oss.data.datasets.license.dataset_license import DatasetLicense
|
||||
from ria_toolkit_oss.data.datasets.radio_dataset import RadioDataset
|
||||
from ria_toolkit_oss.datatypes.datasets.license.dataset_license import DatasetLicense
|
||||
from ria_toolkit_oss.datatypes.datasets.radio_dataset import RadioDataset
|
||||
from ria_toolkit_oss.utils.abstract_attribute import abstract_attribute
|
||||
|
||||
|
||||
|
|
@ -7,11 +7,11 @@ from typing import Optional
|
|||
import h5py
|
||||
import numpy as np
|
||||
|
||||
from ria_toolkit_oss.data.datasets.h5helpers import (
|
||||
from ria_toolkit_oss.datatypes.datasets.h5helpers import (
|
||||
append_entry_inplace,
|
||||
copy_dataset_entry_by_index,
|
||||
)
|
||||
from ria_toolkit_oss.data.datasets.radio_dataset import RadioDataset
|
||||
from ria_toolkit_oss.datatypes.datasets.radio_dataset import RadioDataset
|
||||
|
||||
|
||||
class IQDataset(RadioDataset, ABC):
|
||||
|
|
@ -19,7 +19,7 @@ class IQDataset(RadioDataset, ABC):
|
|||
radiofrequency (RF) signals represented as In-phase (I) and Quadrature (Q) samples.
|
||||
|
||||
For machine learning tasks that involve processing spectrograms, please use
|
||||
ria_toolkit_oss.data.datasets.SpectDataset instead.
|
||||
ria_toolkit_oss.datatypes.datasets.SpectDataset instead.
|
||||
|
||||
This is an abstract interface defining common properties and behaviour of IQDatasets. Therefore, this class
|
||||
should not be instantiated directly. Instead, it is subclassed to define custom interfaces for specific machine
|
||||
|
|
@ -12,7 +12,7 @@ import numpy as np
|
|||
import pandas as pd
|
||||
from numpy.typing import ArrayLike
|
||||
|
||||
from ria_toolkit_oss.data.datasets.h5helpers import (
|
||||
from ria_toolkit_oss.datatypes.datasets.h5helpers import (
|
||||
append_entry_inplace,
|
||||
copy_file,
|
||||
copy_over_example,
|
||||
|
|
@ -29,7 +29,7 @@ class RadioDataset(ABC):
|
|||
|
||||
This is an abstract interface defining common properties and behavior of radio datasets. Therefore, this class
|
||||
should not be instantiated directly. Instead, it should be subclassed to define specific interfaces for different
|
||||
types of radio datasets. For example, see ria_toolkit_oss.data.datasets.IQDataset, which is a radio dataset
|
||||
types of radio datasets. For example, see ria_toolkit_oss.datatypes.datasets.IQDataset, which is a radio dataset
|
||||
subclass tailored for tasks involving the processing of radio signals represented as IQ (In-phase and Quadrature)
|
||||
samples.
|
||||
|
||||
|
|
@ -3,7 +3,7 @@ from __future__ import annotations
|
|||
import os
|
||||
from abc import ABC
|
||||
|
||||
from ria_toolkit_oss.data.datasets.radio_dataset import RadioDataset
|
||||
from ria_toolkit_oss.datatypes.datasets.radio_dataset import RadioDataset
|
||||
|
||||
|
||||
class SpectDataset(RadioDataset, ABC):
|
||||
|
|
@ -13,7 +13,7 @@ class SpectDataset(RadioDataset, ABC):
|
|||
radio signal spectrograms.
|
||||
|
||||
For machine learning tasks that involve processing on IQ samples, please use
|
||||
ria_toolkit_oss.data.datasets.IQDataset instead.
|
||||
ria_toolkit_oss.datatypes.datasets.IQDataset instead.
|
||||
|
||||
This is an abstract interface defining common properties and behaviour of IQDatasets. Therefore, this class
|
||||
should not be instantiated directly. Instead, it is subclassed to define custom interfaces for specific machine
|
||||
|
|
@ -6,8 +6,11 @@ from typing import Optional
|
|||
import numpy as np
|
||||
from numpy.random import Generator
|
||||
|
||||
from ria_toolkit_oss.data.datasets import RadioDataset
|
||||
from ria_toolkit_oss.data.datasets.h5helpers import copy_over_example, make_empty_clone
|
||||
from ria_toolkit_oss.datatypes.datasets import RadioDataset
|
||||
from ria_toolkit_oss.datatypes.datasets.h5helpers import (
|
||||
copy_over_example,
|
||||
make_empty_clone,
|
||||
)
|
||||
|
||||
|
||||
def split(dataset: RadioDataset, lengths: list[int | float]) -> list[RadioDataset]:
|
||||
|
|
@ -28,7 +31,7 @@ def split(dataset: RadioDataset, lengths: list[int | float]) -> list[RadioDatase
|
|||
cases.
|
||||
|
||||
This function is deterministic, meaning it will always produce the same split. For a random split, see
|
||||
ria_toolkit_oss.data.datasets.random_split.
|
||||
ria_toolkit_oss.datatypes.datasets.random_split.
|
||||
|
||||
:param dataset: Dataset to be split.
|
||||
:type dataset: RadioDataset
|
||||
|
|
@ -47,7 +50,7 @@ def split(dataset: RadioDataset, lengths: list[int | float]) -> list[RadioDatase
|
|||
>>> import string
|
||||
>>> import numpy as np
|
||||
>>> import pandas as pd
|
||||
>>> from ria_toolkit_oss.data.datasets import split
|
||||
>>> from ria_toolkit_oss.datatypes.datasets import split
|
||||
|
||||
First, let's generate some random data:
|
||||
|
||||
|
|
@ -123,7 +126,7 @@ def random_split(
|
|||
training and test datasets.
|
||||
|
||||
This restriction makes it unlikely that a random split will produce datasets with the exact lengths specified.
|
||||
If it is important to ensure the closest possible split, consider using ria_toolkit_oss.data.datasets.split
|
||||
If it is important to ensure the closest possible split, consider using ria_toolkit_oss.datatypes.datasets.split
|
||||
instead.
|
||||
|
||||
:param dataset: Dataset to be split.
|
||||
|
|
@ -141,7 +144,7 @@ def random_split(
|
|||
:rtype: list of RadioDataset
|
||||
|
||||
See Also:
|
||||
ria_toolkit_oss.data.datasets.split: Usage is the same as for ``random_split()``.
|
||||
ria_toolkit_oss.datatypes.datasets.split: Usage is the same as for ``random_split()``.
|
||||
"""
|
||||
if not isinstance(dataset, RadioDataset):
|
||||
raise ValueError(f"'dataset' must be RadioDataset or one of its subclasses, got {type(dataset)}.")
|
||||
855
src/ria_toolkit_oss/datatypes/recording.py
Normal file
855
src/ria_toolkit_oss/datatypes/recording.py
Normal file
|
|
@ -0,0 +1,855 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import copy
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import warnings
|
||||
from typing import Any, Iterator, Optional
|
||||
|
||||
import numpy as np
|
||||
from numpy.typing import ArrayLike
|
||||
|
||||
from ria_toolkit_oss.datatypes.annotation import Annotation
|
||||
|
||||
PROTECTED_KEYS = ["rec_id", "timestamp"]
|
||||
|
||||
|
||||
class Recording:
|
||||
"""Tape of complex IQ (in-phase and quadrature) samples with associated metadata and annotations.
|
||||
|
||||
Recording data is a complex array of shape C x N, where C is the number of channels
|
||||
and N is the number of samples in each channel.
|
||||
|
||||
Metadata is stored in a dictionary of key value pairs,
|
||||
to include information such as sample_rate and center_frequency.
|
||||
|
||||
Annotations are a list of :class:`~ria_toolkit_oss.datatypes.Annotation`,
|
||||
defining bounding boxes in time and frequency with labels and metadata.
|
||||
|
||||
Here, signal data is represented as a NumPy array. This class is then extended in the RIA Backends to provide
|
||||
support for different data structures, such as Tensors.
|
||||
|
||||
Recordings are long-form tapes can be obtained either from a software-defined radio (SDR) or generated
|
||||
synthetically. Then, machine learning datasets are curated from collection of recordings by segmenting these
|
||||
longer-form tapes into shorter units called slices.
|
||||
|
||||
All recordings are assigned a unique 64-character recording ID, ``rec_id``. If this field is missing from the
|
||||
provided metadata, a new ID will be generated upon object instantiation.
|
||||
|
||||
:param data: Signal data as a tape IQ samples, either C x N complex, where C is the number of
|
||||
channels and N is number of samples in the signal. If data is a one-dimensional array of complex samples with
|
||||
length N, it will be reshaped to a two-dimensional array with dimensions 1 x N.
|
||||
:type data: array_like
|
||||
|
||||
:param metadata: Additional information associated with the recording.
|
||||
:type metadata: dict, optional
|
||||
:param annotations: A collection of :class:`~ria_toolkit_oss.datatypes.Annotation` objects defining bounding boxes.
|
||||
:type annotations: list of Annotations, optional
|
||||
|
||||
:param dtype: Explicitly specify the data-type of the complex samples. Must be a complex NumPy type, such as
|
||||
``np.complex64`` or ``np.complex128``. Default is None, in which case the type is determined implicitly. If
|
||||
``data`` is a NumPy array, the Recording will use the dtype of ``data`` directly without any conversion.
|
||||
:type dtype: numpy dtype object, optional
|
||||
:param timestamp: The timestamp when the recording data was generated. If provided, it should be a float or integer
|
||||
representing the time in seconds since epoch (e.g., ``time.time()``). Only used if the `timestamp` field is not
|
||||
present in the provided metadata.
|
||||
:type dtype: float or int, optional
|
||||
|
||||
:raises ValueError: If data is not complex 1xN or CxN.
|
||||
:raises ValueError: If metadata is not a python dict.
|
||||
:raises ValueError: If metadata is not json serializable.
|
||||
:raises ValueError: If annotations is not a list of valid annotation objects.
|
||||
|
||||
**Examples:**
|
||||
|
||||
>>> import numpy
|
||||
>>> from ria_toolkit_oss.datatypes import Recording, Annotation
|
||||
|
||||
>>> # Create an array of complex samples, just 1s in this case.
|
||||
>>> samples = numpy.ones(10000, dtype=numpy.complex64)
|
||||
|
||||
>>> # Create a dictionary of relevant metadata.
|
||||
>>> sample_rate = 1e6
|
||||
>>> center_frequency = 2.44e9
|
||||
>>> metadata = {
|
||||
... "sample_rate": sample_rate,
|
||||
... "center_frequency": center_frequency,
|
||||
... "author": "me",
|
||||
... }
|
||||
|
||||
>>> # Create an annotation for the annotations list.
|
||||
>>> annotations = [
|
||||
... Annotation(
|
||||
... sample_start=0,
|
||||
... sample_count=1000,
|
||||
... freq_lower_edge=center_frequency - (sample_rate / 2),
|
||||
... freq_upper_edge=center_frequency + (sample_rate / 2),
|
||||
... label="example",
|
||||
... )
|
||||
... ]
|
||||
|
||||
>>> # Store samples, metadata, and annotations together in a convenient object.
|
||||
>>> recording = Recording(data=samples, metadata=metadata, annotations=annotations)
|
||||
>>> print(recording.metadata)
|
||||
{'sample_rate': 1000000.0, 'center_frequency': 2440000000.0, 'author': 'me'}
|
||||
>>> print(recording.annotations[0].label)
|
||||
'example'
|
||||
"""
|
||||
|
||||
def __init__( # noqa C901
|
||||
self,
|
||||
data: ArrayLike | list[list],
|
||||
metadata: Optional[dict[str, any]] = None,
|
||||
dtype: Optional[np.dtype] = None,
|
||||
timestamp: Optional[float | int] = None,
|
||||
annotations: Optional[list[Annotation]] = None,
|
||||
):
|
||||
|
||||
data_arr = np.asarray(data)
|
||||
|
||||
if np.iscomplexobj(data_arr):
|
||||
# Expect C x N
|
||||
if data_arr.ndim == 1:
|
||||
self._data = np.expand_dims(data_arr, axis=0) # N -> 1 x N
|
||||
elif data_arr.ndim == 2:
|
||||
self._data = data_arr
|
||||
else:
|
||||
raise ValueError("Complex data must be C x N.")
|
||||
|
||||
else:
|
||||
raise ValueError("Input data must be complex.")
|
||||
|
||||
if dtype is not None:
|
||||
self._data = self._data.astype(dtype)
|
||||
|
||||
assert np.iscomplexobj(self._data)
|
||||
|
||||
if metadata is None:
|
||||
self._metadata = {}
|
||||
elif isinstance(metadata, dict):
|
||||
self._metadata = metadata
|
||||
else:
|
||||
raise ValueError(f"Metadata must be a python dict, but was {type(metadata)}.")
|
||||
|
||||
if not _is_jsonable(metadata):
|
||||
raise ValueError("Value must be JSON serializable.")
|
||||
|
||||
if "timestamp" not in self.metadata:
|
||||
if timestamp is not None:
|
||||
if not isinstance(timestamp, (int, float)):
|
||||
raise ValueError(f"timestamp must be int or float, not {type(timestamp)}")
|
||||
self._metadata["timestamp"] = timestamp
|
||||
else:
|
||||
self._metadata["timestamp"] = time.time()
|
||||
else:
|
||||
if not isinstance(self._metadata["timestamp"], (int, float)):
|
||||
raise ValueError(f"timestamp must be int or float, not {type(self._metadata['timestamp'])}")
|
||||
|
||||
if "rec_id" not in self.metadata:
|
||||
self._metadata["rec_id"] = generate_recording_id(data=self.data, timestamp=self._metadata["timestamp"])
|
||||
|
||||
if annotations is None:
|
||||
self._annotations = []
|
||||
elif isinstance(annotations, list):
|
||||
self._annotations = annotations
|
||||
else:
|
||||
raise ValueError("Annotations must be a list or None.")
|
||||
|
||||
if not all(isinstance(annotation, Annotation) for annotation in self._annotations):
|
||||
raise ValueError("All elements in self._annotations must be of type Annotation.")
|
||||
|
||||
self._index = 0
|
||||
|
||||
@property
|
||||
def data(self) -> np.ndarray:
|
||||
"""
|
||||
:return: Recording data, as a complex array.
|
||||
:type: np.ndarray
|
||||
|
||||
.. note::
|
||||
|
||||
For recordings with more than 1,024 samples, this property returns a read-only view of the data.
|
||||
|
||||
.. note::
|
||||
|
||||
To access specific samples, consider indexing the object directly with ``rec[c, n]``.
|
||||
"""
|
||||
if self._data.size > 1024:
|
||||
# Returning a read-only view prevents mutation at a distance while maintaining performance.
|
||||
v = self._data.view()
|
||||
v.setflags(write=False)
|
||||
return v
|
||||
else:
|
||||
return self._data.copy()
|
||||
|
||||
@property
|
||||
def metadata(self) -> dict:
|
||||
"""
|
||||
:return: Dictionary of recording metadata.
|
||||
:type: dict
|
||||
"""
|
||||
return self._metadata.copy()
|
||||
|
||||
@property
|
||||
def annotations(self) -> list[Annotation]:
|
||||
"""
|
||||
:return: List of recording annotations
|
||||
:type: list of Annotation objects
|
||||
"""
|
||||
return self._annotations.copy()
|
||||
|
||||
@property
|
||||
def shape(self) -> tuple[int]:
|
||||
"""
|
||||
:return: The shape of the data array.
|
||||
:type: tuple of ints
|
||||
"""
|
||||
return np.shape(self.data)
|
||||
|
||||
@property
|
||||
def n_chan(self) -> int:
|
||||
"""
|
||||
:return: The number of channels in the recording.
|
||||
:type: int
|
||||
"""
|
||||
return self.shape[0]
|
||||
|
||||
@property
|
||||
def rec_id(self) -> str:
|
||||
"""
|
||||
:return: Recording ID.
|
||||
:type: str
|
||||
"""
|
||||
return self.metadata["rec_id"]
|
||||
|
||||
@property
|
||||
def dtype(self) -> str:
|
||||
"""
|
||||
:return: Data-type of the data array's elements.
|
||||
:type: numpy dtype object
|
||||
"""
|
||||
return self.data.dtype
|
||||
|
||||
@property
|
||||
def timestamp(self) -> float | int:
|
||||
"""
|
||||
:return: Recording timestamp (time in seconds since epoch).
|
||||
:type: float or int
|
||||
"""
|
||||
return self.metadata["timestamp"]
|
||||
|
||||
@property
|
||||
def sample_rate(self) -> float | None:
|
||||
"""
|
||||
:return: Sample rate of the recording, or None is 'sample_rate' is not in metadata.
|
||||
:type: str
|
||||
"""
|
||||
return self.metadata.get("sample_rate")
|
||||
|
||||
@sample_rate.setter
|
||||
def sample_rate(self, sample_rate: float | int) -> None:
|
||||
"""Set the sample rate of the recording.
|
||||
|
||||
:param sample_rate: The sample rate of the recording.
|
||||
:type sample_rate: float or int
|
||||
|
||||
:return: None
|
||||
"""
|
||||
self.add_to_metadata(key="sample_rate", value=sample_rate)
|
||||
|
||||
def astype(self, dtype: np.dtype) -> Recording:
|
||||
"""Copy of the recording, data cast to a specified type.
|
||||
|
||||
.. todo: This method is not yet implemented.
|
||||
|
||||
:param dtype: Data-type to which the array is cast. Must be a complex scalar type, such as ``np.complex64`` or
|
||||
``np.complex128``.
|
||||
:type dtype: NumPy data type, optional
|
||||
|
||||
.. note: Casting to a data type with less precision can risk losing data by truncating or rounding values,
|
||||
potentially resulting in a loss of accuracy and significant information.
|
||||
|
||||
:return: A new recording with the same metadata and data, with dtype.
|
||||
|
||||
|
||||
**Examples:**
|
||||
|
||||
.. todo::
|
||||
|
||||
Usage examples coming soon!
|
||||
|
||||
"""
|
||||
# Rather than check for a valid datatype, let's cast and check the result. This makes it easier to provide
|
||||
# cross-platform support where the types are aliased across platforms.
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("ignore") # Casting may generate user warnings. E.g., complex -> real
|
||||
data = self.data.astype(dtype)
|
||||
|
||||
if np.iscomplexobj(data):
|
||||
return Recording(data=data, metadata=self.metadata, annotations=self.annotations)
|
||||
else:
|
||||
raise ValueError("dtype must be a complex number scalar type.")
|
||||
|
||||
def add_to_metadata(self, key: str, value: Any) -> None:
|
||||
"""Add a new key-value pair to the recording metadata.
|
||||
|
||||
:param key: New metadata key, must be snake_case.
|
||||
:type key: str
|
||||
:param value: Corresponding metadata value.
|
||||
:type value: any
|
||||
|
||||
:raises ValueError: If key is already in metadata or if key is not a valid metadata key.
|
||||
:raises ValueError: If value is not JSON serializable.
|
||||
|
||||
:return: None.
|
||||
|
||||
**Examples:**
|
||||
|
||||
Create a recording and add metadata:
|
||||
|
||||
>>> import numpy
|
||||
>>> from ria_toolkit_oss.datatypes import Recording
|
||||
>>>
|
||||
>>> samples = numpy.ones(10000, dtype=numpy.complex64)
|
||||
>>> metadata = {
|
||||
>>> "sample_rate": 1e6,
|
||||
>>> "center_frequency": 2.44e9,
|
||||
>>> }
|
||||
>>>
|
||||
>>> recording = Recording(data=samples, metadata=metadata)
|
||||
>>> print(recording.metadata)
|
||||
{'sample_rate': 1000000.0,
|
||||
'center_frequency': 2440000000.0,
|
||||
'timestamp': 17369...,
|
||||
'rec_id': 'fda0f41...'}
|
||||
>>>
|
||||
>>> recording.add_to_metadata(key="author", value="me")
|
||||
>>> print(recording.metadata)
|
||||
{'sample_rate': 1000000.0,
|
||||
'center_frequency': 2440000000.0,
|
||||
'author': 'me',
|
||||
'timestamp': 17369...,
|
||||
'rec_id': 'fda0f41...'}
|
||||
"""
|
||||
if key in self.metadata:
|
||||
raise ValueError(
|
||||
f"Key {key} already in metadata. Use Recording.update_metadata() to modify existing fields."
|
||||
)
|
||||
|
||||
if not _is_valid_metadata_key(key):
|
||||
raise ValueError(f"Invalid metadata key: {key}.")
|
||||
|
||||
if not _is_jsonable(value):
|
||||
raise ValueError("Value must be JSON serializable.")
|
||||
|
||||
self._metadata[key] = value
|
||||
|
||||
def update_metadata(self, key: str, value: Any) -> None:
|
||||
"""Update the value of an existing metadata key,
|
||||
or add the key value pair if it does not already exist.
|
||||
|
||||
:param key: Existing metadata key.
|
||||
:type key: str
|
||||
:param value: New value to enter at key.
|
||||
:type value: any
|
||||
|
||||
:raises ValueError: If value is not JSON serializable
|
||||
:raises ValueError: If key is protected.
|
||||
|
||||
:return: None.
|
||||
|
||||
**Examples:**
|
||||
|
||||
Create a recording and update metadata:
|
||||
|
||||
>>> import numpy
|
||||
>>> from ria_toolkit_oss.datatypes import Recording
|
||||
|
||||
>>> samples = numpy.ones(10000, dtype=numpy.complex64)
|
||||
>>> metadata = {
|
||||
>>> "sample_rate": 1e6,
|
||||
>>> "center_frequency": 2.44e9,
|
||||
>>> "author": "me"
|
||||
>>> }
|
||||
|
||||
>>> recording = Recording(data=samples, metadata=metadata)
|
||||
>>> print(recording.metadata)
|
||||
{'sample_rate': 1000000.0,
|
||||
'center_frequency': 2440000000.0,
|
||||
'author': "me",
|
||||
'timestamp': 17369...
|
||||
'rec_id': 'fda0f41...'}
|
||||
|
||||
>>> recording.update_metadata(key="author", value=you")
|
||||
>>> print(recording.metadata)
|
||||
{'sample_rate': 1000000.0,
|
||||
'center_frequency': 2440000000.0,
|
||||
'author': "you",
|
||||
'timestamp': 17369...
|
||||
'rec_id': 'fda0f41...'}
|
||||
"""
|
||||
if key not in self.metadata:
|
||||
self.add_to_metadata(key=key, value=value)
|
||||
return
|
||||
|
||||
if not _is_jsonable(value):
|
||||
raise ValueError("Value must be JSON serializable.")
|
||||
|
||||
if key in PROTECTED_KEYS: # Check protected keys.
|
||||
raise ValueError(f"Key {key} is protected and cannot be modified or removed.")
|
||||
|
||||
else:
|
||||
self._metadata[key] = value
|
||||
|
||||
def remove_from_metadata(self, key: str):
|
||||
"""
|
||||
Remove a key from the recording metadata.
|
||||
Does not remove key if it is protected.
|
||||
|
||||
:param key: The key to remove.
|
||||
:type key: str
|
||||
|
||||
:raises ValueError: If key is protected.
|
||||
|
||||
:return: None.
|
||||
|
||||
**Examples:**
|
||||
|
||||
Create a recording and add metadata:
|
||||
|
||||
>>> import numpy
|
||||
>>> from ria_toolkit_oss.datatypes import Recording
|
||||
|
||||
>>> samples = numpy.ones(10000, dtype=numpy.complex64)
|
||||
>>> metadata = {
|
||||
... "sample_rate": 1e6,
|
||||
... "center_frequency": 2.44e9,
|
||||
... }
|
||||
|
||||
>>> recording = Recording(data=samples, metadata=metadata)
|
||||
>>> print(recording.metadata)
|
||||
{'sample_rate': 1000000.0,
|
||||
'center_frequency': 2440000000.0,
|
||||
'timestamp': 17369..., # Example value
|
||||
'rec_id': 'fda0f41...'} # Example value
|
||||
|
||||
>>> recording.add_to_metadata(key="author", value="me")
|
||||
>>> print(recording.metadata)
|
||||
{'sample_rate': 1000000.0,
|
||||
'center_frequency': 2440000000.0,
|
||||
'author': 'me',
|
||||
'timestamp': 17369..., # Example value
|
||||
'rec_id': 'fda0f41...'} # Example value
|
||||
"""
|
||||
if key not in PROTECTED_KEYS:
|
||||
self._metadata.pop(key, None)
|
||||
else:
|
||||
raise ValueError(f"Key {key} is protected and cannot be modified or removed.")
|
||||
|
||||
def view(self, output_path: Optional[str] = "images/signal.png", **kwargs) -> None:
|
||||
"""Create a plot of various signal visualizations as a PNG image.
|
||||
|
||||
:param output_path: The output image path. Defaults to "images/signal.png".
|
||||
:type output_path: str, optional
|
||||
:param kwargs: Keyword arguments passed on to utils.view.view_sig.
|
||||
:type: dict of keyword arguments
|
||||
|
||||
**Examples:**
|
||||
|
||||
Create a recording and view it as a plot in a .png image:
|
||||
|
||||
>>> import numpy
|
||||
>>> from utils.data import Recording
|
||||
|
||||
>>> samples = numpy.ones(10000, dtype=numpy.complex64)
|
||||
>>> metadata = {
|
||||
>>> "sample_rate": 1e6,
|
||||
>>> "center_frequency": 2.44e9,
|
||||
>>> }
|
||||
|
||||
>>> recording = Recording(data=samples, metadata=metadata)
|
||||
>>> recording.view()
|
||||
"""
|
||||
from ria_toolkit_oss.view.view_signal import view_sig
|
||||
|
||||
view_sig(recording=self, output_path=output_path, **kwargs)
|
||||
|
||||
def simple_view(self, **kwargs) -> None:
|
||||
"""Create a plot of various signal visualizations as a PNG or SVG image.
|
||||
|
||||
:param kwargs: Keyword arguments passed on to utils.view.view_signal_simple.create_plots.
|
||||
:type: dict of keyword arguments
|
||||
|
||||
**Examples:**
|
||||
|
||||
Create a recording and view it as a plot in a .png image:
|
||||
|
||||
>>> import numpy
|
||||
>>> from utils.data import Recording
|
||||
|
||||
>>> samples = numpy.ones(10000, dtype=numpy.complex64)
|
||||
>>> metadata = {
|
||||
>>> "sample_rate": 1e6,
|
||||
>>> "center_frequency": 2.44e9,
|
||||
>>> }
|
||||
|
||||
>>> recording = Recording(data=samples, metadata=metadata)
|
||||
>>> recording.simple_view()
|
||||
"""
|
||||
from ria_toolkit_oss.view.view_signal_simple import view_simple_sig
|
||||
|
||||
view_simple_sig(recording=self, **kwargs)
|
||||
|
||||
def to_sigmf(
    self, filename: Optional[str] = None, path: Optional[os.PathLike | str] = None, overwrite: bool = False
) -> None:
    """Write this recording to a set of SigMF files.

    The SigMF io format is defined by the `SigMF Specification Project <https://github.com/sigmf/SigMF>`_

    :param filename: The name of the file where the recording is to be saved. Defaults to auto generated filename.
    :type filename: os.PathLike or str, optional
    :param path: The directory path to where the recording is to be saved. Defaults to recordings/.
    :type path: os.PathLike or str, optional
    :param overwrite: Whether to overwrite existing files. Default is False.
    :type overwrite: bool, optional

    :raises IOError: If there is an issue encountered during the file writing process.

    :return: None
    """
    # Imported lazily: ria_toolkit_oss.io.recording itself imports Recording from
    # this package, so a top-level import would be circular.
    from ria_toolkit_oss.io.recording import to_sigmf

    to_sigmf(filename=filename, path=path, recording=self, overwrite=overwrite)
|
||||
|
||||
def to_npy(
    self, filename: Optional[str] = None, path: Optional[os.PathLike | str] = None, overwrite: bool = False
) -> str:
    """Write recording to ``.npy`` binary file.

    :param filename: The name of the file where the recording is to be saved. Defaults to auto generated filename.
    :type filename: os.PathLike or str, optional
    :param path: The directory path to where the recording is to be saved. Defaults to recordings/.
    :type path: os.PathLike or str, optional
    :param overwrite: Whether to overwrite existing files. Default is False.
    :type overwrite: bool, optional

    :raises IOError: If there is an issue encountered during the file writing process.

    :return: Path where the file was saved.
    :rtype: str

    **Examples:**

    Create a recording and save it to a .npy file:

    >>> import numpy
    >>> from ria_toolkit_oss.datatypes import Recording

    >>> samples = numpy.ones(10000, dtype=numpy.complex64)
    >>> metadata = {
    ...     "sample_rate": 1e6,
    ...     "center_frequency": 2.44e9,
    ... }

    >>> recording = Recording(data=samples, metadata=metadata)
    >>> recording.to_npy()
    """
    # Imported lazily: ria_toolkit_oss.io.recording itself imports Recording from
    # this package, so a top-level import would be circular.
    from ria_toolkit_oss.io.recording import to_npy

    # Bug fix: the result was previously discarded, so the method returned None
    # despite its declared ``-> str`` and documented return value. Propagate the
    # saved path, matching the sibling ``to_wav``/``to_blue`` wrappers.
    return to_npy(recording=self, filename=filename, path=path, overwrite=overwrite)
|
||||
|
||||
def to_wav(
    self,
    filename: Optional[str] = None,
    path: Optional[os.PathLike | str] = None,
    target_sample_rate: Optional[int] = 48000,
    bits_per_sample: int = 32,
    overwrite: bool = False,
) -> str:
    """Write recording to WAV file with embedded YAML metadata.

    WAV format uses stereo audio with I (in-phase) in left channel and Q (quadrature) in right channel.
    Metadata is stored in standard LIST INFO chunks with RF-specific metadata encoded as YAML
    in the ICMT (comment) field for human readability.

    :param filename: The name of the file where the recording is to be saved. Defaults to auto generated filename.
    :type filename: os.PathLike or str, optional
    :param path: The directory path to where the recording is to be saved. Defaults to recordings/.
    :type path: os.PathLike or str, optional
    :param target_sample_rate: Sample rate stored in the WAV header when no sample_rate metadata
        is present. IQ samples are written without decimation or interpolation. Default is 48000 Hz.
    :type target_sample_rate: int, optional
    :param bits_per_sample: Bits per sample (32 for float32, 16 for int16). Default is 32.
    :type bits_per_sample: int, optional
    :param overwrite: Whether to overwrite existing files. Default is False.
    :type overwrite: bool, optional

    :raises IOError: If there is an issue encountered during the file writing process.

    :return: Path where the file was saved.
    :rtype: str

    **Examples:**

    Create a recording and save it to a .wav file:

    >>> import numpy
    >>> from ria_toolkit_oss.datatypes import Recording
    >>> samples = numpy.exp(1j * 2 * numpy.pi * 0.1 * numpy.arange(10000))
    >>> metadata = {"sample_rate": 1e6, "center_frequency": 915e6}
    >>> recording = Recording(data=samples, metadata=metadata)
    >>> recording.to_wav()
    """
    # Imported lazily: ria_toolkit_oss.io.recording itself imports Recording from
    # this package, so a top-level import would be circular.
    from ria_toolkit_oss.io.recording import to_wav

    return to_wav(
        recording=self,
        filename=filename,
        path=path,
        target_sample_rate=target_sample_rate,
        bits_per_sample=bits_per_sample,
        overwrite=overwrite,
    )
|
||||
|
||||
def to_blue(
    self,
    filename: Optional[str] = None,
    path: Optional[os.PathLike | str] = None,
    data_format: str = "CI",
    overwrite: bool = False,
) -> str:
    """Write recording to MIDAS Blue file format.

    MIDAS Blue is a legacy RF file format with a 512-byte binary header.
    Commonly used with X-Midas and other RF/radar signal processing tools.

    :param filename: The name of the file where the recording is to be saved. Defaults to auto generated filename.
    :type filename: os.PathLike or str, optional
    :param path: The directory path to where the recording is to be saved. Defaults to recordings/.
    :type path: os.PathLike or str, optional
    :param data_format: Format code (default 'CI' = complex int16).
        Common formats: 'CI' (complex int16), 'CF' (complex float32), 'CD' (complex float64).
        Integer formats require the IQ samples to already be scaled within [-1, 1).
    :type data_format: str, optional
    :param overwrite: Whether to overwrite existing files. Default is False.
    :type overwrite: bool, optional

    :raises IOError: If there is an issue encountered during the file writing process.

    :return: Path where the file was saved.
    :rtype: str

    **Examples:**

    Create a recording and save it to a .blue file:

    >>> import numpy
    >>> from ria_toolkit_oss.datatypes import Recording
    >>> samples = numpy.ones(10000, dtype=numpy.complex64)
    >>> metadata = {"sample_rate": 1e6, "center_frequency": 2.44e9}
    >>> recording = Recording(data=samples, metadata=metadata)
    >>> recording.to_blue()
    """
    # Imported lazily: ria_toolkit_oss.io.recording itself imports Recording from
    # this package, so a top-level import would be circular.
    from ria_toolkit_oss.io.recording import to_blue

    return to_blue(recording=self, filename=filename, path=path, data_format=data_format, overwrite=overwrite)
|
||||
|
||||
def trim(self, num_samples: int, start_sample: Optional[int] = 0) -> Recording:
    """Trim Recording samples to a desired length, shifting annotations to maintain alignment.

    :param start_sample: The start index of the desired trimmed recording. Defaults to 0.
    :type start_sample: int, optional
    :param num_samples: The number of samples that the output trimmed recording will have.
    :type num_samples: int
    :raises IndexError: If start_sample + num_samples is greater than the length of the recording.
    :raises IndexError: If start_sample < 0 or num_samples < 0.

    :return: The trimmed Recording.
    :rtype: Recording

    **Examples:**

    Create a recording and trim it:

    >>> import numpy
    >>> from ria_toolkit_oss.datatypes import Recording

    >>> samples = numpy.ones(10000, dtype=numpy.complex64)
    >>> metadata = {
    ...     "sample_rate": 1e6,
    ...     "center_frequency": 2.44e9,
    ... }

    >>> recording = Recording(data=samples, metadata=metadata)
    >>> print(len(recording))
    10000

    >>> trimmed_recording = recording.trim(start_sample=1000, num_samples=1000)
    >>> print(len(trimmed_recording))
    1000
    """

    if start_sample < 0:
        raise IndexError("start_sample cannot be < 0.")
    if num_samples < 0:
        # Bug fix: the docstring promises an IndexError for negative num_samples,
        # but no check existed — a negative value silently produced a bogus slice.
        raise IndexError("num_samples cannot be < 0.")
    if start_sample + num_samples > len(self):
        raise IndexError(
            f"start_sample {start_sample} + num_samples {num_samples} > recording length {len(self)}."
        )

    end_sample = start_sample + num_samples

    # Data is (channels, samples); trim along the sample axis only.
    data = self.data[:, start_sample:end_sample]

    # Deep-copy so mutating annotation offsets below cannot corrupt self.annotations.
    new_annotations = copy.deepcopy(self.annotations)
    trimmed_annotations = []
    for annotation in new_annotations:
        # Skip annotations entirely outside the trim window.
        if annotation.sample_start + annotation.sample_count <= start_sample:
            continue
        if annotation.sample_start >= end_sample:
            continue

        # Clip annotation if it extends beyond the trim boundaries.
        if annotation.sample_start < start_sample:
            annotation.sample_count = annotation.sample_count - (start_sample - annotation.sample_start)
            annotation.sample_start = start_sample

        if annotation.sample_start + annotation.sample_count > end_sample:
            annotation.sample_count = end_sample - annotation.sample_start

        # Shift annotation to align with the new start point.
        annotation.sample_start = annotation.sample_start - start_sample
        trimmed_annotations.append(annotation)

    return Recording(data=data, metadata=self.metadata, annotations=trimmed_annotations)
|
||||
|
||||
def normalize(self) -> Recording:
    """Scale the recording data, relative to its maximum value, so that the magnitude of the maximum sample is 1.

    :raises ValueError: If every sample in the recording is zero.

    :return: Recording where the maximum sample amplitude is 1.
    :rtype: Recording

    **Examples:**

    Create a recording with maximum amplitude 0.5 and normalize to a maximum amplitude of 1:

    >>> import numpy
    >>> from ria_toolkit_oss.datatypes import Recording

    >>> samples = numpy.ones(10000, dtype=numpy.complex64) * 0.5
    >>> metadata = {
    ...     "sample_rate": 1e6,
    ...     "center_frequency": 2.44e9,
    ... }

    >>> recording = Recording(data=samples, metadata=metadata)
    >>> print(numpy.max(numpy.abs(recording.data)))
    0.5

    >>> normalized_recording = recording.normalize()
    >>> print(numpy.max(numpy.abs(normalized_recording.data)))
    1
    """
    peak = np.max(np.abs(self.data))
    if peak == 0:
        raise ValueError("Cannot normalize a recording with all-zero data.")
    return Recording(data=self.data / peak, metadata=self.metadata, annotations=self.annotations)
|
||||
|
||||
def __len__(self) -> int:
    """The length of a recording is defined by the number of complex samples in each channel of the recording."""
    samples_per_channel = self.shape[1]
    return samples_per_channel
|
||||
|
||||
def __eq__(self, other: Recording) -> bool:
    """Two Recordings are equal if all data, metadata, and annotations are the same.

    NOTE(review): annotations are compared as plain lists, so ordering matters —
    the same annotations in a different order compare unequal. The previous
    comment claimed order-independent comparison; confirm which is intended.
    """
    return (
        np.array_equal(self.data, other.data)
        and self.metadata == other.metadata
        and self.annotations == other.annotations
    )
|
||||
|
||||
def __ne__(self, other: Recording) -> bool:
    """Two Recordings are unequal if any of their data, metadata, or annotations differ."""
    return not self.__eq__(other=other)
|
||||
|
||||
def __iter__(self) -> Iterator:
    """Reset the channel cursor and return self as an iterator over channels."""
    # NOTE(review): iteration state is stored on the instance, so nested or
    # concurrent iteration over the same Recording shares a single cursor.
    self._index = 0
    return self
|
||||
|
||||
def __next__(self) -> np.ndarray:
    """Return the samples of the next channel, advancing the iteration cursor."""
    if self._index >= self.n_chan:
        raise StopIteration
    channel = self.data[self._index]
    self._index += 1
    return channel
|
||||
|
||||
def __getitem__(self, key: int | tuple[int] | slice) -> np.ndarray | np.complexfloating:
    """If key is an integer, tuple of integers, or a slice, return the corresponding samples.

    For arrays with 1,024 or fewer samples, return a copy of the recording data. For larger arrays, return a
    read-only view. This prevents mutation at a distance while maintaining performance.
    """
    if not isinstance(key, (int, tuple, slice)):
        raise ValueError(f"Key must be an integer, tuple, or slice but was {type(key)}.")

    selected = self._data[key]
    if isinstance(selected, np.complexfloating):
        # Scalar result: immutable, safe to hand back directly.
        return selected
    if selected.size > 1024:
        selected.setflags(write=False)  # Large result: return a read-only view.
        return selected
    # Small result: a copy is cheap and fully decouples the caller.
    return selected.copy()
|
||||
|
||||
def __setitem__(self, *args, **kwargs) -> None:
    """Raise an error if an attempt is made to assign to the recording.

    :raises ValueError: Always; Recording samples cannot be modified via item assignment.
    """
    raise ValueError("Assignment to Recording is not allowed.")
|
||||
|
||||
|
||||
def generate_recording_id(data: np.ndarray, timestamp: Optional[float | int] = None) -> str:
    """Generate a unique 64-character recording ID.

    The ID is the SHA-256 hex digest of the recording data concatenated with the
    datetime that the data was generated. If no timestamp is provided, the current
    time is used.

    :param data: Tape of IQ samples, as a NumPy array.
    :type data: np.ndarray
    :param timestamp: Unix timestamp in seconds. Defaults to None.
    :type timestamp: float or int, optional

    :return: 64-character hex digest, to be used as the recording ID.
    :rtype: str
    """
    ts = time.time() if timestamp is None else timestamp
    digest_input = data.tobytes() + str(ts).encode("utf-8")
    return hashlib.sha256(digest_input).hexdigest()
|
||||
|
||||
|
||||
def _is_jsonable(x: Any) -> bool:
|
||||
"""
|
||||
:return: True if x is JSON serializable, False otherwise.
|
||||
"""
|
||||
try:
|
||||
json.dumps(x)
|
||||
return True
|
||||
except (TypeError, OverflowError):
|
||||
return False
|
||||
|
||||
|
||||
def _is_valid_metadata_key(key: Any) -> bool:
|
||||
"""
|
||||
:return: True if key is a valid metadata key, False otherwise.
|
||||
"""
|
||||
if isinstance(key, str) and key.islower() and re.match(pattern=r"^[a-z_]+$", string=key) is not None:
|
||||
return True
|
||||
|
||||
else:
|
||||
return False
|
||||
|
|
@ -1,5 +1,5 @@
|
|||
"""
|
||||
Utilities for input/output operations on the ria_toolkit_oss.data.Recording object.
|
||||
Utilities for input/output operations on the ria_toolkit_oss.datatypes.Recording object.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
|
|
@ -19,8 +19,8 @@ from quantiphy import Quantity
|
|||
from sigmf import SigMFFile, sigmffile
|
||||
from sigmf.utils import get_data_type_str
|
||||
|
||||
from ria_toolkit_oss.data import Annotation
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes import Annotation
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
|
||||
_BLUE_META_PREFIX = "META_"
|
||||
_BLUE_META_TAG_MAX_LEN = 60
|
||||
|
|
@ -64,7 +64,7 @@ def to_npy(
|
|||
"""Write recording to ``.npy`` binary file.
|
||||
|
||||
:param recording: The recording to be written to file.
|
||||
:type recording: ria_toolkit_oss.data.Recording
|
||||
:type recording: ria_toolkit_oss.datatypes.Recording
|
||||
:param filename: The name of the file where the recording is to be saved. Defaults to auto generated filename.
|
||||
:type filename: os.PathLike or str, optional
|
||||
:param path: The directory path to where the recording is to be saved. Defaults to recordings/.
|
||||
|
|
@ -135,7 +135,7 @@ def from_npy(file: os.PathLike | str, legacy: bool = False) -> Recording:
|
|||
:raises IOError: If there is an issue encountered during the file reading process.
|
||||
|
||||
:return: The recording, as initialized from the ``.npy`` file.
|
||||
:rtype: ria_toolkit_oss.data.Recording
|
||||
:rtype: ria_toolkit_oss.datatypes.Recording
|
||||
"""
|
||||
|
||||
filename, extension = os.path.splitext(file)
|
||||
|
|
@ -161,7 +161,7 @@ def from_npy(file: os.PathLike | str, legacy: bool = False) -> Recording:
|
|||
try:
|
||||
raw_ann = np.load(f, allow_pickle=False)
|
||||
ann_list = json.loads(raw_ann.tobytes().decode())
|
||||
from ria_toolkit_oss.data.annotation import Annotation
|
||||
from ria_toolkit_oss.datatypes.annotation import Annotation
|
||||
|
||||
annotations = [Annotation(**a) for a in ann_list]
|
||||
except EOFError:
|
||||
|
|
@ -198,7 +198,7 @@ def from_npy_legacy(file: os.PathLike | str) -> Recording:
|
|||
:raises IOError: If there is an issue encountered during the file reading process.
|
||||
|
||||
:return: The recording, as initialized from the legacy ``.npy`` file.
|
||||
:rtype: ria_toolkit_oss.data.Recording
|
||||
:rtype: ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
**Examples:**
|
||||
|
||||
|
|
@ -270,7 +270,7 @@ def to_sigmf(
|
|||
The SigMF io format is defined by the `SigMF Specification Project <https://github.com/sigmf/SigMF>`_
|
||||
|
||||
:param recording: The recording to be written to file.
|
||||
:type recording: ria_toolkit_oss.data.Recording
|
||||
:type recording: ria_toolkit_oss.datatypes.Recording
|
||||
:param filename: The name of the file where the recording is to be saved. Defaults to auto generated filename.
|
||||
:type filename: os.PathLike or str, optional
|
||||
:param path: The directory path to where the recording is to be saved. Defaults to recordings/.
|
||||
|
|
@ -381,7 +381,7 @@ def from_sigmf(file: os.PathLike | str) -> Recording:
|
|||
:raises IOError: If there is an issue encountered during the file reading process.
|
||||
|
||||
:return: The recording, as initialized from the SigMF files.
|
||||
:rtype: ria_toolkit_oss.data.Recording
|
||||
:rtype: ria_toolkit_oss.datatypes.Recording
|
||||
"""
|
||||
|
||||
file = str(file)
|
||||
|
|
@ -443,7 +443,7 @@ def to_wav(
|
|||
in the ICMT (comment) field for human readability.
|
||||
|
||||
:param recording: The recording to be written to file.
|
||||
:type recording: ria_toolkit_oss.data.Recording
|
||||
:type recording: ria_toolkit_oss.datatypes.Recording
|
||||
:param filename: The name of the file where the recording is to be saved.
|
||||
Defaults to auto-generated filename.
|
||||
:type filename: str, optional
|
||||
|
|
@ -553,7 +553,7 @@ def from_wav(file: os.PathLike | str) -> Recording:
|
|||
:raises ValueError: If file is not stereo or has unsupported format.
|
||||
|
||||
:return: The recording, as initialized from the WAV file.
|
||||
:rtype: ria_toolkit_oss.data.Recording
|
||||
:rtype: ria_toolkit_oss.datatypes.Recording
|
||||
"""
|
||||
import wave
|
||||
|
||||
|
|
@ -635,7 +635,7 @@ def to_blue(
|
|||
Commonly used with X-Midas and other RF/radar signal processing tools.
|
||||
|
||||
:param recording: The recording to be written to file.
|
||||
:type recording: ria_toolkit_oss.data.Recording
|
||||
:type recording: ria_toolkit_oss.datatypes.Recording
|
||||
:param filename: The name of the file where the recording is to be saved.
|
||||
Defaults to auto-generated filename.
|
||||
:type filename: str, optional
|
||||
|
|
@ -792,7 +792,7 @@ def from_blue(file: os.PathLike | str) -> Recording:
|
|||
:raises ValueError: If file format is not valid or unsupported.
|
||||
|
||||
:return: The recording, as initialized from the Blue file.
|
||||
:rtype: ria_toolkit_oss.data.Recording
|
||||
:rtype: ria_toolkit_oss.datatypes.Recording
|
||||
"""
|
||||
filename = str(file)
|
||||
if not filename.endswith(".blue"):
|
||||
|
|
@ -917,7 +917,7 @@ def load_recording(file: os.PathLike) -> Recording:
|
|||
:raises ValueError: If the inferred file extension is not supported.
|
||||
|
||||
:return: The recording, as initialized from file(s).
|
||||
:rtype: ria_toolkit_oss.data.Recording
|
||||
:rtype: ria_toolkit_oss.datatypes.Recording
|
||||
"""
|
||||
_, extension = os.path.splitext(file)
|
||||
extension = extension.lstrip(".")
|
||||
|
|
|
|||
|
|
@ -233,9 +233,6 @@ class TransmitterConfig:
|
|||
# For sdr_remote control — keys: host, ssh_user, ssh_key_path, device_type, device_id, zmq_port
|
||||
sdr_remote: Optional[dict] = None
|
||||
|
||||
# For sdr_agent control — keys: modulation, order, symbol_rate, center_frequency, filter, rolloff
|
||||
sdr_agent: Optional[dict] = None
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, d: dict) -> "TransmitterConfig":
|
||||
schedule = [CaptureStep.from_dict(s) for s in d.get("schedule", [])]
|
||||
|
|
@ -247,7 +244,6 @@ class TransmitterConfig:
|
|||
script=d.get("script"),
|
||||
device=d.get("device"),
|
||||
sdr_remote=d.get("sdr_remote"),
|
||||
sdr_agent=d.get("sdr_agent"),
|
||||
)
|
||||
|
||||
|
||||
|
|
@ -276,7 +272,6 @@ class OutputConfig:
|
|||
path: str = "recordings"
|
||||
device_id: Optional[str] = None # for device-profile campaigns
|
||||
repo: Optional[str] = None
|
||||
folder: Optional[str] = None # repo subfolder: None = use campaign name, "" = no subfolder, str = custom
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, d: dict) -> "OutputConfig":
|
||||
|
|
@ -285,7 +280,6 @@ class OutputConfig:
|
|||
path=str(d.get("path", "recordings")),
|
||||
device_id=d.get("device_id"),
|
||||
repo=d.get("repo"),
|
||||
folder=d.get("folder"),
|
||||
)
|
||||
|
||||
|
||||
|
|
@ -299,7 +293,6 @@ class CampaignConfig:
|
|||
qa: QAConfig = field(default_factory=QAConfig)
|
||||
output: OutputConfig = field(default_factory=OutputConfig)
|
||||
mode: str = "controlled_testbed"
|
||||
loops: int = 1 # repeat full schedule this many times; labels get _run{N:02d} suffix
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Loaders
|
||||
|
|
@ -327,7 +320,6 @@ class CampaignConfig:
|
|||
return cls(
|
||||
name=safe_name,
|
||||
mode=str(campaign_meta.get("mode", "controlled_testbed")),
|
||||
loops=max(1, int(campaign_meta.get("loops", 1))),
|
||||
recorder=RecorderConfig.from_dict(raw["recorder"]),
|
||||
transmitters=transmitters,
|
||||
qa=QAConfig.from_dict(raw.get("qa", {})),
|
||||
|
|
@ -392,7 +384,6 @@ class CampaignConfig:
|
|||
return cls(
|
||||
name=safe_name,
|
||||
mode=str(campaign_meta.get("mode", "controlled_testbed")),
|
||||
loops=max(1, int(campaign_meta.get("loops", 1))),
|
||||
recorder=RecorderConfig.from_dict(raw["recorder"]),
|
||||
transmitters=transmitters,
|
||||
qa=QAConfig.from_dict(raw.get("qa", {})),
|
||||
|
|
@ -495,9 +486,9 @@ class CampaignConfig:
|
|||
)
|
||||
|
||||
def total_capture_time_s(self) -> float:
|
||||
"""Sum of all step durations across all transmitters and loops."""
|
||||
return sum(step.duration for tx in self.transmitters for step in tx.schedule) * self.loops
|
||||
"""Sum of all step durations across all transmitters."""
|
||||
return sum(step.duration for tx in self.transmitters for step in tx.schedule)
|
||||
|
||||
def total_steps(self) -> int:
|
||||
"""Total number of capture steps across all transmitters and loops."""
|
||||
return sum(len(tx.schedule) for tx in self.transmitters) * self.loops
|
||||
"""Total number of capture steps across all transmitters."""
|
||||
return sum(len(tx.schedule) for tx in self.transmitters)
|
||||
|
|
|
|||
|
|
@ -5,19 +5,17 @@ from __future__ import annotations
|
|||
import json
|
||||
import logging
|
||||
import subprocess
|
||||
import threading
|
||||
import time
|
||||
from dataclasses import dataclass, field, replace
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Callable, Optional
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.io.recording import to_sigmf
|
||||
|
||||
from .campaign import CampaignConfig, CaptureStep, TransmitterConfig
|
||||
from .labeler import build_output_filename, label_recording
|
||||
from .qa import QAResult, check_recording
|
||||
from .tx_executor import TxExecutor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -171,21 +169,6 @@ def _run_script(script: str, *args: str, timeout: float = 15.0) -> str:
|
|||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _extract_tx_params(transmitter: TransmitterConfig) -> dict | None:
|
||||
"""Build a tx_params dict from a transmitter's signal config for SigMF labeling.
|
||||
|
||||
For sdr_agent transmitters, returns the synthetic generation parameters
|
||||
(modulation, order, symbol_rate, etc.) so recordings capture what was
|
||||
transmitted. Returns None for control methods without signal-level params.
|
||||
"""
|
||||
sdr_agent_cfg = getattr(transmitter, "sdr_agent", None)
|
||||
if not sdr_agent_cfg:
|
||||
return None
|
||||
# Extract known signal-level fields; ignore infra fields
|
||||
_INFRA_KEYS = {"node_id", "session_code"}
|
||||
return {k: v for k, v in sdr_agent_cfg.items() if k not in _INFRA_KEYS and v is not None}
|
||||
|
||||
|
||||
class CampaignExecutor:
|
||||
"""Executes a :class:`CampaignConfig` end-to-end.
|
||||
|
||||
|
|
@ -209,14 +192,11 @@ class CampaignExecutor:
|
|||
config: CampaignConfig,
|
||||
progress_cb: Optional[Callable[[int, int, StepResult], None]] = None,
|
||||
verbose: bool = False,
|
||||
skip_local_tx: bool = False,
|
||||
):
|
||||
self.config = config
|
||||
self.progress_cb = progress_cb
|
||||
self.skip_local_tx = skip_local_tx
|
||||
self._sdr = None
|
||||
self._remote_tx_controllers: dict = {}
|
||||
self._tx_executors: dict[str, tuple] = {} # tx_id → (TxExecutor, stop_event, thread)
|
||||
|
||||
if verbose:
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
|
@ -236,12 +216,10 @@ class CampaignExecutor:
|
|||
"""
|
||||
result = CampaignResult(campaign_name=self.config.name)
|
||||
|
||||
loops = self.config.loops
|
||||
logger.info(
|
||||
f"Starting campaign '{self.config.name}': "
|
||||
f"{self.config.total_steps()} steps"
|
||||
+ (f" ({self.config.total_steps() // loops} × {loops} loops)" if loops > 1 else "")
|
||||
+ f", ~{self.config.total_capture_time_s():.0f}s capture time"
|
||||
f"{self.config.total_steps()} steps, "
|
||||
f"~{self.config.total_capture_time_s():.0f}s capture time"
|
||||
)
|
||||
|
||||
self._init_sdr()
|
||||
|
|
@ -250,36 +228,29 @@ class CampaignExecutor:
|
|||
total = self.config.total_steps()
|
||||
step_index = 0
|
||||
|
||||
for loop_idx in range(loops):
|
||||
if loops > 1:
|
||||
logger.info(f"Loop {loop_idx + 1}/{loops}")
|
||||
for transmitter in self.config.transmitters:
|
||||
logger.info(f"Transmitter: {transmitter.id} ({len(transmitter.schedule)} steps)")
|
||||
for step in transmitter.schedule:
|
||||
looped_step = replace(step, label=f"{step.label}_run{loop_idx + 1:02d}") if loops > 1 else step
|
||||
step_result = self._execute_step(transmitter, looped_step)
|
||||
result.steps.append(step_result)
|
||||
step_index += 1
|
||||
for transmitter in self.config.transmitters:
|
||||
logger.info(f"Transmitter: {transmitter.id} ({len(transmitter.schedule)} steps)")
|
||||
for step in transmitter.schedule:
|
||||
step_result = self._execute_step(transmitter, step)
|
||||
result.steps.append(step_result)
|
||||
step_index += 1
|
||||
|
||||
if self.progress_cb:
|
||||
self.progress_cb(step_index, total, step_result)
|
||||
if self.progress_cb:
|
||||
self.progress_cb(step_index, total, step_result)
|
||||
|
||||
if step_result.error:
|
||||
logger.warning(f"Step '{looped_step.label}' error: {step_result.error}")
|
||||
elif step_result.qa.flagged:
|
||||
logger.warning(
|
||||
f"Step '{looped_step.label}' flagged for review: " + "; ".join(step_result.qa.issues)
|
||||
)
|
||||
else:
|
||||
logger.info(
|
||||
f"Step '{looped_step.label}' OK "
|
||||
f"(SNR {step_result.qa.snr_db:.1f} dB, "
|
||||
f"{step_result.qa.duration_s:.1f}s)"
|
||||
)
|
||||
if step_result.error:
|
||||
logger.warning(f"Step '{step.label}' error: {step_result.error}")
|
||||
elif step_result.qa.flagged:
|
||||
logger.warning(f"Step '{step.label}' flagged for review: " + "; ".join(step_result.qa.issues))
|
||||
else:
|
||||
logger.info(
|
||||
f"Step '{step.label}' OK "
|
||||
f"(SNR {step_result.qa.snr_db:.1f} dB, "
|
||||
f"{step_result.qa.duration_s:.1f}s)"
|
||||
)
|
||||
finally:
|
||||
self._close_sdr()
|
||||
self._close_remote_tx_controllers()
|
||||
self._close_tx_executors()
|
||||
|
||||
result.end_time = time.time()
|
||||
logger.info(
|
||||
|
|
@ -354,12 +325,6 @@ class CampaignExecutor:
|
|||
logger.warning(f"Error closing remote Tx controller {tx_id}: {exc}")
|
||||
self._remote_tx_controllers.clear()
|
||||
|
||||
def _close_tx_executors(self) -> None:
|
||||
for tx_id, (_, stop_event, t) in list(self._tx_executors.items()):
|
||||
stop_event.set()
|
||||
t.join(timeout=5.0)
|
||||
self._tx_executors.clear()
|
||||
|
||||
def _record(self, duration_s: float) -> Recording:
|
||||
"""Capture ``duration_s`` seconds of IQ samples."""
|
||||
num_samples = int(duration_s * self.config.recorder.sample_rate)
|
||||
|
|
@ -404,7 +369,6 @@ class CampaignExecutor:
|
|||
step=step,
|
||||
capture_timestamp=capture_timestamp,
|
||||
campaign_name=self.config.name,
|
||||
tx_params=_extract_tx_params(transmitter),
|
||||
)
|
||||
|
||||
# QA
|
||||
|
|
@ -473,30 +437,6 @@ class CampaignExecutor:
|
|||
# Start transmission in background; _record() runs concurrently
|
||||
ctrl.transmit_async(step.duration + 1.0)
|
||||
|
||||
elif transmitter.control_method == "sdr_agent":
|
||||
if self.skip_local_tx:
|
||||
logger.debug(f"skip_local_tx — TX for '{transmitter.id}' delegated to TX agent node")
|
||||
return
|
||||
if not transmitter.sdr_agent:
|
||||
logger.warning(f"Transmitter '{transmitter.id}' has no sdr_agent config — skipping")
|
||||
return
|
||||
step_dict: dict = {"label": step.label, "duration": step.duration + 1.0}
|
||||
if step.power_dbm is not None:
|
||||
step_dict["power_dbm"] = step.power_dbm
|
||||
tx_config = {
|
||||
"id": transmitter.id,
|
||||
"sdr_agent": transmitter.sdr_agent,
|
||||
"schedule": [step_dict],
|
||||
}
|
||||
rec = self.config.recorder
|
||||
tx_device = transmitter.device or rec.device
|
||||
sdr_device = _DEVICE_ALIASES.get(tx_device.lower(), tx_device.lower())
|
||||
stop_event = threading.Event()
|
||||
executor = TxExecutor(tx_config, sdr_device=sdr_device, stop_event=stop_event)
|
||||
t = threading.Thread(target=executor.run, daemon=True, name=f"tx-{transmitter.id}")
|
||||
self._tx_executors[transmitter.id] = (executor, stop_event, t)
|
||||
t.start()
|
||||
|
||||
else:
|
||||
logger.warning(f"Unknown control method '{transmitter.control_method}' — skipping")
|
||||
|
||||
|
|
@ -519,13 +459,6 @@ class CampaignExecutor:
|
|||
if ctrl is not None:
|
||||
ctrl.wait_transmit(timeout=step.duration + 10.0)
|
||||
|
||||
elif transmitter.control_method == "sdr_agent":
|
||||
entry = self._tx_executors.pop(transmitter.id, None)
|
||||
if entry is not None:
|
||||
_, stop_event, t = entry
|
||||
stop_event.set()
|
||||
t.join(timeout=step.duration + 10.0)
|
||||
|
||||
@staticmethod
|
||||
def _step_params_json(transmitter: TransmitterConfig, step: CaptureStep) -> str:
|
||||
"""Serialise step parameters to a JSON string for the control script."""
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ from __future__ import annotations
|
|||
|
||||
from typing import Optional
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
|
||||
from .campaign import CaptureStep
|
||||
|
||||
|
|
@ -15,7 +15,6 @@ def label_recording(
|
|||
step: CaptureStep,
|
||||
capture_timestamp: float,
|
||||
campaign_name: Optional[str] = None,
|
||||
tx_params: Optional[dict] = None,
|
||||
) -> Recording:
|
||||
"""Apply device identity and capture configuration labels to a recording's metadata.
|
||||
|
||||
|
|
@ -28,9 +27,6 @@ def label_recording(
|
|||
step: The capture step that was active during this recording.
|
||||
capture_timestamp: Unix timestamp (float) of when capture started.
|
||||
campaign_name: Optional campaign name for cross-recording reference.
|
||||
tx_params: Optional dict of transmitter signal parameters (e.g. modulation,
|
||||
order, symbol_rate) written as ``ria:tx_<key>`` fields so downstream
|
||||
training pipelines know what was transmitted into the recording.
|
||||
|
||||
Returns:
|
||||
The same recording with updated metadata.
|
||||
|
|
@ -61,11 +57,6 @@ def label_recording(
|
|||
if step.power_dbm is not None:
|
||||
recording.update_metadata("tx_power_dbm", step.power_dbm)
|
||||
|
||||
# Transmitter signal parameters (e.g. from sdr_agent synthetic generation)
|
||||
if tx_params:
|
||||
for key, value in tx_params.items():
|
||||
recording.update_metadata(f"tx_{key}", value)
|
||||
|
||||
return recording
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ from dataclasses import dataclass, field
|
|||
|
||||
import numpy as np
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
|
||||
from .campaign import QAConfig
|
||||
|
||||
|
|
|
|||
|
|
@ -1,299 +0,0 @@
|
|||
"""TX campaign executor — synthesises and transmits signals via a local SDR.
|
||||
|
||||
The TxExecutor receives a transmitter config dict (matching the
|
||||
``sdr_agent`` control method's schema) and a step schedule, then for each
|
||||
step builds a signal chain with the block generator and transmits it via
|
||||
the local SDR device.
|
||||
|
||||
Supported modulations (``modulation`` field in config):
|
||||
BPSK, QPSK, 8PSK, 16QAM, 64QAM, 256QAM, FSK, OOK, GMSK, OQPSK
|
||||
|
||||
Example config dict (matches CampaignConfig transmitter with
|
||||
``control_method: sdr_agent``)::
|
||||
|
||||
{
|
||||
"id": "synthetic-tx",
|
||||
"type": "sdr",
|
||||
"control_method": "sdr_agent",
|
||||
"sdr_agent": {
|
||||
"modulation": "QPSK",
|
||||
"order": 4,
|
||||
"symbol_rate": 1000000,
|
||||
"center_frequency": 0.0,
|
||||
"filter": "rrc",
|
||||
"rolloff": 0.35
|
||||
},
|
||||
"schedule": [
|
||||
{"label": "step1", "duration": 10, "power_dbm": -10}
|
||||
]
|
||||
}
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import threading
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _parse_hz(val: object) -> float:
|
||||
"""Parse a frequency value that may be a float (Hz) or a string like '2.45GHz'."""
|
||||
if isinstance(val, (int, float)):
|
||||
return float(val)
|
||||
s = str(val).strip()
|
||||
for suffix, mult in (("GHz", 1e9), ("MHz", 1e6), ("kHz", 1e3), ("Hz", 1.0)):
|
||||
if s.endswith(suffix):
|
||||
return float(s[: -len(suffix)]) * mult
|
||||
return float(s)
|
||||
|
||||
|
||||
def _parse_seconds(val: object) -> float:
|
||||
"""Parse a duration value that may be a float (seconds) or a string like '5s'."""
|
||||
if isinstance(val, (int, float)):
|
||||
return float(val)
|
||||
s = str(val).strip()
|
||||
return float(s[:-1]) if s.endswith("s") else float(s)
|
||||
|
||||
|
||||
# Mapping from modulation name → (PSK/QAM order, generator_type)
|
||||
# 'psk' uses PSKGenerator, 'qam' uses QAMGenerator
|
||||
_MOD_TABLE: dict[str, tuple[int, str]] = {
|
||||
"BPSK": (1, "psk"),
|
||||
"QPSK": (2, "psk"),
|
||||
"8PSK": (3, "psk"),
|
||||
"16QAM": (4, "qam"),
|
||||
"64QAM": (6, "qam"),
|
||||
"256QAM": (8, "qam"),
|
||||
}
|
||||
|
||||
_SPECIAL_MODS = {"FSK", "OOK", "GMSK", "OQPSK"}
|
||||
|
||||
# usrp-uhd-client's tx_recording() streams 2 000-sample chunks and loops the
|
||||
# source buffer for the full tx_time, so only this many samples ever need to
|
||||
# be in RAM regardless of step duration or sample rate.
|
||||
# 50 000 complex64 samples ≈ 400 kB — enough spectral diversity for looping.
|
||||
_SYNTH_BLOCK_SAMPLES = 50_000
|
||||
|
||||
|
||||
class TxExecutor:
|
||||
"""Synthesise and transmit a signal campaign via a local SDR.
|
||||
|
||||
Args:
|
||||
config: Transmitter config dict (must have ``sdr_agent`` sub-dict with
|
||||
modulation params, and ``schedule`` list of step dicts).
|
||||
sdr_device: SDR device name to open in TX mode (e.g. "pluto", "usrp").
|
||||
stop_event: External event that aborts the TX loop mid-step.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
config: dict,
|
||||
sdr_device: str = "unknown",
|
||||
stop_event: threading.Event | None = None,
|
||||
) -> None:
|
||||
self.config = config
|
||||
self.sdr_device = sdr_device
|
||||
self.stop_event = stop_event or threading.Event()
|
||||
self._sdr: Any = None
|
||||
|
||||
def run(self) -> None:
|
||||
"""Execute all steps in the schedule, transmitting for each step duration."""
|
||||
agent_cfg: dict = self.config.get("sdr_agent") or {}
|
||||
schedule: list[dict] = self.config.get("schedule") or []
|
||||
|
||||
if not schedule:
|
||||
logger.warning("TxExecutor: no schedule steps — nothing to transmit")
|
||||
return
|
||||
|
||||
modulation: str = agent_cfg.get("modulation", "QPSK").upper()
|
||||
symbol_rate: float = float(agent_cfg.get("symbol_rate", 1e6))
|
||||
center_freq: float = _parse_hz(agent_cfg.get("center_frequency", 0.0))
|
||||
filter_type: str = agent_cfg.get("filter", "rrc").lower()
|
||||
rolloff: float = float(agent_cfg.get("rolloff", 0.35))
|
||||
loops: int = max(1, int(self.config.get("loops", 1)))
|
||||
|
||||
# Upsampling factor: samples_per_symbol, fixed at 8 for SDR compatibility.
|
||||
sps = 8
|
||||
sample_rate = symbol_rate * sps
|
||||
|
||||
self._init_sdr(sample_rate, center_freq)
|
||||
try:
|
||||
for loop_idx in range(loops):
|
||||
if self.stop_event.is_set():
|
||||
break
|
||||
if loops > 1:
|
||||
logger.info("TX loop %d/%d", loop_idx + 1, loops)
|
||||
for step in schedule:
|
||||
if self.stop_event.is_set():
|
||||
break
|
||||
looped_step = (
|
||||
{**step, "label": f"{step.get('label', 'step')}_run{loop_idx + 1:02d}"} if loops > 1 else step
|
||||
)
|
||||
self._execute_step(looped_step, modulation, sps, symbol_rate, filter_type, rolloff)
|
||||
finally:
|
||||
self._close_sdr()
|
||||
|
||||
def _execute_step(
|
||||
self,
|
||||
step: dict,
|
||||
modulation: str,
|
||||
sps: int,
|
||||
symbol_rate: float,
|
||||
filter_type: str,
|
||||
rolloff: float,
|
||||
) -> None:
|
||||
duration: float = _parse_seconds(step.get("duration", 10.0))
|
||||
label: str = step.get("label", "step")
|
||||
gain: float = float(step.get("power_dbm") or 0.0)
|
||||
sample_rate = symbol_rate * sps
|
||||
|
||||
logger.info(
|
||||
"TX step '%s': %.0f s, %s @ %.3f MHz (sps=%d, filter=%s)",
|
||||
label,
|
||||
duration,
|
||||
modulation,
|
||||
symbol_rate / 1e6,
|
||||
sps,
|
||||
filter_type,
|
||||
)
|
||||
|
||||
num_samples = int(duration * sample_rate)
|
||||
|
||||
# Synthesise a short representative block. tx_recording() loops this
|
||||
# buffer for the full tx_time using a 2 000-sample streaming callback,
|
||||
# so peak memory is O(_SYNTH_BLOCK_SAMPLES) regardless of duration.
|
||||
block_size = min(num_samples, _SYNTH_BLOCK_SAMPLES)
|
||||
signal = self._synthesise(modulation, sps, block_size, filter_type, rolloff)
|
||||
|
||||
if self._sdr is not None:
|
||||
try:
|
||||
# Apply gain update if SDR supports it
|
||||
if hasattr(self._sdr, "set_tx_gain"):
|
||||
self._sdr.set_tx_gain(gain)
|
||||
self._sdr.tx_recording(signal, tx_time=duration)
|
||||
except Exception as exc:
|
||||
logger.error("TX step '%s' SDR error: %s", label, exc)
|
||||
else:
|
||||
# No SDR available — simulate by sleeping for the step duration.
|
||||
logger.warning("TX step '%s': no SDR — simulating %.0f s delay", label, duration)
|
||||
self.stop_event.wait(timeout=duration)
|
||||
|
||||
def _synthesise(
|
||||
self,
|
||||
modulation: str,
|
||||
sps: int,
|
||||
num_samples: int,
|
||||
filter_type: str,
|
||||
rolloff: float,
|
||||
):
|
||||
"""Build a block-generator chain and return IQ samples as a numpy array."""
|
||||
try:
|
||||
import numpy as np
|
||||
|
||||
from ria_toolkit_oss.signal.block_generator import (
|
||||
BinarySource,
|
||||
GMSKModulator,
|
||||
Mapper,
|
||||
OOKModulator,
|
||||
OQPSKModulator,
|
||||
RaisedCosineFilter,
|
||||
RootRaisedCosineFilter,
|
||||
Upsampling,
|
||||
)
|
||||
from ria_toolkit_oss.signal.block_generator.continuous_modulation.fsk_modulator import (
|
||||
FSKModulator,
|
||||
)
|
||||
except ImportError as exc:
|
||||
raise RuntimeError(f"ria_toolkit_oss block generator not available: {exc}") from exc
|
||||
|
||||
# ── Special modulations with their own source-connected modulator ──
|
||||
if modulation in ("OOK", "GMSK", "OQPSK"):
|
||||
src = BinarySource()
|
||||
if modulation == "OOK":
|
||||
mod = OOKModulator(src, samples_per_symbol=sps)
|
||||
elif modulation == "GMSK":
|
||||
mod = GMSKModulator(src, samples_per_symbol=sps)
|
||||
else:
|
||||
mod = OQPSKModulator(src, samples_per_symbol=sps)
|
||||
recording = mod.record(num_samples)
|
||||
flat = np.asarray(recording.data).flatten().astype(np.complex64)
|
||||
if len(flat) < num_samples:
|
||||
flat = np.tile(flat, num_samples // len(flat) + 1)
|
||||
return flat[:num_samples]
|
||||
|
||||
if modulation == "FSK":
|
||||
symbol_rate = num_samples / sps
|
||||
bits_per_sym = 1 # 2-FSK
|
||||
num_bits = max(num_samples // sps, 128) * bits_per_sym
|
||||
bits = BinarySource()((1, num_bits))
|
||||
mod = FSKModulator(
|
||||
num_bits_per_symbol=bits_per_sym,
|
||||
frequency_spacing=symbol_rate * 0.5,
|
||||
symbol_duration=1.0 / max(symbol_rate, 1.0),
|
||||
sampling_frequency=symbol_rate * sps,
|
||||
)
|
||||
flat = np.asarray(mod(bits)).flatten().astype(np.complex64)
|
||||
if len(flat) < num_samples:
|
||||
flat = np.tile(flat, num_samples // len(flat) + 1)
|
||||
return flat[:num_samples]
|
||||
|
||||
# ── PSK / QAM via Mapper → Upsampling → pulse filter ──────────────
|
||||
if modulation not in _MOD_TABLE:
|
||||
logger.warning("Unknown modulation %r — defaulting to QPSK", modulation)
|
||||
modulation = "QPSK"
|
||||
|
||||
bits_per_sym, gen_type = _MOD_TABLE[modulation]
|
||||
mod_family = "QAM" if gen_type == "qam" else "PSK"
|
||||
|
||||
source = BinarySource()
|
||||
mapper = Mapper(constellation_type=mod_family, num_bits_per_symbol=bits_per_sym)
|
||||
upsampler = Upsampling(factor=sps)
|
||||
|
||||
mapper.connect_input([source])
|
||||
upsampler.connect_input([mapper])
|
||||
|
||||
if filter_type in ("rrc",):
|
||||
pulse_filter = RootRaisedCosineFilter(span_in_symbols=6, upsampling_factor=sps, beta=rolloff)
|
||||
pulse_filter.connect_input([upsampler])
|
||||
recording = pulse_filter.record(num_samples)
|
||||
elif filter_type in ("rc",):
|
||||
pulse_filter = RaisedCosineFilter(span_in_symbols=6, upsampling_factor=sps, beta=rolloff)
|
||||
pulse_filter.connect_input([upsampler])
|
||||
recording = pulse_filter.record(num_samples)
|
||||
else:
|
||||
# "none", "rect", "gaussian" — use upsampler output directly
|
||||
recording = upsampler.record(num_samples)
|
||||
|
||||
flat = np.asarray(recording.data).flatten().astype(np.complex64)
|
||||
if len(flat) < num_samples:
|
||||
flat = np.tile(flat, num_samples // len(flat) + 1)
|
||||
return flat[:num_samples]
|
||||
|
||||
def _init_sdr(self, sample_rate: float, center_freq: float) -> None:
|
||||
try:
|
||||
from ria_toolkit_oss.sdr import get_sdr_device
|
||||
|
||||
self._sdr = get_sdr_device(self.sdr_device)
|
||||
self._sdr.init_tx(
|
||||
sample_rate=sample_rate,
|
||||
center_frequency=center_freq,
|
||||
gain=0,
|
||||
channel=0,
|
||||
gain_mode="manual",
|
||||
)
|
||||
logger.info(
|
||||
"TX SDR initialised: %s @ %.3f MHz, %.1f Msps", self.sdr_device, center_freq / 1e6, sample_rate / 1e6
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.warning("TX SDR init failed (%s) — will simulate: %s", self.sdr_device, exc)
|
||||
self._sdr = None
|
||||
|
||||
def _close_sdr(self) -> None:
|
||||
if self._sdr is not None:
|
||||
try:
|
||||
self._sdr.close()
|
||||
except Exception as exc:
|
||||
logger.debug("TX SDR close error: %s", exc)
|
||||
self._sdr = None
|
||||
|
|
@ -5,7 +5,7 @@ from typing import Optional
|
|||
import numpy as np
|
||||
from bladerf import _bladerf
|
||||
|
||||
from ria_toolkit_oss.data import Recording
|
||||
from ria_toolkit_oss.datatypes import Recording
|
||||
from ria_toolkit_oss.sdr import SDR, SDRError, SDRParameterError
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ from typing import Optional
|
|||
|
||||
import numpy as np
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.sdr._external.libhackrf import HackRF as hrf
|
||||
from ria_toolkit_oss.sdr.sdr import SDR, SDRParameterError
|
||||
|
||||
|
|
@ -58,7 +58,7 @@ class HackRF(SDR):
|
|||
:param channel: The channel the HackRF is set to. (Not actually used)
|
||||
:type channel: int
|
||||
:param gain_mode: 'absolute' passes gain directly to the sdr,
|
||||
'relative' means that gain should be a negative value, and it will be subtracted from the max gain (40).
|
||||
'relative' means that gain should be a negative value, and it will be subtracted from the max gain (40).
|
||||
:type gain_mode: str
|
||||
"""
|
||||
print("Initializing RX")
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ from typing import Optional
|
|||
import adi
|
||||
import numpy as np
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.sdr.sdr import (
|
||||
SDR,
|
||||
SDRError,
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ try:
|
|||
except ImportError as exc: # pragma: no cover - dependency provided by end user
|
||||
raise ImportError("pyrtlsdr is required to use the RTLSDR class") from exc
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.sdr.sdr import SDR, SDRParameterError
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ from typing import Optional
|
|||
import numpy as np
|
||||
import zmq
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
|
||||
|
||||
class SDR(ABC):
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ from typing import Optional
|
|||
import numpy as np
|
||||
import uhd
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.sdr.sdr import SDR, SDRParameterError
|
||||
|
||||
|
||||
|
|
@ -54,7 +54,7 @@ class USRP(SDR):
|
|||
:param channel: The channel the USRP is set to.
|
||||
:type channel: int
|
||||
:param gain_mode: 'absolute' passes gain directly to the sdr,
|
||||
'relative' means that gain should be a negative value, and it will be subtracted from the max gain.
|
||||
'relative' means that gain should be a negative value, and it will be subtracted from the max gain.
|
||||
:type gain_mode: str
|
||||
:param rx_buffer_size: Internal buffer size for receiving samples. Defaults to 960000.
|
||||
:type rx_buffer_size: int
|
||||
|
|
@ -285,7 +285,7 @@ class USRP(SDR):
|
|||
:param channel: The channel the USRP is set to.
|
||||
:type channel: int
|
||||
:param gain_mode: 'absolute' passes gain directly to the sdr,
|
||||
'relative' means that gain should be a negative value, and it will be subtracted from the max gain.
|
||||
'relative' means that gain should be a negative value, and it will be subtracted from the max gain.
|
||||
:type gain_mode: str
|
||||
"""
|
||||
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@
|
|||
from fastapi import Depends, FastAPI
|
||||
|
||||
from .auth import require_api_key
|
||||
from .routers import conductor, inference
|
||||
from .routers import inference, orchestrator
|
||||
|
||||
|
||||
def create_app(api_key: str = "") -> FastAPI:
|
||||
|
|
@ -28,9 +28,9 @@ def create_app(api_key: str = "") -> FastAPI:
|
|||
app.state.api_key = api_key
|
||||
|
||||
app.include_router(
|
||||
conductor.router,
|
||||
prefix="/conductor",
|
||||
tags=["Conductor"],
|
||||
orchestrator.router,
|
||||
prefix="/orchestrator",
|
||||
tags=["Orchestrator"],
|
||||
dependencies=[Depends(require_api_key)],
|
||||
)
|
||||
app.include_router(
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ from pathlib import Path
|
|||
from pydantic import BaseModel, field_validator
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Conductor
|
||||
# Orchestrator
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
"""Conductor routes: campaign deployment, status, and cancellation."""
|
||||
"""Orchestrator routes: campaign deployment, status, and cancellation."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
|
|
@ -11,7 +11,7 @@ from scipy.signal import butter
|
|||
from scipy.signal import chirp as sci_chirp
|
||||
from scipy.signal import hilbert, lfilter
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
|
||||
|
||||
def sine(
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.signal.block_generator.generators.signal_generator import (
|
||||
SignalGenerator,
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.signal.block_generator.generators.signal_generator import (
|
||||
SignalGenerator,
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.signal.block_generator.generators.signal_generator import (
|
||||
SignalGenerator,
|
||||
)
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
from ria_toolkit_oss.data import Recording
|
||||
from ria_toolkit_oss.datatypes import Recording
|
||||
from ria_toolkit_oss.signal import Recordable
|
||||
from ria_toolkit_oss.signal.block_generator.block import Block
|
||||
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ from datetime import datetime
|
|||
import click
|
||||
import numpy as np
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.signal.block_generator.mapping.mapper import Mapper
|
||||
from ria_toolkit_oss.signal.block_generator.multirate.upsampling import Upsampling
|
||||
from ria_toolkit_oss.signal.block_generator.pulse_shaping.raised_cosine_filter import (
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
from ria_toolkit_oss.data import Recording
|
||||
from ria_toolkit_oss.datatypes import Recording
|
||||
from ria_toolkit_oss.signal.block_generator.data_types import DataType
|
||||
from ria_toolkit_oss.signal.block_generator.recordable_block import RecordableBlock
|
||||
from ria_toolkit_oss.signal.block_generator.source_block import SourceBlock
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
from abc import ABC, abstractmethod
|
||||
|
||||
from ria_toolkit_oss.data import Recording
|
||||
from ria_toolkit_oss.datatypes import Recording
|
||||
|
||||
|
||||
class Recordable(ABC):
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ from typing import Optional
|
|||
import numpy as np
|
||||
from numpy.typing import ArrayLike
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.utils.array_conversion import convert_to_2xn
|
||||
|
||||
# TODO: For round 2 of index generation, should j be at min 2 spots away from where it was to prevent adjacent patches.
|
||||
|
|
@ -29,7 +29,7 @@ def generate_awgn(signal: ArrayLike | Recording, snr: Optional[float] = 1) -> np
|
|||
|
||||
:param signal: Input IQ data as a complex CxN array or `Recording`, where C is the number of channels and N
|
||||
is the length of the IQ examples.
|
||||
:type signal: array_like or ria_toolkit_oss.data.Recording
|
||||
:type signal: array_like or ria_toolkit_oss.datatypes.Recording
|
||||
:param snr: The signal-to-noise ratio in dB. Default is 1.
|
||||
:type snr: float, optional
|
||||
|
||||
|
|
@ -37,7 +37,7 @@ def generate_awgn(signal: ArrayLike | Recording, snr: Optional[float] = 1) -> np
|
|||
|
||||
:return: A numpy array representing the generated noise which matches the SNR of `signal`. If `signal` is a
|
||||
Recording, returns a Recording object with its `data` attribute containing the generated noise array.
|
||||
:rtype: np.ndarray or ria_toolkit_oss.data.Recording
|
||||
:rtype: np.ndarray or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
>>> rec = Recording(data=[[2 + 5j, 1 + 8j]])
|
||||
>>> new_rec = generate_awgn(rec)
|
||||
|
|
@ -80,14 +80,14 @@ def time_reversal(signal: ArrayLike | Recording) -> np.ndarray | Recording:
|
|||
|
||||
:param signal: Input IQ data as a complex CxN array or `Recording`, where C is the number of channels and N
|
||||
is the length of the IQ examples.
|
||||
:type signal: array_like or ria_toolkit_oss.data.Recording
|
||||
:type signal: array_like or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
:raises ValueError: If `signal` is not CxN complex.
|
||||
|
||||
:return: A numpy array containing the reversed I and Q data samples if `signal` is an array.
|
||||
If `signal` is a `Recording`, returns a `Recording` object with its `data` attribute containing the
|
||||
reversed array.
|
||||
:rtype: np.ndarray or ria_toolkit_oss.data.Recording
|
||||
:rtype: np.ndarray or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
>>> rec = Recording(data=[[1+2j, 3+4j, 5+6j]])
|
||||
>>> new_rec = time_reversal(rec)
|
||||
|
|
@ -123,14 +123,14 @@ def spectral_inversion(signal: ArrayLike | Recording) -> np.ndarray | Recording:
|
|||
|
||||
:param signal: Input IQ data as a complex CxN array or `Recording`, where C is the number of channels and N
|
||||
is the length of the IQ examples.
|
||||
:type signal: array_like or ria_toolkit_oss.data.Recording
|
||||
:type signal: array_like or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
:raises ValueError: If `signal` is not CxN complex.
|
||||
|
||||
:return: A numpy array containing the original I and negated Q data samples if `signal` is an array.
|
||||
If `signal` is a `Recording`, returns a `Recording` object with its `data` attribute containing the
|
||||
inverted array.
|
||||
:rtype: np.ndarray or ria_toolkit_oss.data.Recording
|
||||
:rtype: np.ndarray or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
>>> rec = Recording(data=[[0+45j, 2-10j]])
|
||||
>>> new_rec = spectral_inversion(rec)
|
||||
|
|
@ -165,14 +165,14 @@ def channel_swap(signal: ArrayLike | Recording) -> np.ndarray | Recording:
|
|||
|
||||
:param signal: Input IQ data as a complex CxN array or `Recording`, where C is the number of channels and N
|
||||
is the length of the IQ examples.
|
||||
:type signal: array_like or ria_toolkit_oss.data.Recording
|
||||
:type signal: array_like or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
:raises ValueError: If `signal` is not CxN complex.
|
||||
|
||||
:return: A numpy array containing the swapped I and Q data samples if `signal` is an array.
|
||||
If `signal` is a `Recording`, returns a `Recording` object with its `data` attribute containing the
|
||||
swapped array.
|
||||
:rtype: np.ndarray or ria_toolkit_oss.data.Recording
|
||||
:rtype: np.ndarray or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
>>> rec = Recording(data=[[10+20j, 7+35j]])
|
||||
>>> new_rec = channel_swap(rec)
|
||||
|
|
@ -207,14 +207,14 @@ def amplitude_reversal(signal: ArrayLike | Recording) -> np.ndarray | Recording:
|
|||
|
||||
:param signal: Input IQ data as a complex CxN array or `Recording`, where C is the number of channels and N
|
||||
is the length of the IQ examples.
|
||||
:type signal: array_like or ria_toolkit_oss.data.Recording
|
||||
:type signal: array_like or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
:raises ValueError: If `signal` is not CxN complex.
|
||||
|
||||
:return: A numpy array containing the negated I and Q data samples if `signal` is an array.
|
||||
If `signal` is a `Recording`, returns a `Recording` object with its `data` attribute containing the
|
||||
negated array.
|
||||
:rtype: np.ndarray or ria_toolkit_oss.data.Recording
|
||||
:rtype: np.ndarray or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
>>> rec = Recording(data=[[4-3j, -5-2j, -9+1j]])
|
||||
>>> new_rec = amplitude_reversal(rec)
|
||||
|
|
@ -253,7 +253,7 @@ def drop_samples( # noqa: C901 # TODO: Simplify function
|
|||
|
||||
:param signal: Input IQ data as a complex CxN array or `Recording`, where C is the number of channels and N
|
||||
is the length of the IQ examples.
|
||||
:type signal: array_like or ria_toolkit_oss.data.Recording
|
||||
:type signal: array_like or ria_toolkit_oss.datatypes.Recording
|
||||
:param max_section_size: Maximum allowable size of the section to be dropped and replaced. Default is 2.
|
||||
:type max_section_size: int, optional
|
||||
:param fill_type: Fill option used to replace dropped section of data (back-fill, front-fill, mean, zeros).
|
||||
|
|
@ -275,7 +275,7 @@ def drop_samples( # noqa: C901 # TODO: Simplify function
|
|||
:return: A numpy array containing the I and Q data samples with replaced subsections if
|
||||
`signal` is an array. If `signal` is a `Recording`, returns a `Recording` object with its `data`
|
||||
attribute containing the array with dropped samples.
|
||||
:rtype: np.ndarray or ria_toolkit_oss.data.Recording
|
||||
:rtype: np.ndarray or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
>>> rec = Recording(data=[[2+5j, 1+8j, 6+4j, 3+7j, 4+9j]])
|
||||
>>> new_rec = drop_samples(rec)
|
||||
|
|
@ -346,7 +346,7 @@ def quantize_tape(
|
|||
|
||||
:param signal: Input IQ data as a complex CxN array or `Recording`, where C is the number of channels and N
|
||||
is the length of the IQ examples.
|
||||
:type signal: array_like or ria_toolkit_oss.data.Recording
|
||||
:type signal: array_like or ria_toolkit_oss.datatypes.Recording
|
||||
:param bin_number: The number of bins the signal should be divided into. Default is 4.
|
||||
:type bin_number: int, optional
|
||||
:param rounding_type: The type of rounding applied during processing. Default is "floor".
|
||||
|
|
@ -362,7 +362,7 @@ def quantize_tape(
|
|||
:return: A numpy array containing the quantized I and Q data samples if `signal` is an array.
|
||||
If `signal` is a `Recording`, returns a `Recording` object with its `data` attribute containing
|
||||
the quantized array.
|
||||
:rtype: np.ndarray or ria_toolkit_oss.data.Recording
|
||||
:rtype: np.ndarray or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
>>> rec = Recording(data=[[1+1j, 4+4j, 1+2j, 1+4j]])
|
||||
>>> new_rec = quantize_tape(rec)
|
||||
|
|
@ -421,7 +421,7 @@ def quantize_parts(
|
|||
|
||||
:param signal: Input IQ data as a complex CxN array or `Recording`, where C is the number of channels and N
|
||||
is the length of the IQ examples.
|
||||
:type signal: array_like or ria_toolkit_oss.data.Recording
|
||||
:type signal: array_like or ria_toolkit_oss.datatypes.Recording
|
||||
:param max_section_size: Maximum allowable size of the section to be quantized. Default is 2.
|
||||
:type max_section_size: int, optional
|
||||
:param bin_number: The number of bins the signal should be divided into. Default is 4.
|
||||
|
|
@ -439,7 +439,7 @@ def quantize_parts(
|
|||
:return: A numpy array containing the I and Q data samples with quantized subsections if `signal`
|
||||
is an array. If `signal` is a `Recording`, returns a `Recording` object with its `data` attribute
|
||||
containing the partially quantized array.
|
||||
:rtype: np.ndarray or ria_toolkit_oss.data.Recording
|
||||
:rtype: np.ndarray or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
>>> rec = Recording(data=[[2+5j, 1+8j, 6+4j, 3+7j, 4+9j]])
|
||||
>>> new_rec = quantize_parts(rec)
|
||||
|
|
@ -510,7 +510,7 @@ def magnitude_rescale(
|
|||
|
||||
:param signal: Input IQ data as a complex CxN array or `Recording`, where C is the number of channels and N
|
||||
is the length of the IQ examples.
|
||||
:type signal: array_like or ria_toolkit_oss.data.Recording
|
||||
:type signal: array_like or ria_toolkit_oss.datatypes.Recording
|
||||
:param starting_bounds: The bounds (inclusive) as indices in which the starting position of the rescaling occurs.
|
||||
Default is None, but if user does not assign any bounds, the bounds become (random index, N-1).
|
||||
:type starting_bounds: tuple, optional
|
||||
|
|
@ -522,7 +522,7 @@ def magnitude_rescale(
|
|||
:return: A numpy array containing the I and Q data samples with the rescaled magnitude after the random
|
||||
starting point if `signal` is an array. If `signal` is a `Recording`, returns a `Recording`
|
||||
object with its `data` attribute containing the rescaled array.
|
||||
:rtype: np.ndarray or ria_toolkit_oss.data.Recording
|
||||
:rtype: np.ndarray or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
>>> rec = Recording(data=[[2+5j, 1+8j, 6+4j, 3+7j, 4+9j]])
|
||||
>>> new_rec = magniute_rescale(rec)
|
||||
|
|
@ -571,7 +571,7 @@ def cut_out( # noqa: C901 # TODO: Simplify function
|
|||
|
||||
:param signal: Input IQ data as a complex CxN array or `Recording`, where C is the number of channels and N
|
||||
is the length of the IQ examples.
|
||||
:type signal: array_like or ria_toolkit_oss.data.Recording
|
||||
:type signal: array_like or ria_toolkit_oss.datatypes.Recording
|
||||
:param max_section_size: Maximum allowable size of the section to be quantized. Default is 3.
|
||||
:type max_section_size: int, optional
|
||||
:param fill_type: Fill option used to replace cutout section of data (zeros, ones, low-snr, avg-snr-1, avg-snr-2).
|
||||
|
|
@ -596,7 +596,7 @@ def cut_out( # noqa: C901 # TODO: Simplify function
|
|||
:return: A numpy array containing the I and Q data samples with random sections cut out and replaced according to
|
||||
`fill_type` if `signal` is an array. If `signal` is a `Recording`, returns a `Recording` object
|
||||
with its `data` attribute containing the cut out and replaced array.
|
||||
:rtype: np.ndarray or ria_toolkit_oss.data.Recording
|
||||
:rtype: np.ndarray or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
>>> rec = Recording(data=[[2+5j, 1+8j, 6+4j, 3+7j, 4+9j]])
|
||||
>>> new_rec = cut_out(rec)
|
||||
|
|
@ -666,7 +666,7 @@ def patch_shuffle(signal: ArrayLike | Recording, max_patch_size: Optional[int] =
|
|||
|
||||
:param signal: Input IQ data as a complex CxN array or `Recording`, where C is the number of channels and N
|
||||
is the length of the IQ examples.
|
||||
:type signal: array_like or ria_toolkit_oss.data.Recording
|
||||
:type signal: array_like or ria_toolkit_oss.datatypes.Recording
|
||||
:param max_patch_size: Maximum allowable patch size of the data that can be shuffled. Default is 3.
|
||||
:type max_patch_size: int, optional
|
||||
|
||||
|
|
@ -676,7 +676,7 @@ def patch_shuffle(signal: ArrayLike | Recording, max_patch_size: Optional[int] =
|
|||
:return: A numpy array containing the I and Q data samples with randomly shuffled regions if `signal` is
|
||||
an array. If `signal` is a `Recording`, returns a `Recording` object with its `data` attribute containing
|
||||
the shuffled array.
|
||||
:rtype: np.ndarray or ria_toolkit_oss.data.Recording
|
||||
:rtype: np.ndarray or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
>>> rec = Recording(data=[[2+5j, 1+8j, 6+4j, 3+7j, 4+9j]])
|
||||
>>> new_rec = patch_shuffle(rec)
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ import numpy as np
|
|||
from numpy.typing import ArrayLike
|
||||
from scipy.signal import resample_poly
|
||||
|
||||
from ria_toolkit_oss.data import Recording
|
||||
from ria_toolkit_oss.datatypes import Recording
|
||||
from ria_toolkit_oss.transforms import iq_augmentations
|
||||
|
||||
|
||||
|
|
@ -31,7 +31,7 @@ def add_awgn_to_signal(signal: ArrayLike | Recording, snr: Optional[float] = 1)
|
|||
|
||||
:param signal: Input IQ data as a complex ``C x N`` array or `Recording`, where ``C`` is the number of channels
|
||||
and ``N`` is the length of the IQ examples.
|
||||
:type signal: array_like or ria_toolkit_oss.data.Recording
|
||||
:type signal: array_like or ria_toolkit_oss.datatypes.Recording
|
||||
:param snr: The signal-to-noise ratio in dB. Default is 1.
|
||||
:type snr: float, optional
|
||||
|
||||
|
|
@ -39,7 +39,7 @@ def add_awgn_to_signal(signal: ArrayLike | Recording, snr: Optional[float] = 1)
|
|||
|
||||
:return: A numpy array which is the sum of the noise (which matches the SNR) and the original signal. If `signal`
|
||||
is a `Recording`, returns a `Recording object` with its `data` attribute containing the noisy signal array.
|
||||
:rtype: np.ndarray or ria_toolkit_oss.data.Recording
|
||||
:rtype: np.ndarray or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
>>> rec = Recording(data=[[1+1j, 2+2j]])
|
||||
>>> new_rec = add_awgn_to_signal(rec)
|
||||
|
|
@ -71,7 +71,7 @@ def time_shift(signal: ArrayLike | Recording, shift: Optional[int] = 1) -> np.nd
|
|||
|
||||
:param signal: Input IQ data as a complex CxN array or `Recording`, where C is the number of channels and N
|
||||
is the length of the IQ examples.
|
||||
:type signal: array_like or ria_toolkit_oss.data.Recording
|
||||
:type signal: array_like or ria_toolkit_oss.datatypes.Recording
|
||||
:param shift: The number of indices to shift by. Default is 1.
|
||||
:type shift: int, optional
|
||||
|
||||
|
|
@ -80,7 +80,7 @@ def time_shift(signal: ArrayLike | Recording, shift: Optional[int] = 1) -> np.nd
|
|||
|
||||
:return: A numpy array which represents the time-shifted signal. If `signal` is a `Recording`,
|
||||
returns a `Recording object` with its `data` attribute containing the time-shifted array.
|
||||
:rtype: np.ndarray or ria_toolkit_oss.data.Recording
|
||||
:rtype: np.ndarray or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
>>> rec = Recording(data=[[1+1j, 2+2j, 3+3j, 4+4j, 5+5j]])
|
||||
>>> new_rec = time_shift(rec, -2)
|
||||
|
|
@ -134,7 +134,7 @@ def frequency_shift(signal: ArrayLike | Recording, shift: Optional[float] = 0.5)
|
|||
|
||||
:param signal: Input IQ data as a complex CxN array or `Recording`, where C is the number of channels and N
|
||||
is the length of the IQ examples.
|
||||
:type signal: array_like or ria_toolkit_oss.data.Recording
|
||||
:type signal: array_like or ria_toolkit_oss.datatypes.Recording
|
||||
:param shift: The frequency shift relative to the sample rate. Must be in the range ``[-0.5, 0.5]``.
|
||||
Default is 0.5.
|
||||
:type shift: float, optional
|
||||
|
|
@ -144,7 +144,7 @@ def frequency_shift(signal: ArrayLike | Recording, shift: Optional[float] = 0.5)
|
|||
|
||||
:return: A numpy array which represents the frequency-shifted signal. If `signal` is a `Recording`,
|
||||
returns a `Recording object` with its `data` attribute containing the frequency-shifted array.
|
||||
:rtype: np.ndarray or ria_toolkit_oss.data.Recording
|
||||
:rtype: np.ndarray or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
>>> rec = Recording(data=[[1+1j, 2+2j, 3+3j, 4+4j]])
|
||||
>>> new_rec = frequency_shift(rec, -0.4)
|
||||
|
|
@ -189,7 +189,7 @@ def phase_shift(signal: ArrayLike | Recording, phase: Optional[float] = np.pi) -
|
|||
|
||||
:param signal: Input IQ data as a complex CxN array or `Recording`, where C is the number of channels and N
|
||||
is the length of the IQ examples.
|
||||
:type signal: array_like or ria_toolkit_oss.data.Recording
|
||||
:type signal: array_like or ria_toolkit_oss.datatypes.Recording
|
||||
:param phase: The phase angle by which to rotate the IQ samples, in radians. Must be in the range ``[-π, π]``.
|
||||
Default is π.
|
||||
:type phase: float, optional
|
||||
|
|
@ -199,7 +199,7 @@ def phase_shift(signal: ArrayLike | Recording, phase: Optional[float] = np.pi) -
|
|||
|
||||
:return: A numpy array which represents the phase-shifted signal. If `signal` is a `Recording`,
|
||||
returns a `Recording object` with its `data` attribute containing the phase-shifted array.
|
||||
:rtype: np.ndarray or ria_toolkit_oss.data.Recording
|
||||
:rtype: np.ndarray or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
>>> rec = Recording(data=[[1+1j, 2+2j, 3+3j, 4+4j]])
|
||||
>>> new_rec = phase_shift(rec, np.pi/2)
|
||||
|
|
@ -246,7 +246,7 @@ def iq_imbalance(
|
|||
|
||||
:param signal: Input IQ data as a complex CxN array or `Recording`, where C is the number of channels and N
|
||||
is the length of the IQ examples.
|
||||
:type signal: array_like or ria_toolkit_oss.data.Recording
|
||||
:type signal: array_like or ria_toolkit_oss.datatypes.Recording
|
||||
:param amplitude_imbalance: The IQ amplitude imbalance to apply, in dB. Default is 1.5.
|
||||
:type amplitude_imbalance: float, optional
|
||||
:param phase_imbalance: The IQ phase imbalance to apply, in radians. Default is π.
|
||||
|
|
@ -260,7 +260,7 @@ def iq_imbalance(
|
|||
|
||||
:return: A numpy array which is the original signal with an applied IQ imbalance. If `signal` is a `Recording`,
|
||||
returns a `Recording object` with its `data` attribute containing the IQ imbalanced signal array.
|
||||
:rtype: np.ndarray or ria_toolkit_oss.data.Recording
|
||||
:rtype: np.ndarray or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
>>> rec = Recording(data=[[2+18j, -34+2j, 3+9j]])
|
||||
>>> new_rec = iq_imbalance(rec, 1, np.pi, 2)
|
||||
|
|
@ -315,7 +315,7 @@ def resample(signal: ArrayLike | Recording, up: Optional[int] = 4, down: Optiona
|
|||
|
||||
:param signal: Input IQ data as a complex CxN array or `Recording`, where C is the number of channels and N
|
||||
is the length of the IQ examples.
|
||||
:type signal: array_like or ria_toolkit_oss.data.Recording
|
||||
:type signal: array_like or ria_toolkit_oss.datatypes.Recording
|
||||
:param up: The upsampling factor. Default is 4.
|
||||
:type up: int, optional
|
||||
:param down: The downsampling factor. Default is 2.
|
||||
|
|
@ -325,7 +325,7 @@ def resample(signal: ArrayLike | Recording, up: Optional[int] = 4, down: Optiona
|
|||
|
||||
:return: A numpy array which represents the resampled signal If `signal` is a `Recording`,
|
||||
returns a `Recording object` with its `data` attribute containing the resampled array.
|
||||
:rtype: np.ndarray or ria_toolkit_oss.data.Recording
|
||||
:rtype: np.ndarray or ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
>>> rec = Recording(data=[[1+1j, 2+2j]])
|
||||
>>> new_rec = resample(rec, 2, 1)
|
||||
|
|
|
|||
|
|
@ -4,14 +4,14 @@ import scipy.signal as signal
|
|||
from plotly.graph_objs import Figure
|
||||
from scipy.fft import fft, fftshift
|
||||
|
||||
from ria_toolkit_oss.data import Recording
|
||||
from ria_toolkit_oss.datatypes import Recording
|
||||
|
||||
|
||||
def spectrogram(rec: Recording, thumbnail: bool = False) -> Figure:
|
||||
"""Create a spectrogram for the recording.
|
||||
|
||||
:param rec: Signal to plot.
|
||||
:type rec: ria_toolkit_oss.data.Recording
|
||||
:type rec: ria_toolkit_oss.datatypes.Recording
|
||||
:param thumbnail: Whether to return a small thumbnail version or full plot.
|
||||
:type thumbnail: bool
|
||||
|
||||
|
|
@ -95,7 +95,7 @@ def iq_time_series(rec: Recording) -> Figure:
|
|||
"""Create a time series plot of the real and imaginary parts of signal.
|
||||
|
||||
:param rec: Signal to plot.
|
||||
:type rec: ria_toolkit_oss.data.Recording
|
||||
:type rec: ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
:return: Time series plot as a Plotly figure.
|
||||
"""
|
||||
|
|
@ -125,7 +125,7 @@ def frequency_spectrum(rec: Recording) -> Figure:
|
|||
"""Create a frequency spectrum plot from the recording.
|
||||
|
||||
:param rec: Input signal to plot.
|
||||
:type rec: ria_toolkit_oss.data.Recording
|
||||
:type rec: ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
:return: Frequency spectrum as a Plotly figure.
|
||||
"""
|
||||
|
|
@ -160,7 +160,7 @@ def constellation(rec: Recording) -> Figure:
|
|||
"""Create a constellation plot from the recording.
|
||||
|
||||
:param rec: Input signal to plot.
|
||||
:type rec: ria_toolkit_oss.data.Recording
|
||||
:type rec: ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
:return: Constellation as a Plotly figure.
|
||||
"""
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@ from scipy.fft import fft, fftshift
|
|||
from scipy.signal import spectrogram
|
||||
from scipy.signal.windows import hann
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.view.tools import (
|
||||
COLORS,
|
||||
decimate,
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@ import numpy as np
|
|||
from scipy.fft import fft, fftshift
|
||||
from scipy.signal.windows import hann
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.view.tools import (
|
||||
COLORS,
|
||||
decimate,
|
||||
|
|
|
|||
|
|
@ -4,14 +4,14 @@ import scipy.signal as signal
|
|||
from plotly.graph_objs import Figure
|
||||
from scipy.fft import fft, fftshift
|
||||
|
||||
from ria_toolkit_oss.data import Recording
|
||||
from ria_toolkit_oss.datatypes import Recording
|
||||
|
||||
|
||||
def spectrogram(rec: Recording, thumbnail: bool = False) -> Figure:
|
||||
"""Create a spectrogram for the recording.
|
||||
|
||||
:param rec: Signal to plot.
|
||||
:type rec: ria_toolkit_oss.data.Recording
|
||||
:type rec: ria_toolkit_oss.datatypes.Recording
|
||||
:param thumbnail: Whether to return a small thumbnail version or full plot.
|
||||
:type thumbnail: bool
|
||||
|
||||
|
|
@ -107,7 +107,7 @@ def iq_time_series(rec: Recording) -> Figure:
|
|||
"""Create a time series plot of the real and imaginary parts of signal.
|
||||
|
||||
:param rec: Signal to plot.
|
||||
:type rec: ria_toolkit_oss.data.Recording
|
||||
:type rec: ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
:return: Time series plot, as a Plotly Figure.
|
||||
"""
|
||||
|
|
@ -145,7 +145,7 @@ def frequency_spectrum(rec: Recording) -> Figure:
|
|||
"""Create a frequency spectrum plot from the recording.
|
||||
|
||||
:param rec: Input signal to plot.
|
||||
:type rec: ria_toolkit_oss.data.Recording
|
||||
:type rec: ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
:return: Frequency spectrum, as a Plotly figure.
|
||||
"""
|
||||
|
|
@ -187,7 +187,7 @@ def constellation(rec: Recording) -> Figure:
|
|||
"""Create a constellation plot from the recording.
|
||||
|
||||
:param rec: Input signal to plot.
|
||||
:type rec: ria_toolkit_oss.data.Recording
|
||||
:type rec: ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
:return: Constellation, as a Plotly Figure.
|
||||
"""
|
||||
|
|
@ -222,7 +222,7 @@ def power_spectral_density(rec: Recording) -> Figure:
|
|||
"""Create a Power Spectral Density (PSD) plot from the recording.
|
||||
|
||||
:param rec: Input signal to plot.
|
||||
:type rec: ria_toolkit_oss.data.Recording
|
||||
:type rec: ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
:return: PSD plot, as a Plotly Figure.
|
||||
"""
|
||||
|
|
@ -268,7 +268,7 @@ def fft_plot(rec: Recording) -> Figure:
|
|||
"""Create an FFT magnitude plot from the recording.
|
||||
|
||||
:param rec: Input signal to plot.
|
||||
:type rec: ria_toolkit_oss.data.Recording
|
||||
:type rec: ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
:return: FFT plot, as a Plotly Figure.
|
||||
"""
|
||||
|
|
@ -312,7 +312,7 @@ def spectrogram_3d(rec: Recording) -> Figure:
|
|||
"""Create a 3D spectrogram plot from the recording.
|
||||
|
||||
:param rec: Input signal to plot.
|
||||
:type rec: ria_toolkit_oss.data.Recording
|
||||
:type rec: ria_toolkit_oss.datatypes.Recording
|
||||
|
||||
:return: 3D Spectrogram, as a Plotly Figure.
|
||||
"""
|
||||
|
|
|
|||
|
|
@ -11,8 +11,8 @@ from ria_toolkit_oss.annotations import (
|
|||
split_recording_annotations,
|
||||
threshold_qualifier,
|
||||
)
|
||||
from ria_toolkit_oss.data import Annotation
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes import Annotation
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.io import load_recording, to_blue, to_npy, to_sigmf, to_wav
|
||||
from ria_toolkit_oss_cli.ria_toolkit_oss.common import (
|
||||
format_frequency,
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ from pathlib import Path
|
|||
import click
|
||||
import numpy as np
|
||||
|
||||
from ria_toolkit_oss.data import Recording
|
||||
from ria_toolkit_oss.datatypes import Recording
|
||||
from ria_toolkit_oss.io import from_npy_legacy, load_recording
|
||||
from ria_toolkit_oss_cli.ria_toolkit_oss.common import (
|
||||
echo_progress,
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ from typing import Any, Dict, List, Optional
|
|||
import click
|
||||
import yaml
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.io.recording import to_blue, to_npy, to_sigmf, to_wav
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ import numpy as np
|
|||
import yaml
|
||||
|
||||
import ria_toolkit_oss.signal.basic_signal_generator as basic_gen
|
||||
from ria_toolkit_oss.data import Recording
|
||||
from ria_toolkit_oss.datatypes import Recording
|
||||
from ria_toolkit_oss.signal.block_generator.basic import FrequencyShift
|
||||
from ria_toolkit_oss.signal.block_generator.continuous_modulation.fsk_modulator import (
|
||||
FSKModulator,
|
||||
|
|
|
|||
|
|
@ -23,9 +23,9 @@ def serve(host: str, port: int, api_key: str, log_level: str):
|
|||
|
||||
\b
|
||||
Endpoints:
|
||||
POST /conductor/deploy
|
||||
GET /conductor/status/{campaign_id}
|
||||
POST /conductor/cancel/{campaign_id}
|
||||
POST /orchestrator/deploy
|
||||
GET /orchestrator/status/{campaign_id}
|
||||
POST /orchestrator/cancel/{campaign_id}
|
||||
POST /inference/load
|
||||
POST /inference/start
|
||||
POST /inference/stop
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ from pathlib import Path
|
|||
|
||||
import click
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.io.recording import load_recording
|
||||
from ria_toolkit_oss.transforms import iq_augmentations, iq_impairments
|
||||
from ria_toolkit_oss_cli.ria_toolkit_oss.common import (
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ import time
|
|||
|
||||
import click
|
||||
|
||||
from ria_toolkit_oss.data import Recording
|
||||
from ria_toolkit_oss.datatypes import Recording
|
||||
from ria_toolkit_oss.io import from_npy_legacy, load_recording
|
||||
|
||||
from .common import (
|
||||
|
|
|
|||
95
tests/agent/test_cli_register_errors.py
Normal file
95
tests/agent/test_cli_register_errors.py
Normal file
|
|
@ -0,0 +1,95 @@
|
|||
"""Structured error reporting for `ria-agent register` (T2)."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sys
|
||||
import urllib.error
|
||||
from io import BytesIO
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from ria_toolkit_oss.agent import cli as agent_cli
|
||||
|
||||
|
||||
def _structured(reason: str) -> bytes:
|
||||
return json.dumps({"detail": {"reason": reason}}).encode()
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"reason",
|
||||
["invalid_key", "expired", "revoked", "already_consumed"],
|
||||
)
|
||||
def test_explain_maps_known_reasons(reason):
|
||||
msg = agent_cli._explain_registration_failure(403, _structured(reason))
|
||||
assert msg == agent_cli.REGISTRATION_REASON_MESSAGES[reason]
|
||||
|
||||
|
||||
def test_explain_unknown_reason_falls_through_with_code():
|
||||
msg = agent_cli._explain_registration_failure(403, _structured("brand_new_thing"))
|
||||
assert "brand_new_thing" in msg
|
||||
assert "rejected" in msg.lower()
|
||||
|
||||
|
||||
def test_explain_string_detail():
|
||||
body = json.dumps({"detail": "Forbidden"}).encode()
|
||||
msg = agent_cli._explain_registration_failure(403, body)
|
||||
assert msg == "Registration rejected: Forbidden"
|
||||
|
||||
|
||||
def test_explain_429_with_string_detail():
|
||||
body = json.dumps({"detail": "Too many attempts; try again shortly"}).encode()
|
||||
msg = agent_cli._explain_registration_failure(429, body)
|
||||
assert "rate-limited" in msg
|
||||
assert "Too many attempts" in msg
|
||||
|
||||
|
||||
def test_explain_429_with_no_body():
|
||||
msg = agent_cli._explain_registration_failure(429, b"")
|
||||
assert "rate-limited" in msg
|
||||
|
||||
|
||||
def test_explain_malformed_json():
|
||||
msg = agent_cli._explain_registration_failure(500, b"<html>boom</html>")
|
||||
assert msg.startswith("HTTP 500")
|
||||
assert "boom" in msg
|
||||
|
||||
|
||||
def test_explain_empty_body():
|
||||
msg = agent_cli._explain_registration_failure(502, b"")
|
||||
assert msg == "HTTP 502: no body"
|
||||
|
||||
|
||||
def _http_error(status: int, body: bytes) -> urllib.error.HTTPError:
|
||||
return urllib.error.HTTPError(
|
||||
url="http://hub/screens/agents/register",
|
||||
code=status,
|
||||
msg="",
|
||||
hdrs=None, # type: ignore[arg-type]
|
||||
fp=BytesIO(body),
|
||||
)
|
||||
|
||||
|
||||
def test_register_surfaces_reason_on_http_error(tmp_path, capsys):
|
||||
cfg_path = tmp_path / "agent.json"
|
||||
err = _http_error(403, _structured("revoked"))
|
||||
|
||||
with (
|
||||
patch.dict("os.environ", {"RIA_AGENT_CONFIG": str(cfg_path)}, clear=False),
|
||||
patch("urllib.request.urlopen", side_effect=err),
|
||||
patch.object(
|
||||
sys,
|
||||
"argv",
|
||||
["ria-agent", "register", "--hub", "http://hub:3005", "--api-key", "ria_reg_x"],
|
||||
),
|
||||
):
|
||||
with pytest.raises(SystemExit) as exc:
|
||||
agent_cli.main()
|
||||
|
||||
assert exc.value.code == 1
|
||||
captured = capsys.readouterr()
|
||||
assert "revoked" in captured.err.lower()
|
||||
assert "Settings → RIA Agents" in captured.err
|
||||
# Config must NOT be written on failure.
|
||||
assert not cfg_path.exists()
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
from ria_toolkit_oss.data import Annotation
|
||||
from ria_toolkit_oss.datatypes import Annotation
|
||||
|
||||
|
||||
def test_annotation_creation():
|
||||
|
|
|
|||
|
|
@ -3,8 +3,8 @@ from typing import Iterable
|
|||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from ria_toolkit_oss.data import Annotation, Recording
|
||||
from ria_toolkit_oss.data.recording import generate_recording_id
|
||||
from ria_toolkit_oss.datatypes import Annotation, Recording
|
||||
from ria_toolkit_oss.datatypes.recording import generate_recording_id
|
||||
|
||||
COMPLEX_DATA_1 = [[0.5 + 0.5j, 0.1 + 0.1j, 0.3 + 0.3j, 0.4 + 0.4j, 0.5 + 0.5j]]
|
||||
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
import numpy as np
|
||||
|
||||
from ria_toolkit_oss.data import Annotation, Recording
|
||||
from ria_toolkit_oss.datatypes import Annotation, Recording
|
||||
from ria_toolkit_oss.io.recording import (
|
||||
from_npy,
|
||||
from_sigmf,
|
||||
|
|
|
|||
|
|
@ -1,314 +0,0 @@
|
|||
"""Tests for orchestration executor — StepResult, CampaignResult, _run_script, _extract_tx_params."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import stat
|
||||
from types import SimpleNamespace
|
||||
|
||||
import pytest
|
||||
|
||||
from ria_toolkit_oss.orchestration.executor import (
|
||||
CampaignResult,
|
||||
StepResult,
|
||||
_extract_tx_params,
|
||||
_run_script,
|
||||
)
|
||||
from ria_toolkit_oss.orchestration.qa import QAResult
|
||||
|
||||
|
||||
def _ok_qa() -> QAResult:
|
||||
return QAResult(passed=True, flagged=False, snr_db=20.0, duration_s=1.0)
|
||||
|
||||
|
||||
def _flagged_qa() -> QAResult:
|
||||
return QAResult(passed=True, flagged=True, snr_db=5.0, duration_s=1.0, issues=["low SNR"])
|
||||
|
||||
|
||||
def _failed_qa() -> QAResult:
|
||||
return QAResult(passed=False, flagged=True, snr_db=0.0, duration_s=0.0, issues=["no signal"])
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# StepResult
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestStepResult:
|
||||
def test_ok_true_when_no_error_and_qa_passed(self):
|
||||
r = StepResult(
|
||||
transmitter_id="tx1",
|
||||
step_label="step1",
|
||||
output_path="/out/rec.sigmf-data",
|
||||
qa=_ok_qa(),
|
||||
capture_timestamp=0.0,
|
||||
)
|
||||
assert r.ok is True
|
||||
|
||||
def test_ok_false_when_error_set(self):
|
||||
r = StepResult(
|
||||
transmitter_id="tx1",
|
||||
step_label="step1",
|
||||
output_path=None,
|
||||
qa=_ok_qa(),
|
||||
capture_timestamp=0.0,
|
||||
error="SDR failed",
|
||||
)
|
||||
assert r.ok is False
|
||||
|
||||
def test_ok_false_when_qa_not_passed(self):
|
||||
r = StepResult(
|
||||
transmitter_id="tx1",
|
||||
step_label="step1",
|
||||
output_path="/out",
|
||||
qa=_failed_qa(),
|
||||
capture_timestamp=0.0,
|
||||
)
|
||||
assert r.ok is False
|
||||
|
||||
def test_to_dict_contains_required_keys(self):
|
||||
r = StepResult(
|
||||
transmitter_id="tx1",
|
||||
step_label="step1",
|
||||
output_path="/out/rec.sigmf-data",
|
||||
qa=_ok_qa(),
|
||||
capture_timestamp=1234.5,
|
||||
)
|
||||
d = r.to_dict()
|
||||
assert d["transmitter_id"] == "tx1"
|
||||
assert d["step_label"] == "step1"
|
||||
assert d["output_path"] == "/out/rec.sigmf-data"
|
||||
assert d["capture_timestamp"] == pytest.approx(1234.5)
|
||||
assert d["error"] is None
|
||||
assert d["qa"]["passed"] is True
|
||||
|
||||
def test_to_dict_includes_error_when_set(self):
|
||||
r = StepResult(
|
||||
transmitter_id="tx1",
|
||||
step_label="step1",
|
||||
output_path=None,
|
||||
qa=_failed_qa(),
|
||||
capture_timestamp=0.0,
|
||||
error="disk full",
|
||||
)
|
||||
assert r.to_dict()["error"] == "disk full"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# CampaignResult
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestCampaignResult:
|
||||
def _make(self, steps: list) -> CampaignResult:
|
||||
r = CampaignResult(campaign_name="test_campaign")
|
||||
r.steps = steps
|
||||
r.end_time = r.start_time + 5.0
|
||||
return r
|
||||
|
||||
def test_total_steps(self):
|
||||
r = self._make(
|
||||
[
|
||||
StepResult("tx1", "s1", "/out", _ok_qa(), 0.0),
|
||||
StepResult("tx1", "s2", "/out", _ok_qa(), 0.0),
|
||||
]
|
||||
)
|
||||
assert r.total_steps == 2
|
||||
|
||||
def test_passed_count(self):
|
||||
r = self._make(
|
||||
[
|
||||
StepResult("tx1", "s1", "/out", _ok_qa(), 0.0),
|
||||
StepResult("tx1", "s2", "/out", _failed_qa(), 0.0),
|
||||
]
|
||||
)
|
||||
assert r.passed == 1
|
||||
|
||||
def test_failed_count(self):
|
||||
r = self._make(
|
||||
[
|
||||
StepResult("tx1", "s1", "/out", _ok_qa(), 0.0),
|
||||
StepResult("tx1", "s2", "/out", _failed_qa(), 0.0),
|
||||
]
|
||||
)
|
||||
assert r.failed == 1
|
||||
|
||||
def test_flagged_count(self):
|
||||
r = self._make(
|
||||
[
|
||||
StepResult("tx1", "s1", "/out", _ok_qa(), 0.0),
|
||||
StepResult("tx1", "s2", "/out", _flagged_qa(), 0.0),
|
||||
]
|
||||
)
|
||||
assert r.flagged == 1
|
||||
|
||||
def test_error_step_counts_as_failed_not_passed(self):
|
||||
r = self._make(
|
||||
[
|
||||
StepResult("tx1", "s1", None, _ok_qa(), 0.0, error="disk full"),
|
||||
]
|
||||
)
|
||||
assert r.failed == 1
|
||||
assert r.passed == 0
|
||||
|
||||
def test_duration_s_from_end_time(self):
|
||||
r = CampaignResult(campaign_name="c")
|
||||
r.start_time = 100.0
|
||||
r.end_time = 115.0
|
||||
assert r.duration_s == pytest.approx(15.0)
|
||||
|
||||
def test_to_dict_structure(self):
|
||||
r = self._make([StepResult("tx1", "s1", "/out", _ok_qa(), 0.0)])
|
||||
d = r.to_dict()
|
||||
assert d["campaign_name"] == "test_campaign"
|
||||
assert d["total_steps"] == 1
|
||||
assert d["passed"] == 1
|
||||
assert len(d["steps"]) == 1
|
||||
|
||||
def test_write_report(self, tmp_path):
|
||||
r = self._make([StepResult("tx1", "s1", "/out", _ok_qa(), 0.0)])
|
||||
out = tmp_path / "report.json"
|
||||
r.write_report(str(out))
|
||||
assert out.exists()
|
||||
data = json.loads(out.read_text())
|
||||
assert data["campaign_name"] == "test_campaign"
|
||||
|
||||
def test_write_report_creates_nested_dirs(self, tmp_path):
|
||||
r = self._make([])
|
||||
out = tmp_path / "nested" / "deep" / "report.json"
|
||||
r.write_report(str(out))
|
||||
assert out.exists()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _run_script
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestRunScript:
|
||||
def _script(self, tmp_path, body: str) -> str:
|
||||
s = tmp_path / "script.sh"
|
||||
s.write_text("#!/bin/sh\n" + body)
|
||||
s.chmod(s.stat().st_mode | stat.S_IEXEC)
|
||||
return str(s)
|
||||
|
||||
def test_returns_stdout(self, tmp_path):
|
||||
out = _run_script(self._script(tmp_path, 'echo "hello world"'))
|
||||
assert out == "hello world"
|
||||
|
||||
def test_passes_args_to_script(self, tmp_path):
|
||||
out = _run_script(self._script(tmp_path, 'echo "$1 $2"'), "configure", "arg2")
|
||||
assert "configure" in out
|
||||
|
||||
def test_raises_on_nonzero_exit(self, tmp_path):
|
||||
with pytest.raises(RuntimeError, match="exited 1"):
|
||||
_run_script(self._script(tmp_path, "exit 1"))
|
||||
|
||||
def test_raises_on_relative_path(self):
|
||||
with pytest.raises(RuntimeError, match="absolute"):
|
||||
_run_script("relative/script.sh")
|
||||
|
||||
def test_raises_on_missing_file(self, tmp_path):
|
||||
with pytest.raises(RuntimeError):
|
||||
_run_script(str(tmp_path / "nonexistent.sh"))
|
||||
|
||||
def test_raises_on_timeout(self, tmp_path):
|
||||
with pytest.raises(RuntimeError, match="timed out"):
|
||||
_run_script(self._script(tmp_path, "sleep 60"), timeout=0.1)
|
||||
|
||||
def test_stderr_included_in_error_message(self, tmp_path):
|
||||
with pytest.raises(RuntimeError) as exc_info:
|
||||
_run_script(self._script(tmp_path, "echo 'bad thing' >&2; exit 1"))
|
||||
assert "bad thing" in str(exc_info.value)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _extract_tx_params
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestExtractTxParams:
|
||||
def test_returns_none_when_no_sdr_agent_attribute(self):
|
||||
tx = SimpleNamespace()
|
||||
assert _extract_tx_params(tx) is None
|
||||
|
||||
def test_returns_none_when_sdr_agent_is_none(self):
|
||||
tx = SimpleNamespace(sdr_agent=None)
|
||||
assert _extract_tx_params(tx) is None
|
||||
|
||||
def test_returns_none_when_sdr_agent_is_empty_dict(self):
|
||||
tx = SimpleNamespace(sdr_agent={})
|
||||
assert _extract_tx_params(tx) is None
|
||||
|
||||
def test_returns_signal_params(self):
|
||||
tx = SimpleNamespace(
|
||||
sdr_agent={
|
||||
"modulation": "QPSK",
|
||||
"symbol_rate": 1e6,
|
||||
"center_frequency": 2.4e9,
|
||||
}
|
||||
)
|
||||
result = _extract_tx_params(tx)
|
||||
assert result == {"modulation": "QPSK", "symbol_rate": 1e6, "center_frequency": 2.4e9}
|
||||
|
||||
def test_strips_infra_key_node_id(self):
|
||||
tx = SimpleNamespace(
|
||||
sdr_agent={
|
||||
"modulation": "BPSK",
|
||||
"node_id": "node_abc123",
|
||||
}
|
||||
)
|
||||
result = _extract_tx_params(tx)
|
||||
assert "node_id" not in result
|
||||
assert result == {"modulation": "BPSK"}
|
||||
|
||||
def test_strips_infra_key_session_code(self):
|
||||
tx = SimpleNamespace(
|
||||
sdr_agent={
|
||||
"modulation": "FSK",
|
||||
"session_code": "amber-peak-transmit",
|
||||
}
|
||||
)
|
||||
result = _extract_tx_params(tx)
|
||||
assert "session_code" not in result
|
||||
|
||||
def test_strips_none_values(self):
|
||||
tx = SimpleNamespace(
|
||||
sdr_agent={
|
||||
"modulation": "QPSK",
|
||||
"order": None,
|
||||
"rolloff": 0.35,
|
||||
}
|
||||
)
|
||||
result = _extract_tx_params(tx)
|
||||
assert "order" not in result
|
||||
assert result == {"modulation": "QPSK", "rolloff": 0.35}
|
||||
|
||||
def test_does_not_mutate_source_dict(self):
|
||||
cfg = {"modulation": "QPSK", "node_id": "nid", "session_code": "code"}
|
||||
tx = SimpleNamespace(sdr_agent=cfg)
|
||||
_extract_tx_params(tx)
|
||||
assert "node_id" in cfg
|
||||
|
||||
def test_full_sdr_agent_config(self):
|
||||
tx = SimpleNamespace(
|
||||
sdr_agent={
|
||||
"modulation": "16QAM",
|
||||
"order": 4,
|
||||
"symbol_rate": 5e6,
|
||||
"center_frequency": 915e6,
|
||||
"filter": "rrc",
|
||||
"rolloff": 0.35,
|
||||
"node_id": "node_xyz",
|
||||
"session_code": "some-code",
|
||||
}
|
||||
)
|
||||
result = _extract_tx_params(tx)
|
||||
assert result == {
|
||||
"modulation": "16QAM",
|
||||
"order": 4,
|
||||
"symbol_rate": 5e6,
|
||||
"center_frequency": 915e6,
|
||||
"filter": "rrc",
|
||||
"rolloff": 0.35,
|
||||
}
|
||||
|
|
@ -5,7 +5,7 @@ import time
|
|||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.orchestration.campaign import CaptureStep
|
||||
from ria_toolkit_oss.orchestration.labeler import build_output_filename, label_recording
|
||||
|
||||
|
|
@ -109,38 +109,6 @@ class TestLabelRecording:
|
|||
result = label_recording(rec, "iphone13_001", _wifi_step(), time.time())
|
||||
assert result is rec
|
||||
|
||||
def test_tx_params_none_by_default(self):
|
||||
rec = label_recording(_simple_recording(), "iphone13_001", _wifi_step(), time.time())
|
||||
tx_keys = [k for k in rec.metadata if k.startswith("tx_")]
|
||||
assert tx_keys == []
|
||||
|
||||
def test_tx_params_written_as_tx_prefix_keys(self):
|
||||
params = {"modulation": "QPSK", "symbol_rate": 1e6}
|
||||
rec = label_recording(_simple_recording(), "dev", _wifi_step(), time.time(), tx_params=params)
|
||||
assert rec.metadata["tx_modulation"] == "QPSK"
|
||||
assert rec.metadata["tx_symbol_rate"] == pytest.approx(1e6)
|
||||
|
||||
def test_tx_params_multiple_fields(self):
|
||||
params = {
|
||||
"modulation": "16QAM",
|
||||
"order": 4,
|
||||
"symbol_rate": 5e6,
|
||||
"center_frequency": 915e6,
|
||||
"filter": "rrc",
|
||||
"rolloff": 0.35,
|
||||
}
|
||||
rec = label_recording(_simple_recording(), "dev", _wifi_step(), time.time(), tx_params=params)
|
||||
for k, v in params.items():
|
||||
assert f"tx_{k}" in rec.metadata
|
||||
assert (
|
||||
rec.metadata[f"tx_{k}"] == pytest.approx(v) if isinstance(v, float) else rec.metadata[f"tx_{k}"] == v
|
||||
)
|
||||
|
||||
def test_tx_params_empty_dict_writes_nothing(self):
|
||||
rec = label_recording(_simple_recording(), "dev", _wifi_step(), time.time(), tx_params={})
|
||||
tx_keys = [k for k in rec.metadata if k.startswith("tx_") and k != "tx_power_dbm"]
|
||||
assert tx_keys == []
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# build_output_filename
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@
|
|||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from ria_toolkit_oss.data.recording import Recording
|
||||
from ria_toolkit_oss.datatypes.recording import Recording
|
||||
from ria_toolkit_oss.orchestration.campaign import QAConfig
|
||||
from ria_toolkit_oss.orchestration.qa import QAResult, check_recording, estimate_snr_db
|
||||
|
||||
|
|
|
|||
|
|
@ -1,153 +0,0 @@
|
|||
"""Tests for TxExecutor — signal synthesis and step execution."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import threading
|
||||
from unittest.mock import patch
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from ria_toolkit_oss.orchestration.tx_executor import TxExecutor
|
||||
|
||||
|
||||
def _cfg(modulation="QPSK", symbol_rate=100_000, steps=None):
|
||||
return {
|
||||
"id": "test-tx",
|
||||
"type": "sdr",
|
||||
"control_method": "sdr_agent",
|
||||
"sdr_agent": {
|
||||
"modulation": modulation,
|
||||
"symbol_rate": symbol_rate,
|
||||
"center_frequency": 0.0,
|
||||
"filter": "rrc",
|
||||
"rolloff": 0.35,
|
||||
},
|
||||
"schedule": steps or [{"label": "step1", "duration": 0.001, "power_dbm": -10}],
|
||||
}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Initialisation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestTxExecutorInit:
|
||||
def test_stores_sdr_device(self):
|
||||
ex = TxExecutor(_cfg(), sdr_device="pluto")
|
||||
assert ex.sdr_device == "pluto"
|
||||
|
||||
def test_stop_event_created_when_not_supplied(self):
|
||||
ex = TxExecutor(_cfg())
|
||||
assert isinstance(ex.stop_event, threading.Event)
|
||||
assert not ex.stop_event.is_set()
|
||||
|
||||
def test_accepts_external_stop_event(self):
|
||||
ev = threading.Event()
|
||||
ex = TxExecutor(_cfg(), stop_event=ev)
|
||||
assert ex.stop_event is ev
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# run() — schedule iteration
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestTxExecutorRun:
|
||||
def test_empty_schedule_returns_immediately(self):
|
||||
cfg = _cfg(steps=[])
|
||||
ex = TxExecutor(cfg)
|
||||
ex.run() # must not raise or block
|
||||
|
||||
def test_pre_set_stop_event_skips_all_steps(self):
|
||||
ev = threading.Event()
|
||||
ev.set()
|
||||
ex = TxExecutor(_cfg(), stop_event=ev)
|
||||
# If stop was set, _execute_step should never be called.
|
||||
# run() should return cleanly without attempting synthesis.
|
||||
ex.run()
|
||||
|
||||
def test_no_sdr_falls_back_to_simulation(self, monkeypatch):
|
||||
"""Without SDR hardware TxExecutor simulates by calling stop_event.wait."""
|
||||
cfg = _cfg(steps=[{"label": "s", "duration": 0.001, "power_dbm": 0}])
|
||||
waited = []
|
||||
real_ev = threading.Event()
|
||||
|
||||
def _fake_wait(timeout=None):
|
||||
waited.append(timeout)
|
||||
return False
|
||||
|
||||
monkeypatch.setattr(real_ev, "wait", _fake_wait)
|
||||
|
||||
# Patch SDR init to always fail (forces simulation path)
|
||||
with patch.object(TxExecutor, "_init_sdr", lambda self, *a, **kw: setattr(self, "_sdr", None)):
|
||||
ex = TxExecutor(cfg, sdr_device="nonexistent_xyz", stop_event=real_ev)
|
||||
ex.run()
|
||||
|
||||
assert len(waited) >= 1, "expected stop_event.wait to be called for simulation"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _synthesise — all modulation types and filter types
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestSynthesise:
|
||||
@pytest.fixture(autouse=True)
|
||||
def _ex(self):
|
||||
self.ex = TxExecutor(_cfg())
|
||||
|
||||
def _synth(self, mod, num_samples=256):
|
||||
return self.ex._synthesise(mod, sps=4, num_samples=num_samples, filter_type="rrc", rolloff=0.35)
|
||||
|
||||
@pytest.mark.parametrize("mod", ["BPSK", "QPSK", "8PSK", "16QAM", "64QAM", "256QAM"])
|
||||
def test_psk_qam_returns_complex64_array(self, mod):
|
||||
sig = self._synth(mod)
|
||||
assert sig.dtype == np.complex64
|
||||
assert len(sig) == 256
|
||||
|
||||
def test_fsk_returns_correct_length(self):
|
||||
sig = self._synth("FSK")
|
||||
assert len(sig) == 256
|
||||
|
||||
def test_ook_returns_correct_length(self):
|
||||
sig = self._synth("OOK")
|
||||
assert len(sig) == 256
|
||||
|
||||
def test_gmsk_returns_correct_length(self):
|
||||
sig = self._synth("GMSK")
|
||||
assert len(sig) == 256
|
||||
|
||||
def test_oqpsk_returns_correct_length(self):
|
||||
sig = self._synth("OQPSK")
|
||||
assert len(sig) == 256
|
||||
|
||||
@pytest.mark.parametrize("mod", ["BPSK", "QPSK", "16QAM", "FSK", "OOK", "GMSK"])
|
||||
def test_samples_are_finite(self, mod):
|
||||
sig = self._synth(mod)
|
||||
assert np.all(np.isfinite(sig.real)), f"{mod}: non-finite real samples"
|
||||
assert np.all(np.isfinite(sig.imag)), f"{mod}: non-finite imag samples"
|
||||
|
||||
def test_unknown_modulation_defaults_to_qpsk(self):
|
||||
sig = self._synth("UNKNOWN_MOD_XYZ")
|
||||
assert len(sig) == 256
|
||||
assert sig.dtype == np.complex64
|
||||
|
||||
@pytest.mark.parametrize("filter_type", ["rrc", "rc", "gaussian", "rect", "none"])
|
||||
def test_all_filter_types(self, filter_type):
|
||||
sig = self.ex._synthesise("QPSK", sps=4, num_samples=128, filter_type=filter_type, rolloff=0.35)
|
||||
assert len(sig) == 128
|
||||
|
||||
@pytest.mark.parametrize("n", [64, 128, 512, 1024])
|
||||
def test_output_length_matches_requested_samples(self, n):
|
||||
sig = self._synth("QPSK", num_samples=n)
|
||||
assert len(sig) == n
|
||||
|
||||
def test_bpsk_output_is_complex_not_real(self):
|
||||
sig = self._synth("BPSK")
|
||||
# complex64 always has imag part; just check dtype
|
||||
assert sig.dtype == np.complex64
|
||||
|
||||
def test_256qam_correct_length(self):
|
||||
sig = self._synth("256QAM")
|
||||
assert len(sig) == 256
|
||||
|
|
@ -7,7 +7,7 @@ import numpy as np
|
|||
import pytest
|
||||
from click.testing import CliRunner
|
||||
|
||||
from ria_toolkit_oss.data import Annotation, Recording
|
||||
from ria_toolkit_oss.datatypes import Annotation, Recording
|
||||
from ria_toolkit_oss.io import load_recording, to_npy, to_sigmf
|
||||
from ria_toolkit_oss_cli.cli import cli
|
||||
|
||||
|
|
|
|||
|
|
@ -189,8 +189,6 @@ class TestNoiseCommand:
|
|||
"10000",
|
||||
"--noise-type",
|
||||
"gaussian",
|
||||
"--power",
|
||||
"0.01",
|
||||
"--output",
|
||||
output,
|
||||
"-q",
|
||||
|
|
@ -236,7 +234,7 @@ class TestNoiseCommand:
|
|||
"--num-samples",
|
||||
"10000",
|
||||
"--power",
|
||||
"0.01",
|
||||
"0.5",
|
||||
"--output",
|
||||
output,
|
||||
"-q",
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ import numpy as np
|
|||
import pytest
|
||||
from click.testing import CliRunner
|
||||
|
||||
from ria_toolkit_oss.data import Annotation, Recording
|
||||
from ria_toolkit_oss.datatypes import Annotation, Recording
|
||||
from ria_toolkit_oss.io import load_recording, to_sigmf
|
||||
from ria_toolkit_oss_cli.cli import cli
|
||||
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
"""Tests for the RT-OSS HTTP server.
|
||||
|
||||
Covers: auth, inference lifecycle (without SDR/ONNX hardware), conductor
|
||||
Covers: auth, inference lifecycle (without SDR/ONNX hardware), orchestrator
|
||||
lifecycle (with mocked executor), and state helpers.
|
||||
|
||||
``start_inference`` and ``_inference_loop`` require real SDR hardware and an
|
||||
|
|
@ -286,17 +286,17 @@ class TestInferenceStop:
|
|||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# POST /conductor/deploy
|
||||
# POST /orchestrator/deploy
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestConductorDeploy:
|
||||
class TestOrchestratorDeploy:
|
||||
def test_deploy_422_on_invalid_config(self, client):
|
||||
with patch(
|
||||
"ria_toolkit_oss.server.routers.conductor.CampaignConfig.from_dict",
|
||||
"ria_toolkit_oss.server.routers.orchestrator.CampaignConfig.from_dict",
|
||||
side_effect=ValueError("missing required field 'name'"),
|
||||
):
|
||||
resp = client.post("/conductor/deploy", json={"config": {}})
|
||||
resp = client.post("/orchestrator/deploy", json={"config": {}})
|
||||
assert resp.status_code == 422
|
||||
|
||||
def test_deploy_returns_campaign_id(self, client):
|
||||
|
|
@ -307,10 +307,10 @@ class TestConductorDeploy:
|
|||
mock_executor.return_value.run.return_value = MagicMock(to_dict=lambda: {})
|
||||
|
||||
with (
|
||||
patch("ria_toolkit_oss.server.routers.conductor.CampaignConfig.from_dict", return_value=mock_cfg),
|
||||
patch("ria_toolkit_oss.server.routers.conductor.CampaignExecutor", mock_executor),
|
||||
patch("ria_toolkit_oss.server.routers.orchestrator.CampaignConfig.from_dict", return_value=mock_cfg),
|
||||
patch("ria_toolkit_oss.server.routers.orchestrator.CampaignExecutor", mock_executor),
|
||||
):
|
||||
resp = client.post("/conductor/deploy", json={"config": {"name": "test_campaign"}})
|
||||
resp = client.post("/orchestrator/deploy", json={"config": {"name": "test_campaign"}})
|
||||
|
||||
assert resp.status_code == 200
|
||||
body = resp.json()
|
||||
|
|
@ -325,23 +325,23 @@ class TestConductorDeploy:
|
|||
mock_executor.return_value.run.return_value = MagicMock(to_dict=lambda: {})
|
||||
|
||||
with (
|
||||
patch("ria_toolkit_oss.server.routers.conductor.CampaignConfig.from_dict", return_value=mock_cfg),
|
||||
patch("ria_toolkit_oss.server.routers.conductor.CampaignExecutor", mock_executor),
|
||||
patch("ria_toolkit_oss.server.routers.orchestrator.CampaignConfig.from_dict", return_value=mock_cfg),
|
||||
patch("ria_toolkit_oss.server.routers.orchestrator.CampaignExecutor", mock_executor),
|
||||
):
|
||||
resp = client.post("/conductor/deploy", json={"config": {}})
|
||||
resp = client.post("/orchestrator/deploy", json={"config": {}})
|
||||
|
||||
campaign_id = resp.json()["campaign_id"]
|
||||
assert state_module._campaigns.get(campaign_id) is not None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# GET /conductor/status/{campaign_id}
|
||||
# GET /orchestrator/status/{campaign_id}
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestConductorStatus:
|
||||
class TestOrchestratorStatus:
|
||||
def test_status_404_for_unknown_id(self, client):
|
||||
resp = client.get("/conductor/status/nonexistent-id")
|
||||
resp = client.get("/orchestrator/status/nonexistent-id")
|
||||
assert resp.status_code == 404
|
||||
|
||||
def test_status_returns_campaign_state(self, client):
|
||||
|
|
@ -357,7 +357,7 @@ class TestConductorStatus:
|
|||
)
|
||||
state_module._campaigns["abc-123"] = state
|
||||
|
||||
resp = client.get("/conductor/status/abc-123")
|
||||
resp = client.get("/orchestrator/status/abc-123")
|
||||
assert resp.status_code == 200
|
||||
body = resp.json()
|
||||
assert body["campaign_id"] == "abc-123"
|
||||
|
|
@ -367,13 +367,13 @@ class TestConductorStatus:
|
|||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# POST /conductor/cancel/{campaign_id}
|
||||
# POST /orchestrator/cancel/{campaign_id}
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestConductorCancel:
|
||||
class TestOrchestratorCancel:
|
||||
def test_cancel_404_for_unknown_id(self, client):
|
||||
resp = client.post("/conductor/cancel/no-such-id")
|
||||
resp = client.post("/orchestrator/cancel/no-such-id")
|
||||
assert resp.status_code == 404
|
||||
|
||||
def test_cancel_sets_cancel_event(self, client):
|
||||
|
|
@ -387,7 +387,7 @@ class TestConductorCancel:
|
|||
)
|
||||
state_module._campaigns["camp-to-cancel"] = state
|
||||
|
||||
resp = client.post("/conductor/cancel/camp-to-cancel")
|
||||
resp = client.post("/orchestrator/cancel/camp-to-cancel")
|
||||
assert resp.status_code == 200
|
||||
assert resp.json()["cancelled"] is True
|
||||
assert cancel_event.is_set()
|
||||
|
|
@ -403,7 +403,7 @@ class TestConductorCancel:
|
|||
)
|
||||
state_module._campaigns["done"] = state
|
||||
|
||||
resp = client.post("/conductor/cancel/done")
|
||||
resp = client.post("/orchestrator/cancel/done")
|
||||
assert resp.status_code == 200
|
||||
assert resp.json()["cancelled"] is False
|
||||
assert not cancel_event.is_set()
|
||||
|
|
|
|||
|
|
@ -1,247 +0,0 @@
|
|||
"""Tests for NodeAgent — TX role, session code, and TX command dispatch."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import threading
|
||||
import time
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from ria_toolkit_oss.agent import NodeAgent
|
||||
|
||||
|
||||
def _agent(role="general", session_code=None, **kwargs):
|
||||
return NodeAgent(
|
||||
hub_url="http://hub.test",
|
||||
api_key="test-key",
|
||||
name="test-node",
|
||||
sdr_device="mock",
|
||||
role=role,
|
||||
session_code=session_code,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
|
||||
def _mock_register(agent, node_id="node_abc123"):
|
||||
"""Patch _post so _register() returns a fake node_id response."""
|
||||
resp = MagicMock()
|
||||
resp.json.return_value = {"node_id": node_id}
|
||||
resp.raise_for_status.return_value = None
|
||||
agent._post = MagicMock(return_value=resp)
|
||||
return agent._post
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Initialisation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestNodeAgentInit:
|
||||
def test_stores_role_general(self):
|
||||
assert _agent(role="general").role == "general"
|
||||
|
||||
def test_stores_role_tx(self):
|
||||
assert _agent(role="tx").role == "tx"
|
||||
|
||||
def test_stores_role_rx(self):
|
||||
assert _agent(role="rx").role == "rx"
|
||||
|
||||
def test_session_code_stored(self):
|
||||
assert _agent(session_code="amber-peak-transmit").session_code == "amber-peak-transmit"
|
||||
|
||||
def test_session_code_none_by_default(self):
|
||||
assert _agent().session_code is None
|
||||
|
||||
def test_tx_stop_event_created(self):
|
||||
a = _agent()
|
||||
assert isinstance(a._tx_stop, threading.Event)
|
||||
|
||||
def test_tx_thread_none_initially(self):
|
||||
assert _agent()._tx_thread is None
|
||||
|
||||
def test_hub_url_trailing_slash_stripped(self):
|
||||
a = NodeAgent(hub_url="http://hub.test/", api_key="k", name="n")
|
||||
assert a.hub_url == "http://hub.test"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _register payload
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestNodeAgentRegisterPayload:
|
||||
def _payload(self, agent):
|
||||
post = _mock_register(agent)
|
||||
agent._register()
|
||||
_, kwargs = post.call_args
|
||||
return kwargs["json"]
|
||||
|
||||
def test_general_role_in_payload(self):
|
||||
payload = self._payload(_agent(role="general"))
|
||||
assert payload["role"] == "general"
|
||||
|
||||
def test_tx_role_in_payload(self):
|
||||
payload = self._payload(_agent(role="tx"))
|
||||
assert payload["role"] == "tx"
|
||||
|
||||
def test_tx_role_adds_transmit_capability(self):
|
||||
payload = self._payload(_agent(role="tx"))
|
||||
assert "transmit" in payload["capabilities"]
|
||||
|
||||
def test_general_role_omits_transmit_capability(self):
|
||||
payload = self._payload(_agent(role="general"))
|
||||
assert "transmit" not in payload.get("capabilities", [])
|
||||
|
||||
def test_session_code_included_when_set(self):
|
||||
payload = self._payload(_agent(role="tx", session_code="amber-peak-transmit"))
|
||||
assert payload["session_code"] == "amber-peak-transmit"
|
||||
|
||||
def test_session_code_omitted_when_none(self):
|
||||
payload = self._payload(_agent())
|
||||
assert "session_code" not in payload
|
||||
|
||||
def test_register_stores_returned_node_id(self):
|
||||
a = _agent()
|
||||
_mock_register(a, node_id="node_xyz999")
|
||||
a._register()
|
||||
assert a.node_id == "node_xyz999"
|
||||
|
||||
def test_name_in_payload(self):
|
||||
a = NodeAgent(hub_url="http://h", api_key="k", name="my-bench")
|
||||
_mock_register(a)
|
||||
a._register()
|
||||
_, kwargs = a._post.call_args
|
||||
assert kwargs["json"]["name"] == "my-bench"
|
||||
|
||||
def test_sdr_device_in_payload(self):
|
||||
a = _agent()
|
||||
post = _mock_register(a)
|
||||
a._register()
|
||||
_, kwargs = post.call_args
|
||||
assert kwargs["json"]["sdr_device"] == "mock"
|
||||
|
||||
def test_campaign_capability_always_present(self):
|
||||
for role in ("general", "rx", "tx"):
|
||||
a = _agent(role=role)
|
||||
payload = self._payload(a)
|
||||
assert "campaign" in payload["capabilities"]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _dispatch — TX commands
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestNodeAgentDispatch:
|
||||
def _make_agent(self):
|
||||
a = _agent(role="tx")
|
||||
a.node_id = "node_abc"
|
||||
a._report_campaign_status = MagicMock()
|
||||
return a
|
||||
|
||||
def test_start_transmit_spawns_thread(self):
|
||||
a = self._make_agent()
|
||||
done = threading.Event()
|
||||
|
||||
class _FakeExecutor:
|
||||
def run(self_):
|
||||
done.wait(timeout=2)
|
||||
|
||||
with patch("ria_toolkit_oss.orchestration.tx_executor.TxExecutor", return_value=_FakeExecutor()):
|
||||
a._dispatch({"command": "start_transmit", "sdr_agent": {}, "schedule": []})
|
||||
time.sleep(0.05)
|
||||
assert a._tx_thread is not None
|
||||
done.set()
|
||||
|
||||
def test_start_transmit_clears_stop_event(self):
|
||||
a = self._make_agent()
|
||||
a._tx_stop.set() # pre-set
|
||||
|
||||
done = threading.Event()
|
||||
|
||||
class _FakeExecutor:
|
||||
def run(self_):
|
||||
done.wait(timeout=2)
|
||||
|
||||
with patch("ria_toolkit_oss.orchestration.tx_executor.TxExecutor", return_value=_FakeExecutor()):
|
||||
a._dispatch({"command": "start_transmit", "sdr_agent": {}, "schedule": []})
|
||||
time.sleep(0.05)
|
||||
assert not a._tx_stop.is_set()
|
||||
done.set()
|
||||
|
||||
def test_stop_transmit_sets_stop_event(self):
|
||||
a = self._make_agent()
|
||||
a._dispatch({"command": "stop_transmit"})
|
||||
assert a._tx_stop.is_set()
|
||||
|
||||
def test_configure_transmit_does_not_raise(self):
|
||||
a = self._make_agent()
|
||||
a._dispatch({"command": "configure_transmit", "modulation": "BPSK"})
|
||||
|
||||
def test_unknown_command_is_ignored(self):
|
||||
a = self._make_agent()
|
||||
a._dispatch({"command": "frobnicate_xyz"})
|
||||
|
||||
def test_duplicate_start_transmit_ignored_while_running(self):
|
||||
a = self._make_agent()
|
||||
done = threading.Event()
|
||||
run_calls = []
|
||||
|
||||
class _FakeExecutor:
|
||||
def run(self_):
|
||||
run_calls.append(1)
|
||||
done.wait(timeout=2)
|
||||
|
||||
with patch("ria_toolkit_oss.orchestration.tx_executor.TxExecutor", return_value=_FakeExecutor()):
|
||||
a._dispatch({"command": "start_transmit"})
|
||||
time.sleep(0.05)
|
||||
a._dispatch({"command": "start_transmit"}) # second while first alive
|
||||
done.set()
|
||||
time.sleep(0.05)
|
||||
|
||||
assert len(run_calls) == 1
|
||||
|
||||
def test_run_campaign_dispatched_in_thread(self):
|
||||
a = self._make_agent()
|
||||
done = threading.Event()
|
||||
|
||||
with patch("ria_toolkit_oss.agent.NodeAgent._run_campaign") as mock_run:
|
||||
mock_run.side_effect = lambda *_: done.set()
|
||||
a._dispatch({"command": "run_campaign", "campaign_id": "c1", "payload": {}})
|
||||
done.wait(timeout=2)
|
||||
assert mock_run.called
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _stop_transmit
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestStopTransmit:
|
||||
def test_no_thread_noop(self):
|
||||
a = _agent()
|
||||
a._stop_transmit() # must not raise
|
||||
|
||||
def test_sets_stop_event(self):
|
||||
a = _agent()
|
||||
a._stop_transmit()
|
||||
assert a._tx_stop.is_set()
|
||||
|
||||
def test_joins_live_thread(self):
|
||||
a = _agent()
|
||||
finished = threading.Event()
|
||||
unblock = threading.Event()
|
||||
|
||||
def _task():
|
||||
unblock.wait(timeout=2)
|
||||
finished.set()
|
||||
|
||||
t = threading.Thread(target=_task, daemon=True)
|
||||
t.start()
|
||||
a._tx_thread = t
|
||||
|
||||
# Signal stop and trigger thread exit
|
||||
a._tx_stop.set()
|
||||
unblock.set()
|
||||
a._stop_transmit()
|
||||
|
||||
assert not t.is_alive()
|
||||
|
|
@ -1,7 +1,7 @@
|
|||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from ria_toolkit_oss.data import Recording
|
||||
from ria_toolkit_oss.datatypes import Recording
|
||||
from ria_toolkit_oss.transforms import iq_augmentations
|
||||
|
||||
TEST_DATA1 = [[1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j]]
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ Bugs/issues identified during review:
|
|||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from ria_toolkit_oss.data import Recording
|
||||
from ria_toolkit_oss.datatypes import Recording
|
||||
from ria_toolkit_oss.transforms import iq_impairments
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user