Updated formatting and comments; also changed the dataset seed from 25 to 42. All other configuration values are unchanged.
This commit is contained in:
parent
c12ba88b78
commit
c9e996bac8
|
@ -1,20 +1,16 @@
|
||||||
general:
|
|
||||||
# Run mode. Options are 'prod' or 'dev'.
|
|
||||||
run_mode: prod
|
|
||||||
|
|
||||||
dataset:
|
dataset:
|
||||||
#number of slices you want to split each recording into
|
# Seed for the random number generator, used for signal generation
|
||||||
num_slices: 8
|
seed: 42
|
||||||
|
|
||||||
#training/val split between the 2 data sets
|
# Number of samples per recording
|
||||||
train_split: 0.8
|
recording_length: 1024
|
||||||
val_split : 0.2
|
|
||||||
|
|
||||||
#used to initialize a random number generator.
|
# List of signal modulation schemes to include in the dataset
|
||||||
seed: 25
|
modulation_types:
|
||||||
|
- bpsk
|
||||||
#multiple modulations to contain in the dataset
|
- qpsk
|
||||||
modulation_types: [bpsk, qpsk, qam16, qam64]
|
- qam16
|
||||||
|
- qam64
|
||||||
|
|
||||||
# Rolloff factor for pulse shaping filter (0 < beta <= 1)
|
# Rolloff factor for pulse shaping filter (0 < beta <= 1)
|
||||||
beta: 0.3
|
beta: 0.3
|
||||||
|
@ -23,20 +19,18 @@ dataset:
|
||||||
sps: 4
|
sps: 4
|
||||||
|
|
||||||
# SNR sweep range: start, stop (exclusive), and step (in dB)
|
# SNR sweep range: start, stop (exclusive), and step (in dB)
|
||||||
snr_start: -6 # Start value of SNR sweep (in dB)
|
snr_start: -6
|
||||||
snr_stop: 13 # Stop value (exclusive) of SNR sweep (in dB)
|
snr_stop: 13
|
||||||
snr_step: 3 # Step size for SNR sweep (in dB)
|
snr_step: 3
|
||||||
|
|
||||||
# Number of iterations (samples) per modulation and SNR combination
|
# Number of iterations (signal recordings) per modulation and SNR combination
|
||||||
num_iterations: 3
|
num_iterations: 3
|
||||||
|
|
||||||
# Number of samples per generated recording
|
# Modulation scheme settings; keys must match the `modulation_types` list above
|
||||||
recording_length: 1024
|
# Each entry includes:
|
||||||
|
# - num_bits_per_symbol: bits encoded per symbol (e.g., 1 for BPSK, 4 for 16-QAM)
|
||||||
# Settings for each modulation scheme
|
# - constellation_type: modulation category (e.g., "psk", "qam", "fsk", "ofdm")
|
||||||
# Keys must match entries in `modulation_types`
|
# TODO: Combine entries for 'modulation_types' and 'modulation_settings'
|
||||||
# - `num_bits_per_symbol`: how many bits each symbol encodes (e.g., 1 for BPSK, 4 for 16-QAM)
|
|
||||||
# - `constellation_type`: type of modulation (e.g., "psk", "qam", "fsk", "ofdm")
|
|
||||||
modulation_settings:
|
modulation_settings:
|
||||||
bpsk:
|
bpsk:
|
||||||
num_bits_per_symbol: 1
|
num_bits_per_symbol: 1
|
||||||
|
@ -51,20 +45,25 @@ dataset:
|
||||||
num_bits_per_symbol: 6
|
num_bits_per_symbol: 6
|
||||||
constellation_type: qam
|
constellation_type: qam
|
||||||
|
|
||||||
|
# Number of slices to cut from each recording
|
||||||
|
num_slices: 8
|
||||||
|
|
||||||
|
# Training and validation split ratios; must sum to 1
|
||||||
|
train_split: 0.8
|
||||||
|
val_split: 0.2
|
||||||
|
|
||||||
training:
|
training:
|
||||||
# Number of training samples processed together before the model updates its weights
|
# Number of training examples processed together before the model updates its weights
|
||||||
batch_size: 256
|
batch_size: 256
|
||||||
|
|
||||||
# Number of complete passes through the training dataset during training
|
# Number of complete passes through the training dataset during training
|
||||||
epochs: 5
|
epochs: 5
|
||||||
|
|
||||||
# Learning rate: how much weights are updated after every batch
|
# Learning rate: step size for weight updates after each batch
|
||||||
# Suggested range for fine-tuning: 1e-6 to 1e-4
|
# Recommended range for fine-tuning: 1e-6 to 1e-4
|
||||||
learning_rate: 1e-4
|
learning_rate: 1e-4
|
||||||
|
|
||||||
# Whether to use GPU acceleration for training (if available)
|
# Enable GPU acceleration for training if available
|
||||||
use_gpu: true
|
use_gpu: true
|
||||||
|
|
||||||
# Dropout rate for individual neurons/layers (probability of dropping out a unit)
|
# Dropout rate for individual neurons/layers (probability of dropping out a unit)
|
||||||
|
@ -73,13 +72,12 @@ training:
|
||||||
# Drop path rate: probability of dropping entire residual paths (stochastic depth)
|
# Drop path rate: probability of dropping entire residual paths (stochastic depth)
|
||||||
drop_path_rate: 0.2
|
drop_path_rate: 0.2
|
||||||
|
|
||||||
# Weight decay (L2 regularization) to help prevent overfitting
|
# Weight decay (L2 regularization) coefficient to help prevent overfitting
|
||||||
wd: 0.01
|
wd: 0.01
|
||||||
|
|
||||||
|
|
||||||
app:
|
app:
|
||||||
# Optimization style for ORT conversion. Options: 'Fixed', 'None'
|
# Optimization style for ORT conversion; options: 'Fixed', 'None'
|
||||||
optimization_style: Fixed
|
optimization_style: "Fixed"
|
||||||
|
|
||||||
# Target platform architecture. Common options: 'amd64', 'arm64'
|
# Target platform architecture; common options: 'amd64', 'arm64'
|
||||||
target_platform: amd64
|
target_platform: "amd64"
|
||||||
|
|
Loading…
Reference in New Issue
Block a user