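# Usage (a minimal sketch; it assumes this file is loaded with PyYAML and
# that the filename is config.yaml -- both are assumptions):
#
#   import yaml
#   with open("config.yaml") as f:
#       cfg = yaml.safe_load(f)
#   print(cfg["training"]["batch_size"])  # -> 256
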
general:
  # Run mode. Options are 'prod' or 'dev'.
  run_mode: prod

dataset:
  # Number of slices to split each recording into.
  num_slices: 8

  # Train/validation split between the two datasets (fractions should sum to 1.0).
  train_split: 0.8
  val_split: 0.2

  # Seed used to initialize the random number generator (for reproducible runs).
  seed: 25

  # Modulation types to include in the dataset.
  modulation_types: [bpsk, qpsk, qam16, qam64]

  # Rolloff factor for the pulse shaping filter (0 < beta <= 1).
  beta: 0.3

  # Samples per symbol (determines the bandwidth of the digital signal).
  sps: 4
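  # Rough check, assuming raised-cosine shaping (the filter type is not named
  # here): occupied bandwidth ~= (1 + beta) / sps = 1.3 / 4 = 0.325 of the
  # sample rate with these defaults.
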
  # SNR sweep range: start, stop (exclusive), and step (in dB).
  snr_start: -6  # Start value of SNR sweep (in dB)
  snr_stop: 13   # Stop value (exclusive) of SNR sweep (in dB)
  snr_step: 3    # Step size for SNR sweep (in dB)
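  # With these defaults the sweep covers -6, -3, 0, 3, 6, 9, 12 dB (7 levels).
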
  # Number of iterations (samples) per modulation and SNR combination.
  num_iterations: 3

  # Number of samples per generated recording.
  recording_length: 1024
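  # Rough size check (assuming one recording per modulation/SNR/iteration):
  # 4 modulations x 7 SNR levels x 3 iterations = 84 recordings, each split
  # into 8 slices of 128 samples, i.e. 672 slices before the train/val split.
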
training:
  # Number of training samples processed together before the model updates its weights.
  batch_size: 256

  # Number of complete passes through the training dataset.
  epochs: 5

  # Learning rate: scales how much the weights are updated after each batch.
  # Suggested range for fine-tuning: 1e-6 to 1e-4.
  # Written with a decimal point so YAML 1.1 loaders (e.g. PyYAML) resolve it
  # as a float rather than a string.
  learning_rate: 1.0e-4

  # Whether to use GPU acceleration for training (if available).
  use_gpu: true

  # Dropout rate for individual neurons/layers (probability of dropping out a unit).
  drop_rate: 0.5

  # Drop path rate: probability of dropping entire residual paths (stochastic depth).
  drop_path_rate: 0.2

  # Weight decay (L2 regularization) to help prevent overfitting.
  wd: 0.01
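  # Rough check: with ~538 training slices (80% of the 672 estimated above)
  # and batch_size 256, each epoch runs about 2-3 weight updates.
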
app:
  # Optimization style for ORT (ONNX Runtime) conversion. Options: 'Fixed', 'None'.
  optimization_style: Fixed

  # Target platform architecture. Common options: 'amd64', 'arm64'.
  target_platform: amd64