Deleted old recordings, updated GPU settings for training

Liyu Xiao 2025-05-22 15:57:20 -04:00
parent 123cb82334
commit 44507493a3
6 changed files with 50 additions and 51 deletions

.gitignore

@@ -5,4 +5,5 @@ __pycache__/
 *.h5
 *.ckpt
 *.ipynb
 *.onnx
+*.npy


@@ -20,9 +20,7 @@ training:
   use_gpu: true

 inference:
-  model_path: checkpoints/inference_recognition_model.ckpt
   num_classes: 4
-  output_path: onnx_files/inference_recognition_model.onnx

 app:
   build_dir: dist


@@ -79,5 +79,7 @@ if __name__ == "__main__":
     convert_to_onnx(
         ckpt_path=os.path.join(CHECKPOINTS_DIR, model_checkpoint), fp16=False
     )
-    print("Conversion complete stored at: ", os.path.join(ONNX_DIR, model_checkpoint))
+    output_file = "inference_recognition_model.onnx"
+    print("Conversion complete stored at: ", os.path.join(ONNX_DIR, output_file))


@@ -13,56 +13,56 @@ mods = {
 def generate_modulated_signals():
     for modulation in ["bpsk", "qpsk", "qam16", "qam64"]:
         for snr in np.arange(-6, 13, 3):
-            recording_length = 1024
-            beta = 0.3  # the rolloff factor, can be changed to add variety
-            sps = 4  # samples per symbol, or the relative bandwidth of the digital signal. Can also be changed.
-
-            # blocks don't directly take the string 'qpsk' so we use the dict 'mods' to get parameters
-            constellation_type = mods[modulation]["constellation_type"]
-            num_bits_per_symbol = mods[modulation]["num_bits_per_symbol"]
-
-            # construct the digital modulation blocks with these parameters
-            # we have bit source -> mapper -> upsampling -> pulse shaping
-
-            bit_source = block_generator.RandomBinarySource()
-            mapper = block_generator.Mapper(
-                constellation_type=constellation_type,
-                num_bits_per_symbol=num_bits_per_symbol,
-            )
-            upsampler = block_generator.Upsampling(factor=sps)
-            pulse_shaping_filter = block_generator.RaisedCosineFilter(
-                upsampling_factor=sps, beta=beta
-            )
-
-            pulse_shaping_filter.connect_input([upsampler])
-            upsampler.connect_input([mapper])
-            mapper.connect_input([bit_source])
-
-            modulation_recording = pulse_shaping_filter.record(
-                num_samples=recording_length
-            )
-
-            # add noise by calculating the power of the modulation recording and generating AWGN from the snr parameter
-            signal_power = np.mean(np.abs(modulation_recording.data[0] ** 2))
-            awgn_source = block_generator.AWGNSource(
-                variance=(signal_power / 2) * (10 ** (((-1 * snr) / 20)))
-            )
-            noise = awgn_source.record(num_samples=recording_length)
-            samples_with_noise = modulation_recording.data + noise.data
-            output_recording = Recording(data=samples_with_noise)
-
-            # add metadata for ML later
-            output_recording.add_to_metadata(key="modulation", value=modulation)
-            output_recording.add_to_metadata(key="snr", value=int(snr))
-            output_recording.add_to_metadata(key="beta", value=beta)
-            output_recording.add_to_metadata(key="sps", value=sps)
-
-            # view if you want
-            # output_recording.view()
-
-            # save to file
-            output_recording.to_npy()  # optionally add path and filename parameters
+            for i in range(100):
+                recording_length = 1024
+                beta = 0.3  # the rolloff factor, can be changed to add variety
+                sps = 4  # samples per symbol, or the relative bandwidth of the digital signal. Can also be changed.
+                # blocks don't directly take the string 'qpsk' so we use the dict 'mods' to get parameters
+                constellation_type = mods[modulation]["constellation_type"]
+                num_bits_per_symbol = mods[modulation]["num_bits_per_symbol"]
+                # construct the digital modulation blocks with these parameters
+                # we have bit source -> mapper -> upsampling -> pulse shaping
+
+                bit_source = block_generator.RandomBinarySource()
+                mapper = block_generator.Mapper(
+                    constellation_type=constellation_type,
+                    num_bits_per_symbol=num_bits_per_symbol,
+                )
+                upsampler = block_generator.Upsampling(factor=sps)
+                pulse_shaping_filter = block_generator.RaisedCosineFilter(
+                    upsampling_factor=sps, beta=beta
+                )
+
+                pulse_shaping_filter.connect_input([upsampler])
+                upsampler.connect_input([mapper])
+                mapper.connect_input([bit_source])
+
+                modulation_recording = pulse_shaping_filter.record(
+                    num_samples=recording_length
+                )
+
+                # add noise by calculating the power of the modulation recording and generating AWGN from the snr parameter
+                signal_power = np.mean(np.abs(modulation_recording.data[0] ** 2))
+                awgn_source = block_generator.AWGNSource(
+                    variance=(signal_power / 2) * (10 ** (((-1 * snr) / 20)))
+                )
+                noise = awgn_source.record(num_samples=recording_length)
+                samples_with_noise = modulation_recording.data + noise.data
+                output_recording = Recording(data=samples_with_noise)
+
+                # add metadata for ML later
+                output_recording.add_to_metadata(key="modulation", value=modulation)
+                output_recording.add_to_metadata(key="snr", value=int(snr))
+                output_recording.add_to_metadata(key="beta", value=beta)
+                output_recording.add_to_metadata(key="sps", value=sps)
+
+                # view if you want
+                # output_recording.view()
+
+                # save to file
+                output_recording.to_npy()  # optionally add path and filename parameters

 if __name__ == "__main__":
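The noise step in this hunk derives the AWGN variance from the measured signal power and the swept snr value (in dB). Below is a minimal NumPy sketch of that same expression, standing in for the repo's block_generator.AWGNSource; the unit-power placeholder signal and the seed are assumptions for illustration only.

import numpy as np

rng = np.random.default_rng(0)

snr = 6  # dB, one of the values swept by the generator
signal = np.exp(2j * np.pi * rng.random(1024))  # placeholder unit-power complex samples

signal_power = np.mean(np.abs(signal) ** 2)
# same variance expression as the AWGNSource call in the diff above
variance = (signal_power / 2) * (10 ** ((-1 * snr) / 20))

# complex AWGN with that per-component variance, split across I and Q
noise = rng.normal(0, np.sqrt(variance), signal.shape) + 1j * rng.normal(
    0, np.sqrt(variance), signal.shape
)
noisy = signal + noise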


@@ -31,7 +31,7 @@ def train_model():
     train_flag = True
     batch_size = 128
-    epochs = 1
+    epochs = 50
     checkpoint_dir = training_cfg.checkpoint_dir
     checkpoint_filename = training_cfg.checkpoint_filename

@@ -76,8 +76,8 @@ def train_model():
 hparams = {
     "drop_path_rate": 0.2,
     "drop_rate": 0.5,
-    "learning_rate": 3e-4,
-    "wd": 0.2,
+    "learning_rate": 1e-4,
+    "wd": 0.01,
 }

 class RFClassifier(L.LightningModule):
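The retuned learning_rate and wd entries are presumably consumed by the optimizer inside RFClassifier, which is not shown in this hunk. A hypothetical LightningModule sketch of how such an hparams dict typically reaches AdamW; the class name and placeholder backbone are assumptions, not the repo's actual model.

import lightning as L
import torch

hparams = {"drop_path_rate": 0.2, "drop_rate": 0.5, "learning_rate": 1e-4, "wd": 0.01}

class RFClassifierSketch(L.LightningModule):
    """Hypothetical stand-in; the real RFClassifier is not part of this hunk."""

    def __init__(self, hparams: dict):
        super().__init__()
        self.save_hyperparameters(hparams)
        self.model = torch.nn.Linear(1024, 4)  # placeholder backbone

    def configure_optimizers(self):
        # the tuned learning_rate / wd values feed the optimizer here
        return torch.optim.AdamW(
            self.parameters(),
            lr=self.hparams["learning_rate"],
            weight_decay=self.hparams["wd"],
        )

model = RFClassifierSketch(hparams)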


@@ -33,9 +33,7 @@ class TrainingConfig:
 @dataclass
 class InferenceConfig:
-    model_path: str
     num_classes: int
-    output_path: str

 @dataclass
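After this change, InferenceConfig only carries num_classes, matching the trimmed inference section of the YAML hunk earlier in the commit. A small sketch of loading that section into the dataclass; PyYAML and the inline YAML string are assumptions, and the repo's actual config loader may differ.

from dataclasses import dataclass

import yaml  # assumption: PyYAML used here only for illustration

@dataclass
class InferenceConfig:
    num_classes: int

cfg_text = """
inference:
  num_classes: 4
"""
inference_cfg = InferenceConfig(**yaml.safe_load(cfg_text)["inference"])
print(inference_cfg)  # InferenceConfig(num_classes=4)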