forked from qoherent/modrec-workflow
deleted old recordings, updated gpu for training
parent 123cb82334
commit 44507493a3

.gitignore (vendored)
@@ -6,3 +6,4 @@ __pycache__/
 *.ckpt
 *.ipynb
 *.onnx
+*.npy
@@ -20,9 +20,7 @@ training:
   use_gpu: true
 
 inference:
-  model_path: checkpoints/inference_recognition_model.ckpt
   num_classes: 4
-  output_path: onnx_files/inference_recognition_model.onnx
 
 app:
   build_dir: dist
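The two path keys dropped from the inference section above also disappear as dataclass fields later in this commit. Below is a minimal sketch of how the trimmed section might be loaded, assuming a PyYAML-based loader and a config file named config.yaml; neither the loader nor the file name is shown in the diff.

from dataclasses import dataclass

import yaml  # PyYAML, assumed to be how the repo reads its config


@dataclass
class InferenceConfig:
    num_classes: int  # the only inference key left after this change


def load_inference_config(path: str = "config.yaml") -> InferenceConfig:
    with open(path) as f:
        cfg = yaml.safe_load(f)
    # Checkpoint and ONNX paths are no longer configured here; the conversion
    # script changed below hard-codes them instead.
    return InferenceConfig(**cfg["inference"])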
@@ -80,4 +80,6 @@ if __name__ == "__main__":
         ckpt_path=os.path.join(CHECKPOINTS_DIR, model_checkpoint), fp16=False
     )
 
-    print("Conversion complete stored at: ", os.path.join(ONNX_DIR, model_checkpoint))
+    output_file = "inference_recognition_model.onnx"
+
+    print("Conversion complete stored at: ", os.path.join(ONNX_DIR, output_file))
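The repo evidently has its own checkpoint-to-ONNX helper (the fp16=False call above); the sketch below only illustrates the generic torch.onnx.export path that such a helper typically wraps. CHECKPOINTS_DIR, ONNX_DIR and the output file name follow the diff, while the model class, input shape and opset version are assumptions.

import os

import torch

CHECKPOINTS_DIR = "checkpoints"
ONNX_DIR = "onnx_files"


def export_to_onnx(model, model_checkpoint: str,
                   output_file: str = "inference_recognition_model.onnx") -> str:
    """Load a checkpoint into model and export it to ONNX, returning the output path."""
    state = torch.load(
        os.path.join(CHECKPOINTS_DIR, model_checkpoint),
        map_location="cpu",
        weights_only=False,  # Lightning checkpoints carry more than raw tensors
    )
    # Lightning checkpoints keep weights under "state_dict"; fall back for plain saves.
    model.load_state_dict(state.get("state_dict", state), strict=False)
    model.eval()
    dummy = torch.randn(1, 2, 1024)  # assumed I/Q input: (batch, channels, recording_length)
    out_path = os.path.join(ONNX_DIR, output_file)
    torch.onnx.export(model, dummy, out_path, opset_version=17)
    return out_path

With something like this, the final print reports the actual .onnx file rather than the checkpoint name, which is what the hunk above fixes.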
@@ -13,7 +13,7 @@ mods = {
 def generate_modulated_signals():
     for modulation in ["bpsk", "qpsk", "qam16", "qam64"]:
         for snr in np.arange(-6, 13, 3):
+            for i in range(100):
                 recording_length = 1024
                 beta = 0.3  # the rolloff factor, can be changed to add variety
                 sps = 4  # samples per symbol, or the relative bandwidth of the digital signal. Can also be changed.
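For context, here is a numpy-only sketch of what one pass through these loops might produce: a raised-cosine-shaped recording with AWGN at a given SNR. recording_length, sps and beta mirror the diff; the modulation is fixed to QPSK and the pulse-shaping and noise code are illustrative assumptions, not the repo's generator.

import numpy as np


def qpsk_recording(recording_length=1024, sps=4, beta=0.3, snr_db=0.0, rng=None):
    """Generate one pulse-shaped QPSK recording with AWGN at the given SNR (dB)."""
    if rng is None:
        rng = np.random.default_rng()
    n_sym = recording_length // sps
    bits = rng.integers(0, 2, size=(n_sym, 2))
    symbols = ((2 * bits[:, 0] - 1) + 1j * (2 * bits[:, 1] - 1)) / np.sqrt(2)

    # Raised-cosine pulse with rolloff factor beta, truncated to +/- 4 symbols.
    t = np.arange(-4 * sps, 4 * sps + 1) / sps
    h = np.sinc(t) * np.cos(np.pi * beta * t) / (1.0 - (2.0 * beta * t) ** 2 + 1e-12)
    upsampled = np.zeros(n_sym * sps, dtype=complex)
    upsampled[::sps] = symbols
    signal = np.convolve(upsampled, h, mode="same")

    # Complex AWGN scaled so that signal power / noise power matches snr_db.
    p_sig = np.mean(np.abs(signal) ** 2)
    p_noise = p_sig / 10 ** (snr_db / 10)
    noise = np.sqrt(p_noise / 2) * (
        rng.standard_normal(signal.size) + 1j * rng.standard_normal(signal.size)
    )
    return signal + noise

Each recording would then be saved (for example as .npy, matching the new .gitignore pattern) and labelled with its modulation and SNR.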
@@ -31,7 +31,7 @@ def train_model():
 
     train_flag = True
     batch_size = 128
-    epochs = 1
+    epochs = 50
 
     checkpoint_dir = training_cfg.checkpoint_dir
     checkpoint_filename = training_cfg.checkpoint_filename
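A sketch of how the values in this hunk typically reach a Lightning Trainer, including the use_gpu: true flag from the config above. train_model()'s real wiring is not shown in the diff, so the function below and its arguments are assumptions.

import lightning as L
from lightning.pytorch.callbacks import ModelCheckpoint


def fit(model, train_loader, val_loader, checkpoint_dir, checkpoint_filename,
        epochs=50, use_gpu=True):
    # batch_size (128 in the hunk) is assumed to be applied when the dataloaders are built.
    ckpt_cb = ModelCheckpoint(dirpath=checkpoint_dir, filename=checkpoint_filename)
    trainer = L.Trainer(
        max_epochs=epochs,                       # 1 -> 50 in this commit
        accelerator="gpu" if use_gpu else "cpu",
        callbacks=[ckpt_cb],
    )
    trainer.fit(model, train_loader, val_loader)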
@@ -76,8 +76,8 @@ def train_model():
     hparams = {
         "drop_path_rate": 0.2,
         "drop_rate": 0.5,
-        "learning_rate": 3e-4,
-        "wd": 0.2,
+        "learning_rate": 1e-4,
+        "wd": 0.01,
     }
 
 class RFClassifier(L.LightningModule):
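The hunk above only retunes hparams; the sketch below shows the conventional way such values reach dropout and the optimizer in a LightningModule. This is not the repo's RFClassifier: the backbone is a placeholder, drop_path_rate is omitted (it depends on the backbone used), and reading "wd" as AdamW weight decay is an assumption.

import lightning as L
import torch
from torch import nn


class RFClassifierSketch(L.LightningModule):
    def __init__(self, hparams: dict, num_classes: int = 4):
        super().__init__()
        self.save_hyperparameters(hparams)  # exposes the dict keys via self.hparams
        self.net = nn.Sequential(
            nn.Conv1d(2, 64, kernel_size=7, padding=3),
            nn.ReLU(),
            nn.Dropout(self.hparams["drop_rate"]),
            nn.AdaptiveAvgPool1d(1),
            nn.Flatten(),
            nn.Linear(64, num_classes),
        )

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = nn.functional.cross_entropy(self.net(x), y)
        self.log("train_loss", loss)
        return loss

    def configure_optimizers(self):
        # "learning_rate" and "wd" from the hparams above (3e-4 -> 1e-4, 0.2 -> 0.01).
        return torch.optim.AdamW(
            self.parameters(),
            lr=self.hparams["learning_rate"],
            weight_decay=self.hparams["wd"],
        )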
@@ -33,9 +33,7 @@ class TrainingConfig:
 
 @dataclass
 class InferenceConfig:
-    model_path: str
     num_classes: int
-    output_path: str
 
 
 @dataclass