 train:
-train-manifest: 'examples/manifests/train_manifest.csv'
-val-manifest: 'examples/manifests/val_manifest.csv'
-labels-path: 'examples/labels.json'  # Contains all characters for transcription
-log-dir: 'logs'  # Location for log files
-def-dir: 'examples/checkpoints/'  # Default location to save/load models
-git
-load-from: 'asr_final.pth'  # Checkpoint file to continue training from or finetune
-
-sample-rate: 16000  # Sample rate
-window-size: 0.02  # Window size for spectrogram in seconds
-window-stride: 0.01  # Window stride for spectrogram in seconds
+train_manifest: 'examples/manifests/train_manifest.csv'
+val_manifest: 'examples/manifests/val_manifest.csv'
+labels_path: 'examples/labels.json'  # Contains all characters for transcription
+log_dir: 'logs'  # Location for log files
+def_dir: 'examples/checkpoints/'  # Default location to save/load models
+
+load_from: 'asr_final.pth'  # Checkpoint file to continue training from or finetune
+
+sample_rate: 16000  # Sample rate
+window_size: 0.02  # Window size for spectrogram in seconds
+window_stride: 0.01  # Window stride for spectrogram in seconds
 window: 'hamming'  # Window type for spectrogram generation
 
-batch-size: 32  # Batch size for training
-hidden-size: 800  # Hidden size of RNNs
-hidden-layers: 5  # Number of RNN layers
-rnn-type: 'gru'  # Type of the RNN unit: gru|lstm are supported
+batch_size: 32  # Batch size for training
+hidden_size: 800  # Hidden size of RNNs
+hidden_layers: 5  # Number of RNN layers
+rnn_type: 'gru'  # Type of the RNN unit: gru|lstm are supported
 
-max-epochs: 70  # Number of training epochs
-learning-rate: 3e-4  # Initial learning rate
+max_epochs: 70  # Number of training epochs
+learning_rate: 3e-4  # Initial learning rate
 momentum: 0.9  # Momentum
-max-norm: 800  # Norm cutoff to prevent explosion of gradients
-learning-anneal: 1.1  # Annealing applied to learning rate every epoch
+max_norm: 800  # Norm cutoff to prevent explosion of gradients
+learning_anneal: 1.1  # Annealing applied to learning rate every epoch
 sortaGrad: True  # Order the dataset by sequence length for the first epoch
 
 checkpoint: True  # Enables checkpoint saving of model
-checkpoint-per-epoch: 1  # Save checkpoint per x epochs
+checkpoint_per_epoch: 1  # Save checkpoint per x epochs
 silent: False  # Turn off progress tracking per iteration
 continue: False  # Continue training with a pre-trained model
 finetune: False  # Finetune a pre-trained model
 
-num-data-workers: 8  # Number of workers used in data-loading
+num_data_workers: 8  # Number of workers used in data-loading
 augment: False  # Use random tempo and gain perturbations
 shuffle: True  # Turn on shuffling and sample from the dataset by sequence length (smallest to largest)
 
 seed: 123456  # Seed for generators
 cuda: True  # Use CUDA to train the model
-half-precision: True  # Uses half precision to train a model
+half_precision: True  # Uses half precision to train a model
 apex: True  # Uses mixed precision to train a model
-static-loss-scaling: False  # Static loss scale for mixed precision
-dynamic-loss-scaling: True  # Use dynamic loss scaling for mixed precision
+static_loss_scaling: False  # Static loss scale for mixed precision
+dynamic_loss_scaling: True  # Use dynamic loss scaling for mixed precision
 
-dist-url: 'tcp://127.0.0.1:1550'  # URL used to set up distributed training
-dist-backend: 'nccl'  # Distributed backend
-world-size: 1  # Number of distributed processes
+dist_url: 'tcp://127.0.0.1:1550'  # URL used to set up distributed training
+dist_backend: 'nccl'  # Distributed backend
+world_size: 1  # Number of distributed processes
 rank: 0  # The rank of the current process
-gpu-rank: 0  # If using distributed parallel for multi-GPU, sets the GPU for the process
+gpu_rank: 0  # If using distributed parallel for multi-GPU, sets the GPU for the process
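
A minimal sketch of how a training script might consume the renamed options, assuming the file is parsed with PyYAML; the loader name and path below are hypothetical, and the unwrapping of a nested train: section is an assumption rather than something the diff confirms. One practical benefit of the snake_case keys is that they map directly onto Python attribute and argument names, which the kebab-case keys (e.g. train-manifest) could not.

    from types import SimpleNamespace

    import yaml  # PyYAML


    def load_train_config(path: str) -> SimpleNamespace:
        """Read the YAML file and return its top-level options as a namespace."""
        with open(path, "r") as f:
            cfg = yaml.safe_load(f)
        # Unwrap a nested train: section if present (assumption, not shown by the diff).
        if isinstance(cfg.get("train"), dict):
            cfg = cfg["train"]
        return SimpleNamespace(**cfg)


    if __name__ == "__main__":
        args = load_train_config("examples/train_config.yaml")  # hypothetical path
        print(args.batch_size, args.rnn_type, args.learning_rate)
        # "continue" is a Python keyword, so read that option with getattr().
        print(getattr(args, "continue"))

The dist_url, dist_backend, world_size, and rank options line up with the arguments of torch.distributed.init_process_group, which is presumably how the script initializes multi-process training.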