Edresson committed on
Commit 2ebc057
1 Parent(s): 1a27fd5

Add checkpoints

all_results.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "epoch": 79.0,
+   "eval_loss": 0.33847060799598694,
+   "eval_mem_cpu_alloc_delta": -354455552,
+   "eval_mem_cpu_peaked_delta": 406319104,
+   "eval_mem_gpu_alloc_delta": 0,
+   "eval_mem_gpu_peaked_delta": 5722557440,
+   "eval_runtime": 135.2302,
+   "eval_samples": 1500,
+   "eval_samples_per_second": 11.092,
+   "eval_wer": 0.19016709793430112,
+   "init_mem_cpu_alloc_delta": 1954693120,
+   "init_mem_cpu_peaked_delta": 0,
+   "init_mem_gpu_alloc_delta": 1261939712,
+   "init_mem_gpu_peaked_delta": 0,
+   "train_mem_cpu_alloc_delta": 1794605056,
+   "train_mem_cpu_peaked_delta": 24576,
+   "train_mem_gpu_alloc_delta": 3804792832,
+   "train_mem_gpu_peaked_delta": 9913940480,
+   "train_runtime": 205971.9249,
+   "train_samples": 30823,
+   "train_samples_per_second": 0.109
+ }
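As a quick sanity check on the aggregated results above, the throughput and runtime fields are mutually consistent. A minimal sketch, assuming only the standard library and a local copy of `all_results.json`:

```python
import json

# Load the aggregated metrics written by the Hugging Face Trainer.
with open("all_results.json") as f:
    results = json.load(f)

# eval_samples / eval_runtime reproduces eval_samples_per_second (about 11.09).
print(results["eval_samples"] / results["eval_runtime"])

# Total training wall-clock: 205971.9249 s is roughly 57.2 hours for 79 epochs.
print(results["train_runtime"] / 3600)
```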
config.json ADDED
@@ -0,0 +1,84 @@
+ {
+   "_name_or_path": "facebook/wav2vec2-large-100k-voxpopuli",
+   "activation_dropout": 0.0,
+   "apply_spec_augment": true,
+   "architectures": [
+     "Wav2Vec2ForCTC"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "codevector_dim": 768,
+   "contrastive_logits_temperature": 0.1,
+   "conv_bias": true,
+   "conv_dim": [512, 512, 512, 512, 512, 512, 512],
+   "conv_kernel": [10, 3, 3, 3, 3, 2, 2],
+   "conv_stride": [5, 2, 2, 2, 2, 2, 2],
+   "ctc_loss_reduction": "mean",
+   "ctc_zero_infinity": true,
+   "diversity_loss_weight": 0.1,
+   "do_stable_layer_norm": true,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_dropout": 0.0,
+   "feat_extract_norm": "layer",
+   "feat_proj_dropout": 0.1,
+   "feat_quantizer_dropout": 0.0,
+   "final_dropout": 0.0,
+   "gradient_checkpointing": true,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.0,
+   "mask_channel_length": 10,
+   "mask_channel_min_space": 1,
+   "mask_channel_other": 0.0,
+   "mask_channel_prob": 0.0,
+   "mask_channel_selection": "static",
+   "mask_feature_length": 10,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_space": 1,
+   "mask_time_other": 0.0,
+   "mask_time_prob": 0.05,
+   "mask_time_selection": "static",
+   "model_type": "wav2vec2",
+   "num_attention_heads": 16,
+   "num_codevector_groups": 2,
+   "num_codevectors_per_group": 320,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 24,
+   "num_negatives": 100,
+   "pad_token_id": 0,
+   "proj_codevector_dim": 768,
+   "transformers_version": "4.6.1",
+   "vocab_size": 45
+ }
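The config above is a standard `Wav2Vec2ForCTC` architecture fine-tuned from `facebook/wav2vec2-large-100k-voxpopuli` with a 45-token CTC vocabulary. A minimal loading sketch with Hugging Face Transformers, assuming the files in this commit sit in a local directory named `./checkpoint` (the path is a placeholder):

```python
from transformers import Wav2Vec2Config, Wav2Vec2ForCTC, Wav2Vec2Processor

CHECKPOINT_DIR = "./checkpoint"  # assumed local path holding the files from this commit

# config.json drives the architecture; pytorch_model.bin provides the weights.
config = Wav2Vec2Config.from_pretrained(CHECKPOINT_DIR)
model = Wav2Vec2ForCTC.from_pretrained(CHECKPOINT_DIR)

# preprocessor_config.json, tokenizer_config.json, vocab.json and
# special_tokens_map.json together define the processor.
processor = Wav2Vec2Processor.from_pretrained(CHECKPOINT_DIR)

print(config.vocab_size)  # 45, matching vocab.json
```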
config_train.json ADDED
@@ -0,0 +1,190 @@
+ {
+   "run_name": "Wav2Vec-fine-tuning-TEDx",
+   "run_description": "Fine tuning TEDx",
+   "seed": 42,
+   // AUDIO PARAMS
+   "sampling_rate": 16000,
+
+   // VOCABULARY PARAMETERS
+   "vocab": {
+     "vocab_path": "example/vocab_example.json", // generic vocab for Portuguese
+     "blank": "<pad>", // blank token for padding
+     "silence": "|", // token between words
+     "unk": "<unk>" // unk token
+   },
+
+   // TRAINING
+   "batch_size": 8, // Batch size for training.
+   "mixed_precision": true, // Automatic mixed FP16/FP32 precision (AMP) via NVIDIA's apex; NOTE: currently only the "O1" optimization level is supported.
+   "early_stop_epochs": 10, // If 0, disabled; otherwise the number of epochs without a decrease in validation loss before training stops.
+   "preprocess_dataset": false, // If true, the dataset is pre-processed and saved to disk; otherwise the audio files are loaded at each step. Preprocessing makes training faster but requires much more disk space.
+
+   // OPTIMIZER
+   "epochs": 140, // Total number of epochs to train.
+   "lr": 0.00003, // Initial learning rate.
+   "gradient_accumulation_steps": 24,
+
+   // LOGGING
+   "logging_steps": 100, // Number of steps between logged values.
+   "load_best_model_at_end": true,
+   "save_total_limit": 3,
+   "warmup_ratio": 0.06666666667, // 0 disables. Ratio of total training steps used for a linear warmup from 0 to learning_rate.
+   "warmup_steps": 0, // 0 disables. Number of steps used for a linear warmup from 0 to learning_rate.
+
+   // DATA LOADING
+   "num_loader_workers": 8, // Number of training data-loader processes. Don't set it too high; 4-8 is good.
+
+   // MODEL
+   "freeze_feature_extractor": true, // Whether to freeze the feature extractor layers of the model.
+   "attention_dropout": 0.1, // The dropout ratio for the attention probabilities.
+   "activation_dropout": 0.1, // The dropout ratio for activations inside the fully connected layer.
+   "hidden_dropout": 0.1, // The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+   "feat_proj_dropout": 0.1, // The dropout probability for all 1D convolutional layers in the feature extractor.
+   "mask_time_prob": 0.05, // Probability of each feature vector along the time axis being chosen as the start of the vector span to be masked.
+   "layerdrop": 0.0, // The LayerDrop probability.
+   "gradient_checkpointing": true, // If true, use gradient checkpointing to save memory at the expense of a slower backward pass.
+
+   // ToDo: Implement time mask and frequency mask
+   "audio_augmentation": [
+     // additive noise and room impulse response (RIR) simulation, similar to: https://arxiv.org/pdf/2009.14153.pdf
+     {
+       "name": "additive",
+       "sounds_path": "/workspace/scratch/ecasanova/datasets/YourTTS2ASR/musan/speech/", // download: https://www.openslr.org/17/
+       "lru_cache_size": 32, // Maximum size of the LRU cache for storing noise files in memory
+       "min_snr_in_db": 13.0,
+       "max_snr_in_db": 20.0,
+       // "sample_rate": 16000,
+       "p": 0.25
+     },
+     {
+       "name": "additive",
+       "sounds_path": "/workspace/scratch/ecasanova/datasets/YourTTS2ASR/musan/music/", // download: https://www.openslr.org/17/
+       "lru_cache_size": 32, // Maximum size of the LRU cache for storing noise files in memory
+       "min_snr_in_db": 5.0,
+       "max_snr_in_db": 15.0,
+       // "sample_rate": 16000,
+       "p": 0.25
+     },
+     {
+       "name": "additive",
+       "sounds_path": "/workspace/scratch/ecasanova/datasets/YourTTS2ASR/musan/noise/", // download: https://www.openslr.org/17/
+       "lru_cache_size": 32, // Maximum size of the LRU cache for storing noise files in memory
+       "min_snr_in_db": 0.0,
+       "max_snr_in_db": 15.0,
+       // "sample_rate": 16000,
+       "p": 0.25
+     },
+     // RIR filter proposed by: https://ieeexplore.ieee.org/document/7953152
+     {
+       "name": "rir",
+       "ir_path": "/workspace/scratch/ecasanova/datasets/YourTTS2ASR/RIRS_NOISES/simulated_rirs/", // download: https://www.openslr.org/28/
+       "lru_cache_size": 128, // Maximum size of the LRU cache for storing impulse-response files in memory
+       // "sample_rate": 16000,
+       "p": 0.25
+     },
+     // {
+     //   "name": "gain",
+     //   "min_gain_in_db": -18.0,
+     //   "max_gain_in_db": 6,
+     //   "p": 0.25 // probability of applying this method; 0 disables it
+     // },
+     {
+       "name": "pitch_shift",
+       "min_semitones": -4,
+       "max_semitones": 4,
+       "p": 0.25 // probability of applying this method; 0 disables it
+     },
+     {
+       "name": "gaussian",
+       "min_amplitude": 0.0001,
+       "max_amplitude": 0.001,
+       "p": 0.25 // probability of applying this method; 0 disables it
+     }
+   ],
+   // PATHS
+   "output_path": "../checkpoints/Wav2Vec-voxpopuli/one-speaker/Final-paper/GEN/PT/140-epoch/",
+   // CACHE
+   "dataset_cache": "../datasets/",
+
+   // DATASETS
+   "datasets": {
+     "files_path": "/workspace/scratch/ecasanova/datasets/YourTTS2ASR/Common_Voice/cv-corpus-7.0-2021-07-21/pt/", // relative path for the audios; it will be joined with the CS
+     "train":
+     [
+       // these dicts are passed directly to load_dataset; see the documentation: https://huggingface.co/docs/datasets/package_reference/loading_methods.html#datasets.load_dataset
+       {
+         "name": "csv",
+         "path": "csv",
+         "data_files": ["/workspace/scratch/ecasanova/datasets/YourTTS2ASR/Common_Voice/cv-corpus-7.0-2021-07-21/pt/train_converted_copy_generated_en_speakers.csv"], // csv files
+         "text_column": "text",
+         "path_column": "file_path"
+       },
+       {
+         "name": "csv",
+         "path": "csv",
+         "data_files": ["/workspace/scratch/ecasanova/datasets/YourTTS2ASR/TTS-Portuguese-Corpus_16khz/train_TTS-Portuguese_Corpus_metadata_converted_to_ASR_copy_generated_VC_en_speakers_5_speakers_per_text_fixed.csv"], // csv files
+         "text_column": "text",
+         "path_column": "file_path"
+       },
+       {
+         "name": "csv",
+         "path": "csv",
+         "data_files": ["/workspace/scratch/ecasanova/datasets/YourTTS2ASR/TTS-Portuguese-Corpus_16khz/train_TTS-Portuguese_Corpus_metadata_converted_to_ASR.csv"], // csv files
+         "text_column": "text",
+         "path_column": "file_path"
+       }
+     ],
+     "devel":
+     [
+       {
+         "name": "csv",
+         "path": "csv",
+         "data_files": ["/workspace/scratch/ecasanova/datasets/YourTTS2ASR/Common_Voice/cv-corpus-7.0-2021-07-21/pt/dev_converted_copy_generated_en_speakers_500-samples.csv"], // csv files
+         "text_column": "text",
+         "path_column": "file_path"
+       },
+       {
+         "name": "csv",
+         "path": "csv",
+         "data_files": ["/workspace/scratch/ecasanova/datasets/YourTTS2ASR/TTS-Portuguese-Corpus_16khz/eval_TTS-Portuguese_Corpus_metadata_converted_to_ASR_copy_generated_VC_en_speakers-500-samples.csv"], // csv files
+         "text_column": "text",
+         "path_column": "file_path"
+       },
+       {
+         "name": "csv",
+         "path": "csv",
+         "data_files": ["/workspace/scratch/ecasanova/datasets/YourTTS2ASR/TTS-Portuguese-Corpus_16khz/eval_TTS-Portuguese_Corpus_metadata_converted_to_ASR.csv"], // csv files
+         "text_column": "text",
+         "path_column": "file_path"
+       }
+     ],
+     "test":
+     {
+       "name": "csv",
+       "path": "csv",
+       "data_files": ["/workspace/scratch/ecasanova/datasets/YourTTS2ASR/Common_Voice/cv-corpus-7.0-2021-07-21/pt/test_converted.csv"], // csv files
+       "text_column": "text",
+       "path_column": "file_path"
+     }
+   }//,
+   // used only for test
+   // "KenLM": {
+   //   "kenlm_model_path": "../../kenLM/binaries/subtitle/4-gram/lm.binary", // Path for the KenLM model
+   //   "lexicon_path": "example/lexicon.lst", // file with all words used to limit the decoder search
+   //   "beam": 2048,
+   //   "nbest": 1,
+   //   "beam_threshold": 25,
+   //   "lm_weight": 1,
+   //   "word_score": -1,
+   //   "sil_weight": 0
+   // }
+ }
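The `audio_augmentation` list is consumed by the training script, which is not part of this commit. A rough equivalent of the declared pipeline, sketched with the `audiomentations` library; the class names are an assumption and vary slightly across releases (older versions use the `*_in_db` argument names shown in the config), and the MUSAN/RIR paths are placeholders:

```python
import numpy as np
from audiomentations import (AddBackgroundNoise, AddGaussianNoise,
                             ApplyImpulseResponse, Compose, PitchShift)

# Rough equivalent of the "audio_augmentation" entries above (paths are placeholders).
augment = Compose([
    AddBackgroundNoise(sounds_path="musan/speech", min_snr_in_db=13.0, max_snr_in_db=20.0, p=0.25),
    AddBackgroundNoise(sounds_path="musan/music", min_snr_in_db=5.0, max_snr_in_db=15.0, p=0.25),
    AddBackgroundNoise(sounds_path="musan/noise", min_snr_in_db=0.0, max_snr_in_db=15.0, p=0.25),
    ApplyImpulseResponse(ir_path="RIRS_NOISES/simulated_rirs", p=0.25),
    PitchShift(min_semitones=-4, max_semitones=4, p=0.25),
    AddGaussianNoise(min_amplitude=0.0001, max_amplitude=0.001, p=0.25),
])

# Applied per utterance at load time, before feature extraction.
waveform = np.random.randn(16000).astype(np.float32)  # stand-in for a 1 s clip
augmented = augment(samples=waveform, sample_rate=16000)
```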
eval_results.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "epoch": 79.0,
+   "eval_loss": 0.33847060799598694,
+   "eval_mem_cpu_alloc_delta": -354455552,
+   "eval_mem_cpu_peaked_delta": 406319104,
+   "eval_mem_gpu_alloc_delta": 0,
+   "eval_mem_gpu_peaked_delta": 5722557440,
+   "eval_runtime": 135.2302,
+   "eval_samples": 1500,
+   "eval_samples_per_second": 11.092,
+   "eval_wer": 0.19016709793430112
+ }
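For reference, `eval_wer` (about 0.19 here) is the standard word error rate: word-level edit distance divided by the number of reference words. A minimal sketch with the `jiwer` package, which is an assumption; the training script may compute WER with a different library or text normalization:

```python
import jiwer

reference = "o gato dorme no sofá"
hypothesis = "o gato dormiu no sofá"

# One substituted word out of five reference words -> WER = 0.2.
print(jiwer.wer(reference, hypothesis))
```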
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
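These settings correspond to a `Wav2Vec2FeatureExtractor` that normalizes mono 16 kHz input and returns an attention mask. A small usage sketch; the audio array is a placeholder:

```python
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor(
    feature_size=1, sampling_rate=16000, padding_value=0.0,
    do_normalize=True, return_attention_mask=True,
)

speech = np.zeros(16000, dtype=np.float32)  # placeholder: 1 s of silence at 16 kHz
inputs = extractor(speech, sampling_rate=16000, return_tensors="pt", padding=True)
print(inputs.input_values.shape, inputs.attention_mask.shape)
```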
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eceb36972fbbe269122407bdcd2443cc8486ed88d986ecf5bd49678ee3c0c4e2
+ size 1262118359
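The weights themselves are stored via Git LFS; only the pointer above lives in the git history. After pulling the real file, its size and SHA-256 digest can be checked against the pointer, for example with a standard-library sketch like this:

```python
import hashlib
import os

path = "pytorch_model.bin"
expected_size = 1262118359
expected_sha256 = "eceb36972fbbe269122407bdcd2443cc8486ed88d986ecf5bd49678ee3c0c4e2"

# Hash the file in 1 MiB chunks to keep memory usage flat.
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(path) == expected_size
assert sha.hexdigest() == expected_sha256
print("pytorch_model.bin matches the LFS pointer")
```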
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": false, "word_delimiter_token": "|"}
train_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "epoch": 79.0,
+   "init_mem_cpu_alloc_delta": 1954693120,
+   "init_mem_cpu_peaked_delta": 0,
+   "init_mem_gpu_alloc_delta": 1261939712,
+   "init_mem_gpu_peaked_delta": 0,
+   "train_mem_cpu_alloc_delta": 1794605056,
+   "train_mem_cpu_peaked_delta": 24576,
+   "train_mem_gpu_alloc_delta": 3804792832,
+   "train_mem_gpu_peaked_delta": 9913940480,
+   "train_runtime": 205971.9249,
+   "train_samples": 30823,
+   "train_samples_per_second": 0.109
+ }
trainer_state.json ADDED
@@ -0,0 +1,1417 @@
+ {
+   "best_metric": 0.3218751549720764,
+   "best_model_checkpoint": "../checkpoints/Wav2Vec-voxpopuli/one-speaker/Final-paper/GEN/PT/140-epoch/checkpoint-11040",
+   "epoch": 78.99662600570984,
+   "global_step": 12640,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.01, "learning_rate": 2.008032128514056e-08, "loss": 12.1375, "step": 1},
+     {"epoch": 0.62, "learning_rate": 2.0080321285140564e-06, "loss": 12.2606, "step": 100},
+     {"epoch": 1.0, "eval_loss": 11.042320251464844, "eval_runtime": 128.6432, "eval_samples_per_second": 11.66, "eval_wer": 1.008201394675672, "step": 160},
+     {"epoch": 1.25, "learning_rate": 4.016064257028113e-06, "loss": 11.2029, "step": 200},
+     {"epoch": 1.87, "learning_rate": 6.024096385542168e-06, "loss": 8.2977, "step": 300},
+     {"epoch": 2.0, "eval_loss": 6.322738170623779, "eval_runtime": 127.7737, "eval_samples_per_second": 11.74, "eval_wer": 1.0, "step": 320},
+     {"epoch": 2.5, "learning_rate": 8.032128514056226e-06, "loss": 6.4999, "step": 400},
+     {"epoch": 3.0, "eval_loss": 4.472578525543213, "eval_runtime": 134.2762, "eval_samples_per_second": 11.171, "eval_wer": 1.0, "step": 480},
+     {"epoch": 3.12, "learning_rate": 1.0040160642570281e-05, "loss": 5.3252, "step": 500},
+     {"epoch": 3.75, "learning_rate": 1.2048192771084337e-05, "loss": 4.4851, "step": 600},
+     {"epoch": 4.0, "eval_loss": 3.6344993114471436, "eval_runtime": 133.8576, "eval_samples_per_second": 11.206, "eval_wer": 1.0, "step": 640},
+     {"epoch": 4.37, "learning_rate": 1.4056224899598394e-05, "loss": 3.8872, "step": 700},
+     {"epoch": 5.0, "learning_rate": 1.606425702811245e-05, "loss": 3.4357, "step": 800},
+     {"epoch": 5.0, "eval_loss": 3.175006628036499, "eval_runtime": 134.2322, "eval_samples_per_second": 11.175, "eval_wer": 1.0, "step": 800},
+     {"epoch": 5.62, "learning_rate": 1.807228915662651e-05, "loss": 3.1686, "step": 900},
+     {"epoch": 6.0, "eval_loss": 2.968130111694336, "eval_runtime": 134.7282, "eval_samples_per_second": 11.134, "eval_wer": 1.0, "step": 960},
+     {"epoch": 6.25, "learning_rate": 2.0080321285140562e-05, "loss": 3.0199, "step": 1000},
+     {"epoch": 6.87, "learning_rate": 2.208835341365462e-05, "loss": 2.9408, "step": 1100},
+     {"epoch": 7.0, "eval_loss": 2.8903684616088867, "eval_runtime": 134.0645, "eval_samples_per_second": 11.189, "eval_wer": 1.0, "step": 1120},
+     {"epoch": 7.5, "learning_rate": 2.4096385542168674e-05, "loss": 2.9099, "step": 1200},
+     {"epoch": 8.0, "eval_loss": 2.7165446281433105, "eval_runtime": 133.6155, "eval_samples_per_second": 11.226, "eval_wer": 0.9993421341169247, "step": 1280},
+     {"epoch": 8.12, "learning_rate": 2.6104417670682734e-05, "loss": 2.8137, "step": 1300},
+     {"epoch": 8.75, "learning_rate": 2.8112449799196788e-05, "loss": 2.6131, "step": 1400},
+     {"epoch": 9.0, "eval_loss": 2.137465476989746, "eval_runtime": 132.9892, "eval_samples_per_second": 11.279, "eval_wer": 0.9312310863558616, "step": 1440},
+     {"epoch": 9.37, "learning_rate": 2.9991390031569885e-05, "loss": 2.2614, "step": 1500},
+     {"epoch": 10.0, "learning_rate": 2.9847890557734623e-05, "loss": 1.77, "step": 1600},
+     {"epoch": 10.0, "eval_loss": 1.2687721252441406, "eval_runtime": 133.0804, "eval_samples_per_second": 11.271, "eval_wer": 0.7300118415858954, "step": 1600},
+     {"epoch": 10.62, "learning_rate": 2.9704391083899357e-05, "loss": 1.4261, "step": 1700},
+     {"epoch": 11.0, "eval_loss": 0.9103694558143616, "eval_runtime": 132.9746, "eval_samples_per_second": 11.28, "eval_wer": 0.5371255646682163, "step": 1760},
+     {"epoch": 11.25, "learning_rate": 2.9560891610064095e-05, "loss": 1.2212, "step": 1800},
+     {"epoch": 11.87, "learning_rate": 2.9417392136228833e-05, "loss": 1.075, "step": 1900},
+     {"epoch": 12.0, "eval_loss": 0.7756706476211548, "eval_runtime": 132.9148, "eval_samples_per_second": 11.285, "eval_wer": 0.4647164598043945, "step": 1920},
+     {"epoch": 12.5, "learning_rate": 2.927389266239357e-05, "loss": 0.9948, "step": 2000},
+     {"epoch": 13.0, "eval_loss": 0.666081964969635, "eval_runtime": 134.3606, "eval_samples_per_second": 11.164, "eval_wer": 0.4066488311916144, "step": 2080},
+     {"epoch": 13.12, "learning_rate": 2.9130393188558308e-05, "loss": 0.9185, "step": 2100},
+     {"epoch": 13.75, "learning_rate": 2.8986893714723046e-05, "loss": 0.8638, "step": 2200},
+     {"epoch": 14.0, "eval_loss": 0.6328504085540771, "eval_runtime": 134.3957, "eval_samples_per_second": 11.161, "eval_wer": 0.37897460637691327, "step": 2240},
+     {"epoch": 14.37, "learning_rate": 2.8843394240887784e-05, "loss": 0.8171, "step": 2300},
+     {"epoch": 15.0, "learning_rate": 2.869989476705252e-05, "loss": 0.7853, "step": 2400},
+     {"epoch": 15.0, "eval_loss": 0.5940719246864319, "eval_runtime": 134.4528, "eval_samples_per_second": 11.156, "eval_wer": 0.3550721459585106, "step": 2400},
+     {"epoch": 15.62, "learning_rate": 2.855639529321726e-05, "loss": 0.753, "step": 2500},
+     {"epoch": 16.0, "eval_loss": 0.5752812623977661, "eval_runtime": 135.1505, "eval_samples_per_second": 11.099, "eval_wer": 0.34020437700100875, "step": 2560},
+     {"epoch": 16.25, "learning_rate": 2.8412895819381997e-05, "loss": 0.7261, "step": 2600},
+     {"epoch": 16.87, "learning_rate": 2.8269396345546735e-05, "loss": 0.7005, "step": 2700},
+     {"epoch": 17.0, "eval_loss": 0.512348473072052, "eval_runtime": 134.1054, "eval_samples_per_second": 11.185, "eval_wer": 0.3196351037235209, "step": 2720},
+     {"epoch": 17.5, "learning_rate": 2.8125896871711473e-05, "loss": 0.6796, "step": 2800},
+     {"epoch": 18.0, "eval_loss": 0.5050208568572998, "eval_runtime": 134.5141, "eval_samples_per_second": 11.151, "eval_wer": 0.3070917942195518, "step": 2880},
+     {"epoch": 18.12, "learning_rate": 2.798239739787621e-05, "loss": 0.6586, "step": 2900},
+     {"epoch": 18.75, "learning_rate": 2.7838897924040948e-05, "loss": 0.6396, "step": 3000},
+     {"epoch": 19.0, "eval_loss": 0.5050458312034607, "eval_runtime": 134.3675, "eval_samples_per_second": 11.163, "eval_wer": 0.296697513266962, "step": 3040},
+     {"epoch": 19.37, "learning_rate": 2.7695398450205686e-05, "loss": 0.6189, "step": 3100},
+     {"epoch": 20.0, "learning_rate": 2.7551898976370424e-05, "loss": 0.6127, "step": 3200},
+     {"epoch": 20.0, "eval_loss": 0.46447858214378357, "eval_runtime": 133.8616, "eval_samples_per_second": 11.206, "eval_wer": 0.28691724047190914, "step": 3200},
+     {"epoch": 20.62, "learning_rate": 2.7408399502535158e-05, "loss": 0.5993, "step": 3300},
+     {"epoch": 21.0, "eval_loss": 0.4493682384490967, "eval_runtime": 134.4621, "eval_samples_per_second": 11.156, "eval_wer": 0.28011929301346433, "step": 3360},
+     {"epoch": 21.25, "learning_rate": 2.7264900028699892e-05, "loss": 0.5843, "step": 3400},
+     {"epoch": 21.87, "learning_rate": 2.712140055486463e-05, "loss": 0.58, "step": 3500},
+     {"epoch": 22.0, "eval_loss": 0.44041261076927185, "eval_runtime": 134.062, "eval_samples_per_second": 11.189, "eval_wer": 0.2688039998245691, "step": 3520},
+     {"epoch": 22.5, "learning_rate": 2.6977901081029368e-05, "loss": 0.5584, "step": 3600},
+     {"epoch": 23.0, "eval_loss": 0.45153650641441345, "eval_runtime": 133.1081, "eval_samples_per_second": 11.269, "eval_wer": 0.27783869128546995, "step": 3680},
+     {"epoch": 23.12, "learning_rate": 2.6834401607194106e-05, "loss": 0.5549, "step": 3700},
+     {"epoch": 23.75, "learning_rate": 2.6690902133358844e-05, "loss": 0.5425, "step": 3800},
+     {"epoch": 24.0, "eval_loss": 0.42425668239593506, "eval_runtime": 133.8644, "eval_samples_per_second": 11.205, "eval_wer": 0.2665233980965747, "step": 3840},
+     {"epoch": 24.37, "learning_rate": 2.654740265952358e-05, "loss": 0.531, "step": 3900},
+     {"epoch": 25.0, "learning_rate": 2.640390318568832e-05, "loss": 0.5273, "step": 4000},
+     {"epoch": 25.0, "eval_loss": 0.43647119402885437, "eval_runtime": 132.965, "eval_samples_per_second": 11.281, "eval_wer": 0.26696197535195826, "step": 4000},
+     {"epoch": 25.62, "learning_rate": 2.6260403711853057e-05, "loss": 0.5147, "step": 4100},
+     {"epoch": 26.0, "eval_loss": 0.41047143936157227, "eval_runtime": 135.0829, "eval_samples_per_second": 11.104, "eval_wer": 0.24959431603877022, "step": 4160},
+     {"epoch": 26.25, "learning_rate": 2.6116904238017795e-05, "loss": 0.5136, "step": 4200},
+     {"epoch": 26.87, "learning_rate": 2.5973404764182532e-05, "loss": 0.5042, "step": 4300},
+     {"epoch": 27.0, "eval_loss": 0.4049687385559082, "eval_runtime": 134.3457, "eval_samples_per_second": 11.165, "eval_wer": 0.2538046576904522, "step": 4320},
+     {"epoch": 27.5, "learning_rate": 2.582990529034727e-05, "loss": 0.4925, "step": 4400},
+     {"epoch": 28.0, "eval_loss": 0.41241469979286194, "eval_runtime": 134.126, "eval_samples_per_second": 11.184, "eval_wer": 0.251173194158151, "step": 4480},
+     {"epoch": 28.12, "learning_rate": 2.5686405816512008e-05, "loss": 0.4925, "step": 4500},
+     {"epoch": 28.75, "learning_rate": 2.5542906342676746e-05, "loss": 0.4753, "step": 4600},
+     {"epoch": 29.0, "eval_loss": 0.40935131907463074, "eval_runtime": 134.4907, "eval_samples_per_second": 11.153, "eval_wer": 0.24437524669970614, "step": 4640},
+     {"epoch": 29.37, "learning_rate": 2.5399406868841483e-05, "loss": 0.4773, "step": 4700},
+     {"epoch": 30.0, "learning_rate": 2.525590739500622e-05, "loss": 0.4706, "step": 4800},
+     {"epoch": 30.0, "eval_loss": 0.3854382336139679, "eval_runtime": 134.5082, "eval_samples_per_second": 11.152, "eval_wer": 0.23424411210034649, "step": 4800},
+     {"epoch": 30.62, "learning_rate": 2.511240792117096e-05, "loss": 0.4625, "step": 4900},
+     {"epoch": 31.0, "eval_loss": 0.3864579200744629, "eval_runtime": 134.0786, "eval_samples_per_second": 11.187, "eval_wer": 0.23937546598833384, "step": 4960},
+     {"epoch": 31.25, "learning_rate": 2.4968908447335693e-05, "loss": 0.4586, "step": 5000},
+     {"epoch": 31.87, "learning_rate": 2.482540897350043e-05, "loss": 0.4512, "step": 5100},
+     {"epoch": 32.0, "eval_loss": 0.3823211193084717, "eval_runtime": 134.3959, "eval_samples_per_second": 11.161, "eval_wer": 0.2313056444892768, "step": 5120},
+     {"epoch": 32.5, "learning_rate": 2.468190949966517e-05, "loss": 0.4445, "step": 5200},
+     {"epoch": 33.0, "eval_loss": 0.38146257400512695, "eval_runtime": 134.7806, "eval_samples_per_second": 11.129, "eval_wer": 0.23472654708126836, "step": 5280},
+     {"epoch": 33.12, "learning_rate": 2.4538410025829903e-05, "loss": 0.4451, "step": 5300},
+     {"epoch": 33.75, "learning_rate": 2.439491055199464e-05, "loss": 0.435, "step": 5400},
+     {"epoch": 34.0, "eval_loss": 0.3864315450191498, "eval_runtime": 134.161, "eval_samples_per_second": 11.181, "eval_wer": 0.23200736809789044, "step": 5440},
+     {"epoch": 34.37, "learning_rate": 2.425141107815938e-05, "loss": 0.4342, "step": 5500},
+     {"epoch": 35.0, "learning_rate": 2.4107911604324117e-05, "loss": 0.4284, "step": 5600},
+     {"epoch": 35.0, "eval_loss": 0.370952308177948, "eval_runtime": 134.1583, "eval_samples_per_second": 11.181, "eval_wer": 0.22547256699267576, "step": 5600},
+     {"epoch": 35.62, "learning_rate": 2.3964412130488854e-05, "loss": 0.4271, "step": 5700},
+     {"epoch": 36.0, "eval_loss": 0.3921850621700287, "eval_runtime": 133.9964, "eval_samples_per_second": 11.194, "eval_wer": 0.2336739616683479, "step": 5760},
+     {"epoch": 36.25, "learning_rate": 2.3820912656653592e-05, "loss": 0.4137, "step": 5800},
+     {"epoch": 36.87, "learning_rate": 2.367741318281833e-05, "loss": 0.4143, "step": 5900},
+     {"epoch": 37.0, "eval_loss": 0.37270987033843994, "eval_runtime": 133.8613, "eval_samples_per_second": 11.206, "eval_wer": 0.2222270953028376, "step": 5920},
+     {"epoch": 37.5, "learning_rate": 2.3533913708983068e-05, "loss": 0.4125, "step": 6000},
+     {"epoch": 38.0, "eval_loss": 0.37099114060401917, "eval_runtime": 133.4063, "eval_samples_per_second": 11.244, "eval_wer": 0.21977106267268978, "step": 6080},
+     {"epoch": 38.12, "learning_rate": 2.3390414235147805e-05, "loss": 0.4025, "step": 6100},
+     {"epoch": 38.75, "learning_rate": 2.3246914761312543e-05, "loss": 0.4011, "step": 6200},
+     {"epoch": 39.0, "eval_loss": 0.36733388900756836, "eval_runtime": 134.2886, "eval_samples_per_second": 11.17, "eval_wer": 0.2247708433840621, "step": 6240},
+     {"epoch": 39.37, "learning_rate": 2.310341528747728e-05, "loss": 0.395, "step": 6300},
+     {"epoch": 40.0, "learning_rate": 2.295991581364202e-05, "loss": 0.3976, "step": 6400},
+     {"epoch": 40.0, "eval_loss": 0.38662710785865784, "eval_runtime": 134.9794, "eval_samples_per_second": 11.113, "eval_wer": 0.22384983114775667, "step": 6400},
+     {"epoch": 40.62, "learning_rate": 2.2816416339806756e-05, "loss": 0.3912, "step": 6500},
+     {"epoch": 41.0, "eval_loss": 0.368117094039917, "eval_runtime": 134.9215, "eval_samples_per_second": 11.118, "eval_wer": 0.22082364808561028, "step": 6560},
+     {"epoch": 41.25, "learning_rate": 2.2672916865971494e-05, "loss": 0.3841, "step": 6600},
+     {"epoch": 41.87, "learning_rate": 2.252941739213623e-05, "loss": 0.3856, "step": 6700},
+     {"epoch": 42.0, "eval_loss": 0.34648868441581726, "eval_runtime": 135.1982, "eval_samples_per_second": 11.095, "eval_wer": 0.21569229419762292, "step": 6720},
+     {"epoch": 42.5, "learning_rate": 2.2385917918300966e-05, "loss": 0.3814, "step": 6800},
+     {"epoch": 43.0, "eval_loss": 0.35718342661857605, "eval_runtime": 134.5426, "eval_samples_per_second": 11.149, "eval_wer": 0.2124906802333231, "step": 6880},
+     {"epoch": 43.12, "learning_rate": 2.2242418444465704e-05, "loss": 0.3765, "step": 6900},
+     {"epoch": 43.75, "learning_rate": 2.2098918970630442e-05, "loss": 0.3731, "step": 7000},
+     {"epoch": 44.0, "eval_loss": 0.3691784143447876, "eval_runtime": 134.9182, "eval_samples_per_second": 11.118, "eval_wer": 0.21021007850532872, "step": 7040},
+     {"epoch": 44.37, "learning_rate": 2.195541949679518e-05, "loss": 0.3679, "step": 7100},
+     {"epoch": 45.0, "learning_rate": 2.1811920022959917e-05, "loss": 0.363, "step": 7200},
+     {"epoch": 45.0, "eval_loss": 0.3651253283023834, "eval_runtime": 136.1991, "eval_samples_per_second": 11.013, "eval_wer": 0.21183281435024778, "step": 7200},
+     {"epoch": 45.62, "learning_rate": 2.1668420549124652e-05, "loss": 0.3628, "step": 7300},
+     {"epoch": 46.0, "eval_loss": 0.35606908798217773, "eval_runtime": 136.1343, "eval_samples_per_second": 11.019, "eval_wer": 0.20889434673917812, "step": 7360},
+     {"epoch": 46.25, "learning_rate": 2.152492107528939e-05, "loss": 0.364, "step": 7400},
+     {"epoch": 46.87, "learning_rate": 2.1381421601454127e-05, "loss": 0.3582, "step": 7500},
+     {"epoch": 47.0, "eval_loss": 0.3635926842689514, "eval_runtime": 136.0098, "eval_samples_per_second": 11.029, "eval_wer": 0.20714003771764397, "step": 7520},
+     {"epoch": 47.5, "learning_rate": 2.1237922127618865e-05, "loss": 0.3563, "step": 7600},
+     {"epoch": 48.0, "eval_loss": 0.3590168356895447, "eval_runtime": 136.0862, "eval_samples_per_second": 11.022, "eval_wer": 0.21227139160563133, "step": 7680},
+     {"epoch": 48.12, "learning_rate": 2.1094422653783603e-05, "loss": 0.3535, "step": 7700},
+     {"epoch": 48.75, "learning_rate": 2.095092317994834e-05, "loss": 0.3518, "step": 7800},
+     {"epoch": 49.0, "eval_loss": 0.3534136414527893, "eval_runtime": 135.7077, "eval_samples_per_second": 11.053, "eval_wer": 0.2056050173238016, "step": 7840},
+     {"epoch": 49.37, "learning_rate": 2.080742370611308e-05, "loss": 0.3483, "step": 7900},
+     {"epoch": 50.0, "learning_rate": 2.0663924232277816e-05, "loss": 0.3482, "step": 8000},
+     {"epoch": 50.0, "eval_loss": 0.35351213812828064, "eval_runtime": 135.799, "eval_samples_per_second": 11.046, "eval_wer": 0.2045524319108811, "step": 8000},
+     {"epoch": 50.62, "learning_rate": 2.0521859753180906e-05, "loss": 0.345, "step": 8100},
+     {"epoch": 51.0, "eval_loss": 0.35218343138694763, "eval_runtime": 137.1629, "eval_samples_per_second": 10.936, "eval_wer": 0.20389456602780578, "step": 8160},
+     {"epoch": 51.25, "learning_rate": 2.037836027934564e-05, "loss": 0.3384, "step": 8200},
+     {"epoch": 51.87, "learning_rate": 2.0234860805510378e-05, "loss": 0.3379, "step": 8300},
+     {"epoch": 52.0, "eval_loss": 0.3376038074493408, "eval_runtime": 135.192, "eval_samples_per_second": 11.095, "eval_wer": 0.20165782202534976, "step": 8320},
+     {"epoch": 52.5, "learning_rate": 2.0091361331675116e-05, "loss": 0.3324, "step": 8400},
+     {"epoch": 53.0, "eval_loss": 0.349118173122406, "eval_runtime": 136.9229, "eval_samples_per_second": 10.955, "eval_wer": 0.20428928555765097, "step": 8480},
+     {"epoch": 53.12, "learning_rate": 1.9947861857839854e-05, "loss": 0.3352, "step": 8500},
+     {"epoch": 53.75, "learning_rate": 1.980436238400459e-05, "loss": 0.3329, "step": 8600},
+     {"epoch": 54.0, "eval_loss": 0.3493926525115967, "eval_runtime": 135.7895, "eval_samples_per_second": 11.047, "eval_wer": 0.20196482610411823, "step": 8640},
+     {"epoch": 54.37, "learning_rate": 1.966086291016933e-05, "loss": 0.3264, "step": 8700},
+     {"epoch": 55.0, "learning_rate": 1.9517363436334067e-05, "loss": 0.3303, "step": 8800},
+     {"epoch": 55.0, "eval_loss": 0.3516405522823334, "eval_runtime": 134.0631, "eval_samples_per_second": 11.189, "eval_wer": 0.19902635849304856, "step": 8800},
+     {"epoch": 55.62, "learning_rate": 1.9373863962498805e-05, "loss": 0.3258, "step": 8900},
+     {"epoch": 56.0, "eval_loss": 0.3470868468284607, "eval_runtime": 134.5501, "eval_samples_per_second": 11.148, "eval_wer": 0.20385070830226745, "step": 8960},
+     {"epoch": 56.25, "learning_rate": 1.9230364488663542e-05, "loss": 0.3261, "step": 9000},
+     {"epoch": 56.87, "learning_rate": 1.908686501482828e-05, "loss": 0.3189, "step": 9100},
+     {"epoch": 57.0, "eval_loss": 0.3370627164840698, "eval_runtime": 134.7548, "eval_samples_per_second": 11.131, "eval_wer": 0.20007894390596903, "step": 9120},
+     {"epoch": 57.5, "learning_rate": 1.8943365540993018e-05, "loss": 0.3137, "step": 9200},
+     {"epoch": 58.0, "eval_loss": 0.34424567222595215, "eval_runtime": 135.5794, "eval_samples_per_second": 11.064, "eval_wer": 0.20196482610411823, "step": 9280},
+     {"epoch": 58.12, "learning_rate": 1.8799866067157756e-05, "loss": 0.3187, "step": 9300},
+     {"epoch": 58.75, "learning_rate": 1.8656366593322493e-05, "loss": 0.3166, "step": 9400},
+     {"epoch": 59.0, "eval_loss": 0.347896009683609, "eval_runtime": 136.6913, "eval_samples_per_second": 10.974, "eval_wer": 0.20288583834042367, "step": 9440},
+     {"epoch": 59.37, "learning_rate": 1.851286711948723e-05, "loss": 0.3135, "step": 9500},
+     {"epoch": 60.0, "learning_rate": 1.836936764565197e-05, "loss": 0.3128, "step": 9600},
+     {"epoch": 60.0, "eval_loss": 0.3388006091117859, "eval_runtime": 135.2694, "eval_samples_per_second": 11.089, "eval_wer": 0.1940704355072146, "step": 9600},
+     {"epoch": 60.62, "learning_rate": 1.8225868171816707e-05, "loss": 0.3073, "step": 9700},
+     {"epoch": 61.0, "eval_loss": 0.3377033472061157, "eval_runtime": 135.4444, "eval_samples_per_second": 11.075, "eval_wer": 0.19205298013245034, "step": 9760},
+     {"epoch": 61.25, "learning_rate": 1.808236869798144e-05, "loss": 0.3061, "step": 9800},
+     {"epoch": 61.87, "learning_rate": 1.794030421888453e-05, "loss": 0.3053, "step": 9900},
+     {"epoch": 62.0, "eval_loss": 0.3457684814929962, "eval_runtime": 134.2365, "eval_samples_per_second": 11.174, "eval_wer": 0.19551774044998027, "step": 9920},
+     {"epoch": 62.5, "learning_rate": 1.779680474504927e-05, "loss": 0.3021, "step": 10000},
+     {"epoch": 63.0, "eval_loss": 0.33506670594215393, "eval_runtime": 135.3785, "eval_samples_per_second": 11.08, "eval_wer": 0.19490373229244332, "step": 10080},
+     {"epoch": 63.12, "learning_rate": 1.7653305271214006e-05, "loss": 0.3021, "step": 10100},
+     {"epoch": 63.75, "learning_rate": 1.7509805797378744e-05, "loss": 0.2972, "step": 10200},
+     {"epoch": 64.0, "eval_loss": 0.3553718328475952, "eval_runtime": 135.8925, "eval_samples_per_second": 11.038, "eval_wer": 0.19806148853120478, "step": 10240},
+     {"epoch": 64.37, "learning_rate": 1.7366306323543482e-05, "loss": 0.2987, "step": 10300},
+     {"epoch": 65.0, "learning_rate": 1.7222806849708216e-05, "loss": 0.2951, "step": 10400},
+     {"epoch": 65.0, "eval_loss": 0.35284093022346497, "eval_runtime": 136.2097, "eval_samples_per_second": 11.012, "eval_wer": 0.20051752116135257, "step": 10400},
+     {"epoch": 65.62, "learning_rate": 1.7079307375872954e-05, "loss": 0.2972, "step": 10500},
+     {"epoch": 66.0, "eval_loss": 0.34928229451179504, "eval_runtime": 135.0832, "eval_samples_per_second": 11.104, "eval_wer": 0.197842199903513, "step": 10560},
+     {"epoch": 66.25, "learning_rate": 1.6935807902037692e-05, "loss": 0.2932, "step": 10600},
+     {"epoch": 66.87, "learning_rate": 1.679230842820243e-05, "loss": 0.2923, "step": 10700},
+     {"epoch": 67.0, "eval_loss": 0.35474905371665955, "eval_runtime": 136.3777, "eval_samples_per_second": 10.999, "eval_wer": 0.1951230209201351, "step": 10720},
+     {"epoch": 67.5, "learning_rate": 1.6648808954367167e-05, "loss": 0.2944, "step": 10800},
+     {"epoch": 68.0, "eval_loss": 0.3355397582054138, "eval_runtime": 136.8623, "eval_samples_per_second": 10.96, "eval_wer": 0.19354414280075435, "step": 10880},
+     {"epoch": 68.12, "learning_rate": 1.6505309480531905e-05, "loss": 0.2921, "step": 10900},
+     {"epoch": 68.75, "learning_rate": 1.6361810006696643e-05, "loss": 0.2898, "step": 11000},
+     {"epoch": 69.0, "eval_loss": 0.3218751549720764, "eval_runtime": 134.7659, "eval_samples_per_second": 11.13, "eval_wer": 0.1917898337792202, "step": 11040},
+     {"epoch": 69.37, "learning_rate": 1.621831053286138e-05, "loss": 0.2838, "step": 11100},
+     {"epoch": 70.0, "learning_rate": 1.607481105902612e-05, "loss": 0.2841, "step": 11200},
+     {"epoch": 70.0, "eval_loss": 0.328848272562027, "eval_runtime": 135.8246, "eval_samples_per_second": 11.044, "eval_wer": 0.1837638700057015, "step": 11200},
+     {"epoch": 70.62, "learning_rate": 1.5931311585190853e-05, "loss": 0.2869, "step": 11300},
+     {"epoch": 71.0, "eval_loss": 0.3385452330112457, "eval_runtime": 136.3804, "eval_samples_per_second": 10.999, "eval_wer": 0.19521073637121178, "step": 11360},
+     {"epoch": 71.25, "learning_rate": 1.578781211135559e-05, "loss": 0.2824, "step": 11400},
+     {"epoch": 71.87, "learning_rate": 1.564431263752033e-05, "loss": 0.2798, "step": 11500},
+     {"epoch": 72.0, "eval_loss": 0.33345770835876465, "eval_runtime": 136.5311, "eval_samples_per_second": 10.987, "eval_wer": 0.1847287399675453, "step": 11520},
+     {"epoch": 72.5, "learning_rate": 1.5500813163685066e-05, "loss": 0.279, "step": 11600},
+     {"epoch": 73.0, "eval_loss": 0.3369726836681366, "eval_runtime": 136.9141, "eval_samples_per_second": 10.956, "eval_wer": 0.18871979299153546, "step": 11680},
+     {"epoch": 73.12, "learning_rate": 1.5357313689849804e-05, "loss": 0.2778, "step": 11700},
+     {"epoch": 73.75, "learning_rate": 1.5213814216014542e-05, "loss": 0.276, "step": 11800},
+     {"epoch": 74.0, "eval_loss": 0.33662334084510803, "eval_runtime": 137.0118, "eval_samples_per_second": 10.948, "eval_wer": 0.19503530546905837, "step": 11840},
+     {"epoch": 74.37, "learning_rate": 1.507031474217928e-05, "loss": 0.2792, "step": 11900},
+     {"epoch": 75.0, "learning_rate": 1.4926815268344016e-05, "loss": 0.273, "step": 12000},
+     {"epoch": 75.0, "eval_loss": 0.3384232521057129, "eval_runtime": 136.9469, "eval_samples_per_second": 10.953, "eval_wer": 0.18722863032323145, "step": 12000},
+     {"epoch": 75.62, "learning_rate": 1.4783315794508753e-05, "loss": 0.2756, "step": 12100},
+     {"epoch": 76.0, "eval_loss": 0.33496129512786865, "eval_runtime": 135.0526, "eval_samples_per_second": 11.107, "eval_wer": 0.18389544318231657, "step": 12160},
+     {"epoch": 76.25, "learning_rate": 1.4639816320673491e-05, "loss": 0.2746, "step": 12200},
+     {"epoch": 76.87, "learning_rate": 1.4496316846838229e-05, "loss": 0.2668, "step": 12300},
+     {"epoch": 77.0, "eval_loss": 0.3347358703613281, "eval_runtime": 137.095, "eval_samples_per_second": 10.941, "eval_wer": 0.19091267926845315, "step": 12320},
+     {"epoch": 77.5, "learning_rate": 1.4352817373002967e-05, "loss": 0.2713, "step": 12400},
+     {"epoch": 78.0, "eval_loss": 0.3463807702064514, "eval_runtime": 135.9673, "eval_samples_per_second": 11.032, "eval_wer": 0.18714091487215473, "step": 12480},
+     {"epoch": 78.12, "learning_rate": 1.4209317899167704e-05, "loss": 0.2636, "step": 12500},
+     {"epoch": 78.75, "learning_rate": 1.406581842533244e-05, "loss": 0.2685, "step": 12600},
+     {"epoch": 79.0, "eval_loss": 0.35181865096092224, "eval_runtime": 136.9541, "eval_samples_per_second": 10.953, "eval_wer": 0.19306170781983245, "step": 12640},
+     {"epoch": 79.0, "step": 12640, "total_flos": 0, "train_runtime": 205971.9249, "train_samples_per_second": 0.109}
+   ],
+   "max_steps": 22400,
+   "num_train_epochs": 140,
+   "total_flos": 0,
+   "trial_name": null,
+   "trial_params": null
+ }
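`log_history` keeps one entry per logging step and one per epoch-end evaluation, so the WER curve (from 1.0 at the start down to roughly 0.19) can be pulled straight out of this file. A small sketch, assuming a local copy of `trainer_state.json`:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-epoch evaluation entries, which carry "eval_wer".
evals = [e for e in state["log_history"] if "eval_wer" in e]
for e in evals[-3:]:
    print(e["epoch"], e["step"], round(e["eval_wer"], 4))

# The entry with the lowest eval_loss corresponds to checkpoint-11040.
best = min(evals, key=lambda e: e["eval_loss"])
print("best eval_loss", best["eval_loss"], "at step", best["step"])
```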
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9592f65593e188d4a169b4c0656cde5e2706f06a580f398fe319347ae191788
+ size 2543
vocab.json ADDED
@@ -0,0 +1 @@
+ {"<pad>": 0, "|": 1, "<unk>": 2, "a": 3, "b": 4, "c": 5, "d": 6, "e": 7, "f": 8, "g": 9, "h": 10, "i": 11, "j": 12, "k": 13, "l": 14, "m": 15, "n": 16, "o": 17, "p": 18, "q": 19, "r": 20, "s": 21, "t": 22, "u": 23, "v": 24, "w": 25, "x": 26, "y": 27, "z": 28, "ç": 29, "ã": 30, "à": 31, "á": 32, "â": 33, "ê": 34, "é": 35, "í": 36, "ó": 37, "ô": 38, "õ": 39, "ú": 40, "û": 41, "-": 42, "<s>": 43, "</s>": 44}