Edresson committed on
Commit
b96927a
1 Parent(s): 49bc98e

Add checkpoints

README.md ADDED
@@ -0,0 +1,69 @@
1
+ ---
2
+ language: pt
3
+ datasets:
4
+ - Common Voice
5
+ metrics:
6
+ - wer
7
+ tags:
8
+ - audio
9
+ - speech
10
+ - wav2vec2
11
+ - pt
12
+ - portuguese-speech-corpus
13
+ - automatic-speech-recognition
15
+ - PyTorch
16
+ license: apache-2.0
17
+ model-index:
18
+ - name: Edresson Casanova Wav2vec2 Large 100k Voxpopuli fine-tuned with a single-speaker dataset in Portuguese
19
+ results:
20
+ - task:
21
+ name: Speech Recognition
22
+ type: automatic-speech-recognition
23
+ metrics:
24
+ - name: Test Common Voice 7.0 WER
25
+ type: wer
26
+ value: 63.90
27
+ ---
28
+
29
+ # Wav2vec2 Large 100k Voxpopuli fine-tuned with a single-speaker dataset plus Data Augmentation in Portuguese
30
+
31
+ [Wav2vec2 Large 100k Voxpopuli](https://huggingface.co/facebook/wav2vec2-large-100k-voxpopuli) fine-tuned for Portuguese using the single-speaker TTS-Portuguese Corpus.
32
+
33
+
34
+
35
+ # Use this model
36
+
37
+ ```python
38
+
39
+ from transformers import AutoTokenizer, Wav2Vec2ForCTC
40
+
41
+ tokenizer = AutoTokenizer.from_pretrained("Edresson/wav2vec2-large-100k-voxpopuli-ft-TTS-Dataset-portuguese")
42
+
43
+ model = Wav2Vec2ForCTC.from_pretrained("Edresson/wav2vec2-large-100k-voxpopuli-ft-TTS-Dataset-portuguese")
44
+ ```
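+
+ This commit also ships a `preprocessor_config.json`, `vocab.json` and `tokenizer_config.json`, so the feature extractor and tokenizer can be loaded together through `Wav2Vec2Processor`. A minimal transcription sketch (the file name `example.wav` is just a placeholder) could look like this:
+
+ ```python
+ import torch
+ import torchaudio
+ from transformers import Wav2Vec2Processor
+
+ processor = Wav2Vec2Processor.from_pretrained("Edresson/wav2vec2-large-100k-voxpopuli-ft-TTS-Dataset-portuguese")
+
+ # load an audio file and resample it to the model's 16 kHz sampling rate
+ speech, sr = torchaudio.load("example.wav")  # placeholder path
+ speech = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16_000)(speech.squeeze(0))
+
+ inputs = processor(speech.numpy(), sampling_rate=16_000, return_tensors="pt")
+ with torch.no_grad():
+     logits = model(inputs.input_values).logits  # `model` as loaded above
+ predicted_ids = torch.argmax(logits, dim=-1)
+ print(processor.batch_decode(predicted_ids))
+ ```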
45
+ # Results
46
+ For the results, check the [article (Soon)]().
47
+
48
+ # Example test with Common Voice Dataset
49
+
50
+
51
+ ```python
+ import re
+
+ import torchaudio
+ from datasets import load_dataset
+
+ dataset = load_dataset("common_voice", "pt", split="test", data_dir="./cv-corpus-7.0-2021-07-21")
+
+ resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000)
+
+ # example set of punctuation to strip from the references (not defined on the original card; adjust as needed)
+ chars_to_ignore_regex = '[,?.!;:"]'
+
+ def map_to_array(batch):
+     speech, _ = torchaudio.load(batch["path"])
+     batch["speech"] = resampler.forward(speech.squeeze(0)).numpy()
+     batch["sampling_rate"] = resampler.new_freq
+     batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'")
+     return batch
+ ```
63
+
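+ The evaluation snippet below relies on a `map_to_pred` function and a `wer` metric that are not defined on this card. A minimal sketch of both, assuming the `processor` and `model` loaded above and the WER metric from the `datasets` library, could look like this:
+
+ ```python
+ import torch
+ from datasets import load_metric
+
+ wer = load_metric("wer")
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model.to(device)  # `model` and `processor` as loaded above
+
+ def map_to_pred(batch):
+     features = processor(batch["speech"], sampling_rate=16_000, padding=True, return_tensors="pt")
+     with torch.no_grad():
+         logits = model(features.input_values.to(device)).logits
+     predicted_ids = torch.argmax(logits, dim=-1)
+     batch["predicted"] = processor.batch_decode(predicted_ids)
+     batch["target"] = batch["sentence"]
+     return batch
+ ```
+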
64
+ ```python
65
+ ds = dataset.map(map_to_array)
66
+ result = ds.map(map_to_pred, batched=True, batch_size=1, remove_columns=list(ds.features.keys()))
67
+ print(wer.compute(predictions=result["predicted"], references=result["target"]))
68
+ ```
69
+
all_results.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "epoch": 129.99,
3
+ "eval_loss": 0.6900125741958618,
4
+ "eval_mem_cpu_alloc_delta": 152395776,
5
+ "eval_mem_cpu_peaked_delta": 122880,
6
+ "eval_mem_gpu_alloc_delta": 0,
7
+ "eval_mem_gpu_peaked_delta": 7244027392,
8
+ "eval_runtime": 49.6987,
9
+ "eval_samples": 500,
10
+ "eval_samples_per_second": 10.061,
11
+ "eval_wer": 0.4532554257095159,
12
+ "init_mem_cpu_alloc_delta": 3913703424,
13
+ "init_mem_cpu_peaked_delta": 1208139776,
14
+ "init_mem_gpu_alloc_delta": 1261939712,
15
+ "init_mem_gpu_peaked_delta": 0,
16
+ "train_mem_cpu_alloc_delta": 2117373952,
17
+ "train_mem_cpu_peaked_delta": 24576,
18
+ "train_mem_gpu_alloc_delta": 3778624512,
19
+ "train_mem_gpu_peaked_delta": 9083709440,
20
+ "train_runtime": 94419.6153,
21
+ "train_samples": 3083,
22
+ "train_samples_per_second": 0.024
23
+ }
config.json ADDED
@@ -0,0 +1,84 @@
1
+ {
2
+ "_name_or_path": "facebook/wav2vec2-large-100k-voxpopuli",
3
+ "activation_dropout": 0.0,
4
+ "apply_spec_augment": true,
5
+ "architectures": [
6
+ "Wav2Vec2ForCTC"
7
+ ],
8
+ "attention_dropout": 0.1,
9
+ "bos_token_id": 1,
10
+ "codevector_dim": 768,
11
+ "contrastive_logits_temperature": 0.1,
12
+ "conv_bias": true,
13
+ "conv_dim": [
14
+ 512,
15
+ 512,
16
+ 512,
17
+ 512,
18
+ 512,
19
+ 512,
20
+ 512
21
+ ],
22
+ "conv_kernel": [
23
+ 10,
24
+ 3,
25
+ 3,
26
+ 3,
27
+ 3,
28
+ 2,
29
+ 2
30
+ ],
31
+ "conv_stride": [
32
+ 5,
33
+ 2,
34
+ 2,
35
+ 2,
36
+ 2,
37
+ 2,
38
+ 2
39
+ ],
40
+ "ctc_loss_reduction": "mean",
41
+ "ctc_zero_infinity": true,
42
+ "diversity_loss_weight": 0.1,
43
+ "do_stable_layer_norm": true,
44
+ "eos_token_id": 2,
45
+ "feat_extract_activation": "gelu",
46
+ "feat_extract_dropout": 0.0,
47
+ "feat_extract_norm": "layer",
48
+ "feat_proj_dropout": 0.1,
49
+ "feat_quantizer_dropout": 0.0,
50
+ "final_dropout": 0.0,
51
+ "gradient_checkpointing": true,
52
+ "hidden_act": "gelu",
53
+ "hidden_dropout": 0.1,
54
+ "hidden_size": 1024,
55
+ "initializer_range": 0.02,
56
+ "intermediate_size": 4096,
57
+ "layer_norm_eps": 1e-05,
58
+ "layerdrop": 0.0,
59
+ "mask_channel_length": 10,
60
+ "mask_channel_min_space": 1,
61
+ "mask_channel_other": 0.0,
62
+ "mask_channel_prob": 0.0,
63
+ "mask_channel_selection": "static",
64
+ "mask_feature_length": 10,
65
+ "mask_feature_prob": 0.0,
66
+ "mask_time_length": 10,
67
+ "mask_time_min_space": 1,
68
+ "mask_time_other": 0.0,
69
+ "mask_time_prob": 0.05,
70
+ "mask_time_selection": "static",
71
+ "model_type": "wav2vec2",
72
+ "num_attention_heads": 16,
73
+ "num_codevector_groups": 2,
74
+ "num_codevectors_per_group": 320,
75
+ "num_conv_pos_embedding_groups": 16,
76
+ "num_conv_pos_embeddings": 128,
77
+ "num_feat_extract_layers": 7,
78
+ "num_hidden_layers": 24,
79
+ "num_negatives": 100,
80
+ "pad_token_id": 0,
81
+ "proj_codevector_dim": 768,
82
+ "transformers_version": "4.6.1",
83
+ "vocab_size": 45
84
+ }
config_train.json ADDED
@@ -0,0 +1,162 @@
1
+ {
2
+ "run_name": "Wav2Vec-fine-tuning-TEDx",
3
+ "run_description": "Fine tuning TEDx",
4
+ "seed": 42,
5
+ // AUDIO PARAMS
6
+ "sampling_rate": 16000,
7
+
8
+ // VOCABULARY PARAMETERS
9
+ "vocab":{
10
+ "vocab_path": "example/vocab_example.json", // generic vocab for Portuguese
11
+ "blank": "<pad>", // blank token for padding
12
+ "silence": "|", // token between words
13
+ "unk": "<unk>" // unk token
14
+ },
15
+
16
+ // TRAINING
17
+ "batch_size": 8, // Batch size for training.
18
+ "mixed_precision": true, // automatic mixed FP16/FP32 precision (AMP) via NVIDIA's apex; currently only optimization level "O1" is supported.
19
+ "early_stop_epochs": 10, // If 0, disabled; otherwise, the number of epochs without improvement in validation loss before training stops
20
+ "preprocess_dataset": false, // if true, the dataset will be pre-processed and saved to disk; otherwise the audio files will be loaded at each step. Preprocessing makes training faster, but requires much more disk space.
21
+
22
+ // OPTIMIZER
23
+ "epochs": 140, // total number of epochs to train.
24
+ "lr": 0.00003, // Initial learning rate.
25
+ "gradient_accumulation_steps": 24,
26
+
27
+ // LOGGING
28
+ "logging_steps": 100, // Log/plot every this many steps.
29
+ "load_best_model_at_end": true,
30
+ "save_total_limit": 3,
31
+ "warmup_ratio": 0.06666666667, // 0 disables it; ratio of total training steps used for a linear warmup from 0 to learning_rate
32
+ "warmup_steps": 0, // 0 disables it; number of steps used for a linear warmup from 0 to learning_rate
33
+
34
+ // DATA LOADING
35
+ "num_loader_workers": 8, // number of training data-loader processes. Don't set it too high; 4-8 are good.
36
+
37
+ // MODEL
38
+ "freeze_feature_extractor": true, // Whether to freeze the feature extractor layers of the model.
39
+ "attention_dropout": 0.1, // The dropout ratio for the attention probabilities.
40
+ "activation_dropout": 0.1, // The dropout ratio for activations inside the fully connected layer.
41
+ "hidden_dropout": 0.1, // The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
42
+ "feat_proj_dropout": 0.1, // The dropout probability for all 1D convolutional layers in the feature extractor.
43
+ "mask_time_prob": 0.05, // Probability of each feature vector along the time axis being chosen as the start of the vector span to be masked.
44
+ "layerdrop": 0.0, // The LayerDrop probability.
45
+ "gradient_checkpointing": true, // If True, use gradient checkpointing to save memory at the expense of slower backward pass.
46
+
47
+ // ToDo: Implement Time mask and Frequency Mask
48
+ "audio_augmentation":[
49
+ // additive noise and room impulse response (RIR) simulation similar to: https://arxiv.org/pdf/2009.14153.pdf
50
+ {
51
+ "name": "additive",
52
+ "sounds_path":"/raid/datasets/DA/musan/speech/", // download: https://www.openslr.org/17/
53
+ "lru_cache_size": 32, // Maximum size of the LRU cache for storing noise files in memory
54
+ "min_snr_in_db": 13.0,
55
+ "max_snr_in_db": 20.0,
56
+ // "sample_rate": 16000,
57
+ "p": 0.25
58
+ },
59
+ {
60
+ "name": "additive",
61
+ "sounds_path":"/raid/datasets/DA/musan/music/", // download: https://www.openslr.org/17/
62
+ "lru_cache_size": 32, // Maximum size of the LRU cache for storing noise files in memory
63
+ "min_snr_in_db": 5.0,
64
+ "max_snr_in_db": 15.0,
65
+ // "sample_rate": 16000,
66
+ "p": 0.25
67
+ },
68
+ {
69
+ "name": "additive",
70
+ "sounds_path":"/raid/datasets/DA/musan/noise/", // download: https://www.openslr.org/17/
71
+ "lru_cache_size": 32, // Maximum size of the LRU cache for storing noise files in memory
72
+ "min_snr_in_db": 0.0,
73
+ "max_snr_in_db": 15.0,
74
+ // "sample_rate": 16000,
75
+ "p": 0.25
76
+ },
77
+ // rir filter proposed by: https://ieeexplore.ieee.org/document/7953152
78
+ {
79
+ "name": "rir",
80
+ "ir_path": "/raid/datasets/DA/RIRS_NOISES/simulated_rirs/", // download: https://www.openslr.org/28/
81
+ "lru_cache_size": 128, // Maximum size of the LRU cache for storing noise files in memory
82
+ // "sample_rate": 16000,
83
+ "p": 0.25
84
+ }
85
+ ,
86
+ // {
87
+ // "name": "gain",
88
+ // "min_gain_in_db": -18.0,
89
+ // "max_gain_in_db": 6,
90
+ // "p": 0.25 // probability of applying this method; 0 disables it
91
+ // },
92
+ {
93
+ "name": "pitch_shift",
94
+ "min_semitones": -4,
95
+ "max_semitones": 4,
96
+ "p": 0.25 // probability of applying this method; 0 disables it
97
+ },
98
+ {
99
+ "name": "gaussian",
100
+ "min_amplitude": 0.0001,
101
+ "max_amplitude": 0.001,
102
+ "p": 0.25 // probability of applying this method; 0 disables it
103
+ }
104
+ ],
105
+ // PATHS
106
+ "output_path": "../checkpoints/YourTTS2ASR/Wav2Vec-voxpopuli/one-speaker/just-TTS/PT/140-epoch-high-bs/",
107
+ // CACHE
108
+ "dataset_cache": "../datasets/",
109
+
110
+ // DATASETS
111
+ "datasets":{
112
+ "files_path": "/raid/datasets/TTS-Portuguese-Corpus/", // base path for the audio files; it will be joined with the paths from the CSV files
113
+ "train":
114
+ [
115
+ // these dicts are passed directly to load_dataset; see the documentation: https://huggingface.co/docs/datasets/package_reference/loading_methods.html#datasets.load_dataset
116
+ {
117
+ "name": "csv",
118
+ "path": "csv",
119
+
120
+ "data_files": ["/raid/datasets/TTS-Portuguese-Corpus/train_TTS-Portuguese_Corpus_metadata_converted_to_ASR.csv"], // csv files
121
+ "text_column": "text",
122
+ "path_column": "file_path"
123
+ }
124
+ ]
125
+ ,
126
+ "devel":
127
+ [
128
+ {
129
+ "name": "csv",
130
+ "path": "csv",
131
+ "data_files": ["/raid/datasets/TTS-Portuguese-Corpus/eval_TTS-Portuguese_Corpus_metadata_converted_to_ASR.csv"], // csv files
132
+ "text_column": "text",
133
+ "path_column": "file_path"
134
+ }
135
+ ]
136
+ ,
137
+ "test":
138
+ {
139
+ "name": "csv",
140
+ "path": "csv",
141
+ "data_files": ["/raid/datasets/Common_Voice/cv-corpus-7.0-2021-07-21/pt/test_converted.csv"], // csv files
142
+ "text_column": "text",
143
+ "path_column": "file_path"
144
+ }
145
+
146
+ }//,
147
+ // used only for test
148
+ // "KenLM":{
149
+ // "kenlm_model_path": "../../kenLM/binaries/subtitle/4-gram/lm.binary", // Path for KenLM model
150
+ // "lexicon_path": "example/lexicon.lst", // file with all words for limit the decoder search
151
+ // "beam": 2048,
152
+ // "nbest": 1,
153
+ // "beam_threshold": 25,
154
+ // "lm_weight": 1,
155
+ // "word_score": -1,
156
+ // "sil_weight": 0
157
+ // }
158
+
159
+
160
+
161
+ }
162
+
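The `audio_augmentation` list in config_train.json above (additive MUSAN speech/music/noise, simulated room impulse responses, pitch shift, and Gaussian noise, each applied with probability 0.25) is consumed by the training script, which is not included in this commit. Purely as an illustration of what such a pipeline can look like, the sketch below maps those entries onto transforms from the `audiomentations` package; this is an assumption made for clarity, not the author's actual implementation:

```python
import numpy as np
from audiomentations import (AddBackgroundNoise, AddGaussianNoise,
                             ApplyImpulseResponse, Compose, PitchShift)

# Illustrative waveform-augmentation chain mirroring the config entries above.
# The paths are the ones referenced in config_train.json (MUSAN: openslr.org/17,
# simulated RIRs: openslr.org/28); adjust them to your local copies.
augment = Compose([
    AddBackgroundNoise(sounds_path="/raid/datasets/DA/musan/speech/",
                       min_snr_in_db=13.0, max_snr_in_db=20.0, p=0.25),
    AddBackgroundNoise(sounds_path="/raid/datasets/DA/musan/music/",
                       min_snr_in_db=5.0, max_snr_in_db=15.0, p=0.25),
    AddBackgroundNoise(sounds_path="/raid/datasets/DA/musan/noise/",
                       min_snr_in_db=0.0, max_snr_in_db=15.0, p=0.25),
    ApplyImpulseResponse(ir_path="/raid/datasets/DA/RIRS_NOISES/simulated_rirs/", p=0.25),
    PitchShift(min_semitones=-4, max_semitones=4, p=0.25),
    AddGaussianNoise(min_amplitude=0.0001, max_amplitude=0.001, p=0.25),
])

waveform = np.random.uniform(-0.5, 0.5, 16_000).astype(np.float32)  # stand-in for a 1 s, 16 kHz clip
augmented = augment(samples=waveform, sample_rate=16_000)
```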
eval_results.json ADDED
@@ -0,0 +1,12 @@
1
+ {
2
+ "epoch": 129.99,
3
+ "eval_loss": 0.6900125741958618,
4
+ "eval_mem_cpu_alloc_delta": 152395776,
5
+ "eval_mem_cpu_peaked_delta": 122880,
6
+ "eval_mem_gpu_alloc_delta": 0,
7
+ "eval_mem_gpu_peaked_delta": 7244027392,
8
+ "eval_runtime": 49.6987,
9
+ "eval_samples": 500,
10
+ "eval_samples_per_second": 10.061,
11
+ "eval_wer": 0.4532554257095159
12
+ }
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
1
+ {
2
+ "do_normalize": true,
3
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
4
+ "feature_size": 1,
5
+ "padding_side": "right",
6
+ "padding_value": 0.0,
7
+ "return_attention_mask": true,
8
+ "sampling_rate": 16000
9
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a0b7787c4b812154e4d737af312cac8d7b1f3197d72e84feb6999aa3007be90
3
+ size 1262108145
special_tokens_map.json ADDED
@@ -0,0 +1 @@
1
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
1
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": false, "word_delimiter_token": "|"}
train_results.json ADDED
@@ -0,0 +1,14 @@
1
+ {
2
+ "epoch": 129.99,
3
+ "init_mem_cpu_alloc_delta": 3913703424,
4
+ "init_mem_cpu_peaked_delta": 1208139776,
5
+ "init_mem_gpu_alloc_delta": 1261939712,
6
+ "init_mem_gpu_peaked_delta": 0,
7
+ "train_mem_cpu_alloc_delta": 2117373952,
8
+ "train_mem_cpu_peaked_delta": 24576,
9
+ "train_mem_gpu_alloc_delta": 3778624512,
10
+ "train_mem_gpu_peaked_delta": 9083709440,
11
+ "train_runtime": 94419.6153,
12
+ "train_samples": 3083,
13
+ "train_samples_per_second": 0.024
14
+ }
trainer_state.json ADDED
@@ -0,0 +1,1189 @@
1
+ {
2
+ "best_metric": 0.6735167503356934,
3
+ "best_model_checkpoint": "../checkpoints/YourTTS2ASR/Wav2Vec-voxpopuli/one-speaker/just-TTS/PT/140-epoch-high-bs/checkpoint-1920",
4
+ "epoch": 129.99481865284974,
5
+ "global_step": 2080,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.06,
12
+ "learning_rate": 2.0000000000000002e-07,
13
+ "loss": 12.0818,
14
+ "step": 1
15
+ },
16
+ {
17
+ "epoch": 0.99,
18
+ "eval_loss": 12.243678092956543,
19
+ "eval_runtime": 49.2931,
20
+ "eval_samples_per_second": 10.143,
21
+ "eval_wer": 1.0009390651085142,
22
+ "step": 16
23
+ },
24
+ {
25
+ "epoch": 1.99,
26
+ "eval_loss": 12.257490158081055,
27
+ "eval_runtime": 50.2087,
28
+ "eval_samples_per_second": 9.958,
29
+ "eval_wer": 1.0005217028380635,
30
+ "step": 32
31
+ },
32
+ {
33
+ "epoch": 2.99,
34
+ "eval_loss": 11.961856842041016,
35
+ "eval_runtime": 48.9816,
36
+ "eval_samples_per_second": 10.208,
37
+ "eval_wer": 0.9989565943238731,
38
+ "step": 48
39
+ },
40
+ {
41
+ "epoch": 3.99,
42
+ "eval_loss": 10.610284805297852,
43
+ "eval_runtime": 49.2335,
44
+ "eval_samples_per_second": 10.156,
45
+ "eval_wer": 0.9995826377295493,
46
+ "step": 64
47
+ },
48
+ {
49
+ "epoch": 4.99,
50
+ "eval_loss": 8.434402465820312,
51
+ "eval_runtime": 50.1138,
52
+ "eval_samples_per_second": 9.977,
53
+ "eval_wer": 1.0,
54
+ "step": 80
55
+ },
56
+ {
57
+ "epoch": 5.99,
58
+ "eval_loss": 6.819848537445068,
59
+ "eval_runtime": 50.2226,
60
+ "eval_samples_per_second": 9.956,
61
+ "eval_wer": 1.0,
62
+ "step": 96
63
+ },
64
+ {
65
+ "epoch": 6.25,
66
+ "learning_rate": 1.9999999999999998e-05,
67
+ "loss": 11.0338,
68
+ "step": 100
69
+ },
70
+ {
71
+ "epoch": 6.99,
72
+ "eval_loss": 5.784036159515381,
73
+ "eval_runtime": 49.485,
74
+ "eval_samples_per_second": 10.104,
75
+ "eval_wer": 1.0,
76
+ "step": 112
77
+ },
78
+ {
79
+ "epoch": 7.99,
80
+ "eval_loss": 5.032914161682129,
81
+ "eval_runtime": 49.5792,
82
+ "eval_samples_per_second": 10.085,
83
+ "eval_wer": 1.0,
84
+ "step": 128
85
+ },
86
+ {
87
+ "epoch": 8.99,
88
+ "eval_loss": 4.575235366821289,
89
+ "eval_runtime": 48.3292,
90
+ "eval_samples_per_second": 10.346,
91
+ "eval_wer": 1.0,
92
+ "step": 144
93
+ },
94
+ {
95
+ "epoch": 9.99,
96
+ "eval_loss": 4.227362155914307,
97
+ "eval_runtime": 50.081,
98
+ "eval_samples_per_second": 9.984,
99
+ "eval_wer": 1.0,
100
+ "step": 160
101
+ },
102
+ {
103
+ "epoch": 10.99,
104
+ "eval_loss": 3.977447271347046,
105
+ "eval_runtime": 49.6321,
106
+ "eval_samples_per_second": 10.074,
107
+ "eval_wer": 1.0,
108
+ "step": 176
109
+ },
110
+ {
111
+ "epoch": 11.99,
112
+ "eval_loss": 3.8090567588806152,
113
+ "eval_runtime": 49.5094,
114
+ "eval_samples_per_second": 10.099,
115
+ "eval_wer": 1.0,
116
+ "step": 192
117
+ },
118
+ {
119
+ "epoch": 12.5,
120
+ "learning_rate": 2.9282296650717705e-05,
121
+ "loss": 5.2367,
122
+ "step": 200
123
+ },
124
+ {
125
+ "epoch": 12.99,
126
+ "eval_loss": 3.6537370681762695,
127
+ "eval_runtime": 49.2195,
128
+ "eval_samples_per_second": 10.159,
129
+ "eval_wer": 1.0,
130
+ "step": 208
131
+ },
132
+ {
133
+ "epoch": 13.99,
134
+ "eval_loss": 3.5310537815093994,
135
+ "eval_runtime": 48.9715,
136
+ "eval_samples_per_second": 10.21,
137
+ "eval_wer": 1.0,
138
+ "step": 224
139
+ },
140
+ {
141
+ "epoch": 14.99,
142
+ "eval_loss": 3.4466352462768555,
143
+ "eval_runtime": 49.4588,
144
+ "eval_samples_per_second": 10.109,
145
+ "eval_wer": 1.0,
146
+ "step": 240
147
+ },
148
+ {
149
+ "epoch": 15.99,
150
+ "eval_loss": 3.325173854827881,
151
+ "eval_runtime": 48.5474,
152
+ "eval_samples_per_second": 10.299,
153
+ "eval_wer": 1.0,
154
+ "step": 256
155
+ },
156
+ {
157
+ "epoch": 16.99,
158
+ "eval_loss": 3.2516260147094727,
159
+ "eval_runtime": 49.5175,
160
+ "eval_samples_per_second": 10.097,
161
+ "eval_wer": 1.0,
162
+ "step": 272
163
+ },
164
+ {
165
+ "epoch": 17.99,
166
+ "eval_loss": 3.1839869022369385,
167
+ "eval_runtime": 48.9663,
168
+ "eval_samples_per_second": 10.211,
169
+ "eval_wer": 1.0,
170
+ "step": 288
171
+ },
172
+ {
173
+ "epoch": 18.75,
174
+ "learning_rate": 2.784688995215311e-05,
175
+ "loss": 3.5824,
176
+ "step": 300
177
+ },
178
+ {
179
+ "epoch": 18.99,
180
+ "eval_loss": 3.1358871459960938,
181
+ "eval_runtime": 49.4599,
182
+ "eval_samples_per_second": 10.109,
183
+ "eval_wer": 1.0,
184
+ "step": 304
185
+ },
186
+ {
187
+ "epoch": 19.99,
188
+ "eval_loss": 3.0906283855438232,
189
+ "eval_runtime": 49.5292,
190
+ "eval_samples_per_second": 10.095,
191
+ "eval_wer": 1.0,
192
+ "step": 320
193
+ },
194
+ {
195
+ "epoch": 20.99,
196
+ "eval_loss": 3.0470166206359863,
197
+ "eval_runtime": 49.8959,
198
+ "eval_samples_per_second": 10.021,
199
+ "eval_wer": 1.0,
200
+ "step": 336
201
+ },
202
+ {
203
+ "epoch": 21.99,
204
+ "eval_loss": 3.0199356079101562,
205
+ "eval_runtime": 48.7586,
206
+ "eval_samples_per_second": 10.255,
207
+ "eval_wer": 1.0,
208
+ "step": 352
209
+ },
210
+ {
211
+ "epoch": 22.99,
212
+ "eval_loss": 2.993663787841797,
213
+ "eval_runtime": 49.2925,
214
+ "eval_samples_per_second": 10.144,
215
+ "eval_wer": 1.0,
216
+ "step": 368
217
+ },
218
+ {
219
+ "epoch": 23.99,
220
+ "eval_loss": 2.9856507778167725,
221
+ "eval_runtime": 48.889,
222
+ "eval_samples_per_second": 10.227,
223
+ "eval_wer": 1.0,
224
+ "step": 384
225
+ },
226
+ {
227
+ "epoch": 24.99,
228
+ "learning_rate": 2.6411483253588518e-05,
229
+ "loss": 3.088,
230
+ "step": 400
231
+ },
232
+ {
233
+ "epoch": 24.99,
234
+ "eval_loss": 2.952263116836548,
235
+ "eval_runtime": 49.1389,
236
+ "eval_samples_per_second": 10.175,
237
+ "eval_wer": 1.0,
238
+ "step": 400
239
+ },
240
+ {
241
+ "epoch": 25.99,
242
+ "eval_loss": 2.9487361907958984,
243
+ "eval_runtime": 49.3941,
244
+ "eval_samples_per_second": 10.123,
245
+ "eval_wer": 1.0,
246
+ "step": 416
247
+ },
248
+ {
249
+ "epoch": 26.99,
250
+ "eval_loss": 2.9333157539367676,
251
+ "eval_runtime": 49.2595,
252
+ "eval_samples_per_second": 10.15,
253
+ "eval_wer": 1.0,
254
+ "step": 432
255
+ },
256
+ {
257
+ "epoch": 27.99,
258
+ "eval_loss": 2.9365267753601074,
259
+ "eval_runtime": 49.5918,
260
+ "eval_samples_per_second": 10.082,
261
+ "eval_wer": 1.0,
262
+ "step": 448
263
+ },
264
+ {
265
+ "epoch": 28.99,
266
+ "eval_loss": 2.926832914352417,
267
+ "eval_runtime": 49.5193,
268
+ "eval_samples_per_second": 10.097,
269
+ "eval_wer": 1.0,
270
+ "step": 464
271
+ },
272
+ {
273
+ "epoch": 29.99,
274
+ "eval_loss": 2.9162416458129883,
275
+ "eval_runtime": 49.57,
276
+ "eval_samples_per_second": 10.087,
277
+ "eval_wer": 1.0,
278
+ "step": 480
279
+ },
280
+ {
281
+ "epoch": 30.99,
282
+ "eval_loss": 2.9036505222320557,
283
+ "eval_runtime": 49.6143,
284
+ "eval_samples_per_second": 10.078,
285
+ "eval_wer": 1.0,
286
+ "step": 496
287
+ },
288
+ {
289
+ "epoch": 31.25,
290
+ "learning_rate": 2.4976076555023923e-05,
291
+ "loss": 2.9504,
292
+ "step": 500
293
+ },
294
+ {
295
+ "epoch": 31.99,
296
+ "eval_loss": 2.9000539779663086,
297
+ "eval_runtime": 49.4941,
298
+ "eval_samples_per_second": 10.102,
299
+ "eval_wer": 1.0,
300
+ "step": 512
301
+ },
302
+ {
303
+ "epoch": 32.99,
304
+ "eval_loss": 2.893620491027832,
305
+ "eval_runtime": 50.1869,
306
+ "eval_samples_per_second": 9.963,
307
+ "eval_wer": 1.0,
308
+ "step": 528
309
+ },
310
+ {
311
+ "epoch": 33.99,
312
+ "eval_loss": 2.8881995677948,
313
+ "eval_runtime": 49.8317,
314
+ "eval_samples_per_second": 10.034,
315
+ "eval_wer": 1.0,
316
+ "step": 544
317
+ },
318
+ {
319
+ "epoch": 34.99,
320
+ "eval_loss": 2.8850040435791016,
321
+ "eval_runtime": 49.2167,
322
+ "eval_samples_per_second": 10.159,
323
+ "eval_wer": 1.0,
324
+ "step": 560
325
+ },
326
+ {
327
+ "epoch": 35.99,
328
+ "eval_loss": 2.8831725120544434,
329
+ "eval_runtime": 49.9233,
330
+ "eval_samples_per_second": 10.015,
331
+ "eval_wer": 1.0,
332
+ "step": 576
333
+ },
334
+ {
335
+ "epoch": 36.99,
336
+ "eval_loss": 2.8862743377685547,
337
+ "eval_runtime": 49.9603,
338
+ "eval_samples_per_second": 10.008,
339
+ "eval_wer": 1.0,
340
+ "step": 592
341
+ },
342
+ {
343
+ "epoch": 37.5,
344
+ "learning_rate": 2.354066985645933e-05,
345
+ "loss": 2.9116,
346
+ "step": 600
347
+ },
348
+ {
349
+ "epoch": 37.99,
350
+ "eval_loss": 2.8713128566741943,
351
+ "eval_runtime": 48.8267,
352
+ "eval_samples_per_second": 10.24,
353
+ "eval_wer": 1.0,
354
+ "step": 608
355
+ },
356
+ {
357
+ "epoch": 38.99,
358
+ "eval_loss": 2.8694326877593994,
359
+ "eval_runtime": 50.3391,
360
+ "eval_samples_per_second": 9.933,
361
+ "eval_wer": 1.0,
362
+ "step": 624
363
+ },
364
+ {
365
+ "epoch": 39.99,
366
+ "eval_loss": 2.8684744834899902,
367
+ "eval_runtime": 49.7451,
368
+ "eval_samples_per_second": 10.051,
369
+ "eval_wer": 1.0,
370
+ "step": 640
371
+ },
372
+ {
373
+ "epoch": 40.99,
374
+ "eval_loss": 2.8572192192077637,
375
+ "eval_runtime": 50.1709,
376
+ "eval_samples_per_second": 9.966,
377
+ "eval_wer": 1.0,
378
+ "step": 656
379
+ },
380
+ {
381
+ "epoch": 41.99,
382
+ "eval_loss": 2.8502085208892822,
383
+ "eval_runtime": 49.5484,
384
+ "eval_samples_per_second": 10.091,
385
+ "eval_wer": 1.0,
386
+ "step": 672
387
+ },
388
+ {
389
+ "epoch": 42.99,
390
+ "eval_loss": 2.8417484760284424,
391
+ "eval_runtime": 49.0891,
392
+ "eval_samples_per_second": 10.186,
393
+ "eval_wer": 1.0,
394
+ "step": 688
395
+ },
396
+ {
397
+ "epoch": 43.75,
398
+ "learning_rate": 2.2105263157894736e-05,
399
+ "loss": 2.8836,
400
+ "step": 700
401
+ },
402
+ {
403
+ "epoch": 43.99,
404
+ "eval_loss": 2.8355417251586914,
405
+ "eval_runtime": 49.7,
406
+ "eval_samples_per_second": 10.06,
407
+ "eval_wer": 1.0,
408
+ "step": 704
409
+ },
410
+ {
411
+ "epoch": 44.99,
412
+ "eval_loss": 2.8320789337158203,
413
+ "eval_runtime": 49.7957,
414
+ "eval_samples_per_second": 10.041,
415
+ "eval_wer": 1.0,
416
+ "step": 720
417
+ },
418
+ {
419
+ "epoch": 45.99,
420
+ "eval_loss": 2.8173515796661377,
421
+ "eval_runtime": 49.7947,
422
+ "eval_samples_per_second": 10.041,
423
+ "eval_wer": 1.0,
424
+ "step": 736
425
+ },
426
+ {
427
+ "epoch": 46.99,
428
+ "eval_loss": 2.799645185470581,
429
+ "eval_runtime": 49.4675,
430
+ "eval_samples_per_second": 10.108,
431
+ "eval_wer": 1.0,
432
+ "step": 752
433
+ },
434
+ {
435
+ "epoch": 47.99,
436
+ "eval_loss": 2.783804178237915,
437
+ "eval_runtime": 49.2068,
438
+ "eval_samples_per_second": 10.161,
439
+ "eval_wer": 1.0,
440
+ "step": 768
441
+ },
442
+ {
443
+ "epoch": 48.99,
444
+ "eval_loss": 2.759208917617798,
445
+ "eval_runtime": 49.4097,
446
+ "eval_samples_per_second": 10.119,
447
+ "eval_wer": 1.0,
448
+ "step": 784
449
+ },
450
+ {
451
+ "epoch": 49.99,
452
+ "learning_rate": 2.0669856459330144e-05,
453
+ "loss": 2.8277,
454
+ "step": 800
455
+ },
456
+ {
457
+ "epoch": 49.99,
458
+ "eval_loss": 2.7332029342651367,
459
+ "eval_runtime": 49.4948,
460
+ "eval_samples_per_second": 10.102,
461
+ "eval_wer": 1.0,
462
+ "step": 800
463
+ },
464
+ {
465
+ "epoch": 50.99,
466
+ "eval_loss": 2.709989309310913,
467
+ "eval_runtime": 48.8084,
468
+ "eval_samples_per_second": 10.244,
469
+ "eval_wer": 0.9984348914858097,
470
+ "step": 816
471
+ },
472
+ {
473
+ "epoch": 51.99,
474
+ "eval_loss": 2.6740872859954834,
475
+ "eval_runtime": 49.8376,
476
+ "eval_samples_per_second": 10.033,
477
+ "eval_wer": 0.9957220367278798,
478
+ "step": 832
479
+ },
480
+ {
481
+ "epoch": 52.99,
482
+ "eval_loss": 2.646063804626465,
483
+ "eval_runtime": 49.3421,
484
+ "eval_samples_per_second": 10.133,
485
+ "eval_wer": 0.9908180300500835,
486
+ "step": 848
487
+ },
488
+ {
489
+ "epoch": 53.99,
490
+ "eval_loss": 2.6031899452209473,
491
+ "eval_runtime": 49.5023,
492
+ "eval_samples_per_second": 10.101,
493
+ "eval_wer": 0.9760016694490818,
494
+ "step": 864
495
+ },
496
+ {
497
+ "epoch": 54.99,
498
+ "eval_loss": 2.5573575496673584,
499
+ "eval_runtime": 49.9478,
500
+ "eval_samples_per_second": 10.01,
501
+ "eval_wer": 0.9698455759599333,
502
+ "step": 880
503
+ },
504
+ {
505
+ "epoch": 55.99,
506
+ "eval_loss": 2.488868236541748,
507
+ "eval_runtime": 50.1085,
508
+ "eval_samples_per_second": 9.978,
509
+ "eval_wer": 0.9582637729549248,
510
+ "step": 896
511
+ },
512
+ {
513
+ "epoch": 56.25,
514
+ "learning_rate": 1.9234449760765553e-05,
515
+ "loss": 2.6844,
516
+ "step": 900
517
+ },
518
+ {
519
+ "epoch": 56.99,
520
+ "eval_loss": 2.4326117038726807,
521
+ "eval_runtime": 50.1267,
522
+ "eval_samples_per_second": 9.975,
523
+ "eval_wer": 0.9516903171953256,
524
+ "step": 912
525
+ },
526
+ {
527
+ "epoch": 57.99,
528
+ "eval_loss": 2.357138156890869,
529
+ "eval_runtime": 50.0431,
530
+ "eval_samples_per_second": 9.991,
531
+ "eval_wer": 0.9559682804674458,
532
+ "step": 928
533
+ },
534
+ {
535
+ "epoch": 58.99,
536
+ "eval_loss": 2.2913711071014404,
537
+ "eval_runtime": 50.0784,
538
+ "eval_samples_per_second": 9.984,
539
+ "eval_wer": 0.9559682804674458,
540
+ "step": 944
541
+ },
542
+ {
543
+ "epoch": 59.99,
544
+ "eval_loss": 2.21211314201355,
545
+ "eval_runtime": 49.4565,
546
+ "eval_samples_per_second": 10.11,
547
+ "eval_wer": 0.9565943238731218,
548
+ "step": 960
549
+ },
550
+ {
551
+ "epoch": 60.99,
552
+ "eval_loss": 2.1155571937561035,
553
+ "eval_runtime": 48.9205,
554
+ "eval_samples_per_second": 10.221,
555
+ "eval_wer": 0.9346828046744574,
556
+ "step": 976
557
+ },
558
+ {
559
+ "epoch": 61.99,
560
+ "eval_loss": 2.0269429683685303,
561
+ "eval_runtime": 49.2233,
562
+ "eval_samples_per_second": 10.158,
563
+ "eval_wer": 0.9186143572621035,
564
+ "step": 992
565
+ },
566
+ {
567
+ "epoch": 62.5,
568
+ "learning_rate": 1.7799043062200958e-05,
569
+ "loss": 2.3941,
570
+ "step": 1000
571
+ },
572
+ {
573
+ "epoch": 62.99,
574
+ "eval_loss": 1.9515246152877808,
575
+ "eval_runtime": 49.3099,
576
+ "eval_samples_per_second": 10.14,
577
+ "eval_wer": 0.9009808013355592,
578
+ "step": 1008
579
+ },
580
+ {
581
+ "epoch": 63.99,
582
+ "eval_loss": 1.8633095026016235,
583
+ "eval_runtime": 49.6022,
584
+ "eval_samples_per_second": 10.08,
585
+ "eval_wer": 0.8821994991652755,
586
+ "step": 1024
587
+ },
588
+ {
589
+ "epoch": 64.99,
590
+ "eval_loss": 1.7742440700531006,
591
+ "eval_runtime": 49.4148,
592
+ "eval_samples_per_second": 10.118,
593
+ "eval_wer": 0.8746869782971619,
594
+ "step": 1040
595
+ },
596
+ {
597
+ "epoch": 65.99,
598
+ "eval_loss": 1.6952035427093506,
599
+ "eval_runtime": 49.4954,
600
+ "eval_samples_per_second": 10.102,
601
+ "eval_wer": 0.8543405676126878,
602
+ "step": 1056
603
+ },
604
+ {
605
+ "epoch": 66.99,
606
+ "eval_loss": 1.6496139764785767,
607
+ "eval_runtime": 50.2365,
608
+ "eval_samples_per_second": 9.953,
609
+ "eval_wer": 0.8415066777963273,
610
+ "step": 1072
611
+ },
612
+ {
613
+ "epoch": 67.99,
614
+ "eval_loss": 1.577644944190979,
615
+ "eval_runtime": 49.0048,
616
+ "eval_samples_per_second": 10.203,
617
+ "eval_wer": 0.8229340567612687,
618
+ "step": 1088
619
+ },
620
+ {
621
+ "epoch": 68.75,
622
+ "learning_rate": 1.6363636363636363e-05,
623
+ "loss": 1.9737,
624
+ "step": 1100
625
+ },
626
+ {
627
+ "epoch": 68.99,
628
+ "eval_loss": 1.5079734325408936,
629
+ "eval_runtime": 49.8276,
630
+ "eval_samples_per_second": 10.035,
631
+ "eval_wer": 0.7996661101836394,
632
+ "step": 1104
633
+ },
634
+ {
635
+ "epoch": 69.99,
636
+ "eval_loss": 1.4567533731460571,
637
+ "eval_runtime": 48.81,
638
+ "eval_samples_per_second": 10.244,
639
+ "eval_wer": 0.7835976627712855,
640
+ "step": 1120
641
+ },
642
+ {
643
+ "epoch": 70.99,
644
+ "eval_loss": 1.4095492362976074,
645
+ "eval_runtime": 50.121,
646
+ "eval_samples_per_second": 9.976,
647
+ "eval_wer": 0.7838063439065108,
648
+ "step": 1136
649
+ },
650
+ {
651
+ "epoch": 71.99,
652
+ "eval_loss": 1.3987743854522705,
653
+ "eval_runtime": 49.582,
654
+ "eval_samples_per_second": 10.084,
655
+ "eval_wer": 0.7634599332220368,
656
+ "step": 1152
657
+ },
658
+ {
659
+ "epoch": 72.99,
660
+ "eval_loss": 1.3577879667282104,
661
+ "eval_runtime": 49.6213,
662
+ "eval_samples_per_second": 10.076,
663
+ "eval_wer": 0.7589732888146912,
664
+ "step": 1168
665
+ },
666
+ {
667
+ "epoch": 73.99,
668
+ "eval_loss": 1.3037357330322266,
669
+ "eval_runtime": 49.3802,
670
+ "eval_samples_per_second": 10.126,
671
+ "eval_wer": 0.7461393989983306,
672
+ "step": 1184
673
+ },
674
+ {
675
+ "epoch": 74.99,
676
+ "learning_rate": 1.492822966507177e-05,
677
+ "loss": 1.6567,
678
+ "step": 1200
679
+ },
680
+ {
681
+ "epoch": 74.99,
682
+ "eval_loss": 1.2708954811096191,
683
+ "eval_runtime": 49.4247,
684
+ "eval_samples_per_second": 10.116,
685
+ "eval_wer": 0.7350792988313857,
686
+ "step": 1200
687
+ },
688
+ {
689
+ "epoch": 75.99,
690
+ "eval_loss": 1.2239230871200562,
691
+ "eval_runtime": 50.0296,
692
+ "eval_samples_per_second": 9.994,
693
+ "eval_wer": 0.7258973288814691,
694
+ "step": 1216
695
+ },
696
+ {
697
+ "epoch": 76.99,
698
+ "eval_loss": 1.1831496953964233,
699
+ "eval_runtime": 49.5093,
700
+ "eval_samples_per_second": 10.099,
701
+ "eval_wer": 0.7141068447412354,
702
+ "step": 1232
703
+ },
704
+ {
705
+ "epoch": 77.99,
706
+ "eval_loss": 1.1715855598449707,
707
+ "eval_runtime": 50.4813,
708
+ "eval_samples_per_second": 9.905,
709
+ "eval_wer": 0.7060726210350584,
710
+ "step": 1248
711
+ },
712
+ {
713
+ "epoch": 78.99,
714
+ "eval_loss": 1.1489920616149902,
715
+ "eval_runtime": 49.5198,
716
+ "eval_samples_per_second": 10.097,
717
+ "eval_wer": 0.7013772954924875,
718
+ "step": 1264
719
+ },
720
+ {
721
+ "epoch": 79.99,
722
+ "eval_loss": 1.1155155897140503,
723
+ "eval_runtime": 49.0369,
724
+ "eval_samples_per_second": 10.196,
725
+ "eval_wer": 0.6840567612687813,
726
+ "step": 1280
727
+ },
728
+ {
729
+ "epoch": 80.99,
730
+ "eval_loss": 1.091223955154419,
731
+ "eval_runtime": 49.9086,
732
+ "eval_samples_per_second": 10.018,
733
+ "eval_wer": 0.6698664440734557,
734
+ "step": 1296
735
+ },
736
+ {
737
+ "epoch": 81.25,
738
+ "learning_rate": 1.3492822966507177e-05,
739
+ "loss": 1.4388,
740
+ "step": 1300
741
+ },
742
+ {
743
+ "epoch": 81.99,
744
+ "eval_loss": 1.0740453004837036,
745
+ "eval_runtime": 49.6299,
746
+ "eval_samples_per_second": 10.075,
747
+ "eval_wer": 0.659432387312187,
748
+ "step": 1312
749
+ },
750
+ {
751
+ "epoch": 82.99,
752
+ "eval_loss": 1.0269348621368408,
753
+ "eval_runtime": 49.8552,
754
+ "eval_samples_per_second": 10.029,
755
+ "eval_wer": 0.648059265442404,
756
+ "step": 1328
757
+ },
758
+ {
759
+ "epoch": 83.99,
760
+ "eval_loss": 0.9962567090988159,
761
+ "eval_runtime": 49.19,
762
+ "eval_samples_per_second": 10.165,
763
+ "eval_wer": 0.6295909849749582,
764
+ "step": 1344
765
+ },
766
+ {
767
+ "epoch": 84.99,
768
+ "eval_loss": 0.989078164100647,
769
+ "eval_runtime": 49.4835,
770
+ "eval_samples_per_second": 10.104,
771
+ "eval_wer": 0.6127921535893155,
772
+ "step": 1360
773
+ },
774
+ {
775
+ "epoch": 85.99,
776
+ "eval_loss": 0.9740233421325684,
777
+ "eval_runtime": 49.61,
778
+ "eval_samples_per_second": 10.079,
779
+ "eval_wer": 0.6173831385642737,
780
+ "step": 1376
781
+ },
782
+ {
783
+ "epoch": 86.99,
784
+ "eval_loss": 0.9527219533920288,
785
+ "eval_runtime": 49.7235,
786
+ "eval_samples_per_second": 10.056,
787
+ "eval_wer": 0.6007929883138564,
788
+ "step": 1392
789
+ },
790
+ {
791
+ "epoch": 87.5,
792
+ "learning_rate": 1.2057416267942584e-05,
793
+ "loss": 1.2741,
794
+ "step": 1400
795
+ },
796
+ {
797
+ "epoch": 87.99,
798
+ "eval_loss": 0.926001787185669,
799
+ "eval_runtime": 50.147,
800
+ "eval_samples_per_second": 9.971,
801
+ "eval_wer": 0.5874373956594324,
802
+ "step": 1408
803
+ },
804
+ {
805
+ "epoch": 88.99,
806
+ "eval_loss": 0.928646445274353,
807
+ "eval_runtime": 50.0432,
808
+ "eval_samples_per_second": 9.991,
809
+ "eval_wer": 0.5846202003338898,
810
+ "step": 1424
811
+ },
812
+ {
813
+ "epoch": 89.99,
814
+ "eval_loss": 0.915071964263916,
815
+ "eval_runtime": 48.6625,
816
+ "eval_samples_per_second": 10.275,
817
+ "eval_wer": 0.5746035058430717,
818
+ "step": 1440
819
+ },
820
+ {
821
+ "epoch": 90.99,
822
+ "eval_loss": 0.8867021203041077,
823
+ "eval_runtime": 49.6538,
824
+ "eval_samples_per_second": 10.07,
825
+ "eval_wer": 0.5595784641068448,
826
+ "step": 1456
827
+ },
828
+ {
829
+ "epoch": 91.99,
830
+ "eval_loss": 0.8880752921104431,
831
+ "eval_runtime": 49.9668,
832
+ "eval_samples_per_second": 10.007,
833
+ "eval_wer": 0.5575959933222037,
834
+ "step": 1472
835
+ },
836
+ {
837
+ "epoch": 92.99,
838
+ "eval_loss": 0.8759620189666748,
839
+ "eval_runtime": 49.4232,
840
+ "eval_samples_per_second": 10.117,
841
+ "eval_wer": 0.550813856427379,
842
+ "step": 1488
843
+ },
844
+ {
845
+ "epoch": 93.75,
846
+ "learning_rate": 1.062200956937799e-05,
847
+ "loss": 1.1621,
848
+ "step": 1500
849
+ },
850
+ {
851
+ "epoch": 93.99,
852
+ "eval_loss": 0.8622854948043823,
853
+ "eval_runtime": 50.0666,
854
+ "eval_samples_per_second": 9.987,
855
+ "eval_wer": 0.5416318864774624,
856
+ "step": 1504
857
+ },
858
+ {
859
+ "epoch": 94.99,
860
+ "eval_loss": 0.829269528388977,
861
+ "eval_runtime": 49.9208,
862
+ "eval_samples_per_second": 10.016,
863
+ "eval_wer": 0.5364148580968281,
864
+ "step": 1520
865
+ },
866
+ {
867
+ "epoch": 95.99,
868
+ "eval_loss": 0.8396909832954407,
869
+ "eval_runtime": 48.9453,
870
+ "eval_samples_per_second": 10.215,
871
+ "eval_wer": 0.5285893155258765,
872
+ "step": 1536
873
+ },
874
+ {
875
+ "epoch": 96.99,
876
+ "eval_loss": 0.8327041268348694,
877
+ "eval_runtime": 49.8765,
878
+ "eval_samples_per_second": 10.025,
879
+ "eval_wer": 0.5255634390651085,
880
+ "step": 1552
881
+ },
882
+ {
883
+ "epoch": 97.99,
884
+ "eval_loss": 0.8415578603744507,
885
+ "eval_runtime": 49.2064,
886
+ "eval_samples_per_second": 10.161,
887
+ "eval_wer": 0.528067612687813,
888
+ "step": 1568
889
+ },
890
+ {
891
+ "epoch": 98.99,
892
+ "eval_loss": 0.8029292225837708,
893
+ "eval_runtime": 49.3718,
894
+ "eval_samples_per_second": 10.127,
895
+ "eval_wer": 0.5109557595993323,
896
+ "step": 1584
897
+ },
898
+ {
899
+ "epoch": 99.99,
900
+ "learning_rate": 9.186602870813397e-06,
901
+ "loss": 1.0779,
902
+ "step": 1600
903
+ },
904
+ {
905
+ "epoch": 99.99,
906
+ "eval_loss": 0.8305151462554932,
907
+ "eval_runtime": 50.2704,
908
+ "eval_samples_per_second": 9.946,
909
+ "eval_wer": 0.5202420701168614,
910
+ "step": 1600
911
+ },
912
+ {
913
+ "epoch": 100.99,
914
+ "eval_loss": 0.794418215751648,
915
+ "eval_runtime": 48.8334,
916
+ "eval_samples_per_second": 10.239,
917
+ "eval_wer": 0.5020868113522537,
918
+ "step": 1616
919
+ },
920
+ {
921
+ "epoch": 101.99,
922
+ "eval_loss": 0.8060072064399719,
923
+ "eval_runtime": 50.0476,
924
+ "eval_samples_per_second": 9.99,
925
+ "eval_wer": 0.5134599332220368,
926
+ "step": 1632
927
+ },
928
+ {
929
+ "epoch": 102.99,
930
+ "eval_loss": 0.7654790878295898,
931
+ "eval_runtime": 49.2057,
932
+ "eval_samples_per_second": 10.161,
933
+ "eval_wer": 0.4867487479131887,
934
+ "step": 1648
935
+ },
936
+ {
937
+ "epoch": 103.99,
938
+ "eval_loss": 0.7860442996025085,
939
+ "eval_runtime": 49.9368,
940
+ "eval_samples_per_second": 10.013,
941
+ "eval_wer": 0.49947829716193654,
942
+ "step": 1664
943
+ },
944
+ {
945
+ "epoch": 104.99,
946
+ "eval_loss": 0.7663349509239197,
947
+ "eval_runtime": 49.0409,
948
+ "eval_samples_per_second": 10.196,
949
+ "eval_wer": 0.4897746243739566,
950
+ "step": 1680
951
+ },
952
+ {
953
+ "epoch": 105.99,
954
+ "eval_loss": 0.7740986347198486,
955
+ "eval_runtime": 49.5176,
956
+ "eval_samples_per_second": 10.097,
957
+ "eval_wer": 0.4820534223706177,
958
+ "step": 1696
959
+ },
960
+ {
961
+ "epoch": 106.25,
962
+ "learning_rate": 7.751196172248804e-06,
963
+ "loss": 1.0149,
964
+ "step": 1700
965
+ },
966
+ {
967
+ "epoch": 106.99,
968
+ "eval_loss": 0.72774338722229,
969
+ "eval_runtime": 49.0666,
970
+ "eval_samples_per_second": 10.19,
971
+ "eval_wer": 0.4803839732888147,
972
+ "step": 1712
973
+ },
974
+ {
975
+ "epoch": 107.99,
976
+ "eval_loss": 0.7349050641059875,
977
+ "eval_runtime": 49.3653,
978
+ "eval_samples_per_second": 10.129,
979
+ "eval_wer": 0.46869782971619367,
980
+ "step": 1728
981
+ },
982
+ {
983
+ "epoch": 108.99,
984
+ "eval_loss": 0.730620265007019,
985
+ "eval_runtime": 48.8671,
986
+ "eval_samples_per_second": 10.232,
987
+ "eval_wer": 0.4691151919866444,
988
+ "step": 1744
989
+ },
990
+ {
991
+ "epoch": 109.99,
992
+ "eval_loss": 0.7221301198005676,
993
+ "eval_runtime": 50.0918,
994
+ "eval_samples_per_second": 9.982,
995
+ "eval_wer": 0.4692195325542571,
996
+ "step": 1760
997
+ },
998
+ {
999
+ "epoch": 110.99,
1000
+ "eval_loss": 0.7237880825996399,
1001
+ "eval_runtime": 49.0461,
1002
+ "eval_samples_per_second": 10.194,
1003
+ "eval_wer": 0.4645242070116861,
1004
+ "step": 1776
1005
+ },
1006
+ {
1007
+ "epoch": 111.99,
1008
+ "eval_loss": 0.7305303812026978,
1009
+ "eval_runtime": 49.3111,
1010
+ "eval_samples_per_second": 10.14,
1011
+ "eval_wer": 0.46128964941569284,
1012
+ "step": 1792
1013
+ },
1014
+ {
1015
+ "epoch": 112.5,
1016
+ "learning_rate": 6.31578947368421e-06,
1017
+ "loss": 0.9691,
1018
+ "step": 1800
1019
+ },
1020
+ {
1021
+ "epoch": 112.99,
1022
+ "eval_loss": 0.7333260774612427,
1023
+ "eval_runtime": 49.8629,
1024
+ "eval_samples_per_second": 10.027,
1025
+ "eval_wer": 0.46295909849749584,
1026
+ "step": 1808
1027
+ },
1028
+ {
1029
+ "epoch": 113.99,
1030
+ "eval_loss": 0.738413393497467,
1031
+ "eval_runtime": 49.8063,
1032
+ "eval_samples_per_second": 10.039,
1033
+ "eval_wer": 0.4565943238731219,
1034
+ "step": 1824
1035
+ },
1036
+ {
1037
+ "epoch": 114.99,
1038
+ "eval_loss": 0.7188318371772766,
1039
+ "eval_runtime": 49.6388,
1040
+ "eval_samples_per_second": 10.073,
1041
+ "eval_wer": 0.46212437395659434,
1042
+ "step": 1840
1043
+ },
1044
+ {
1045
+ "epoch": 115.99,
1046
+ "eval_loss": 0.6936877369880676,
1047
+ "eval_runtime": 50.2237,
1048
+ "eval_samples_per_second": 9.955,
1049
+ "eval_wer": 0.4554465776293823,
1050
+ "step": 1856
1051
+ },
1052
+ {
1053
+ "epoch": 116.99,
1054
+ "eval_loss": 0.7130375504493713,
1055
+ "eval_runtime": 49.1949,
1056
+ "eval_samples_per_second": 10.164,
1057
+ "eval_wer": 0.4492904841402337,
1058
+ "step": 1872
1059
+ },
1060
+ {
1061
+ "epoch": 117.99,
1062
+ "eval_loss": 0.7151590585708618,
1063
+ "eval_runtime": 49.9104,
1064
+ "eval_samples_per_second": 10.018,
1065
+ "eval_wer": 0.4492904841402337,
1066
+ "step": 1888
1067
+ },
1068
+ {
1069
+ "epoch": 118.75,
1070
+ "learning_rate": 4.880382775119617e-06,
1071
+ "loss": 0.9391,
1072
+ "step": 1900
1073
+ },
1074
+ {
1075
+ "epoch": 118.99,
1076
+ "eval_loss": 0.7040448188781738,
1077
+ "eval_runtime": 49.7002,
1078
+ "eval_samples_per_second": 10.06,
1079
+ "eval_wer": 0.45398580968280466,
1080
+ "step": 1904
1081
+ },
1082
+ {
1083
+ "epoch": 119.99,
1084
+ "eval_loss": 0.6735167503356934,
1085
+ "eval_runtime": 49.7634,
1086
+ "eval_samples_per_second": 10.048,
1087
+ "eval_wer": 0.4393781302170284,
1088
+ "step": 1920
1089
+ },
1090
+ {
1091
+ "epoch": 120.99,
1092
+ "eval_loss": 0.6741155385971069,
1093
+ "eval_runtime": 49.3263,
1094
+ "eval_samples_per_second": 10.137,
1095
+ "eval_wer": 0.4453255425709516,
1096
+ "step": 1936
1097
+ },
1098
+ {
1099
+ "epoch": 121.99,
1100
+ "eval_loss": 0.7235249280929565,
1101
+ "eval_runtime": 49.3356,
1102
+ "eval_samples_per_second": 10.135,
1103
+ "eval_wer": 0.4561769616026711,
1104
+ "step": 1952
1105
+ },
1106
+ {
1107
+ "epoch": 122.99,
1108
+ "eval_loss": 0.6766911745071411,
1109
+ "eval_runtime": 49.3909,
1110
+ "eval_samples_per_second": 10.123,
1111
+ "eval_wer": 0.43541318864774625,
1112
+ "step": 1968
1113
+ },
1114
+ {
1115
+ "epoch": 123.99,
1116
+ "eval_loss": 0.6751864552497864,
1117
+ "eval_runtime": 49.3541,
1118
+ "eval_samples_per_second": 10.131,
1119
+ "eval_wer": 0.43343071786310516,
1120
+ "step": 1984
1121
+ },
1122
+ {
1123
+ "epoch": 124.99,
1124
+ "learning_rate": 3.444976076555024e-06,
1125
+ "loss": 0.9094,
1126
+ "step": 2000
1127
+ },
1128
+ {
1129
+ "epoch": 124.99,
1130
+ "eval_loss": 0.6876149773597717,
1131
+ "eval_runtime": 49.706,
1132
+ "eval_samples_per_second": 10.059,
1133
+ "eval_wer": 0.4397954924874791,
1134
+ "step": 2000
1135
+ },
1136
+ {
1137
+ "epoch": 125.99,
1138
+ "eval_loss": 0.7010783553123474,
1139
+ "eval_runtime": 49.0784,
1140
+ "eval_samples_per_second": 10.188,
1141
+ "eval_wer": 0.44835141903171954,
1142
+ "step": 2016
1143
+ },
1144
+ {
1145
+ "epoch": 126.99,
1146
+ "eval_loss": 0.6755461692810059,
1147
+ "eval_runtime": 48.3236,
1148
+ "eval_samples_per_second": 10.347,
1149
+ "eval_wer": 0.42497913188647746,
1150
+ "step": 2032
1151
+ },
1152
+ {
1153
+ "epoch": 127.99,
1154
+ "eval_loss": 0.7173025608062744,
1155
+ "eval_runtime": 49.6709,
1156
+ "eval_samples_per_second": 10.066,
1157
+ "eval_wer": 0.4476210350584307,
1158
+ "step": 2048
1159
+ },
1160
+ {
1161
+ "epoch": 128.99,
1162
+ "eval_loss": 0.7063612937927246,
1163
+ "eval_runtime": 49.982,
1164
+ "eval_samples_per_second": 10.004,
1165
+ "eval_wer": 0.44209098497495825,
1166
+ "step": 2064
1167
+ },
1168
+ {
1169
+ "epoch": 129.99,
1170
+ "eval_loss": 0.6794615387916565,
1171
+ "eval_runtime": 49.1686,
1172
+ "eval_samples_per_second": 10.169,
1173
+ "eval_wer": 0.43426544240400666,
1174
+ "step": 2080
1175
+ },
1176
+ {
1177
+ "epoch": 129.99,
1178
+ "step": 2080,
1179
+ "total_flos": 0,
1180
+ "train_runtime": 94419.6153,
1181
+ "train_samples_per_second": 0.024
1182
+ }
1183
+ ],
1184
+ "max_steps": 2240,
1185
+ "num_train_epochs": 140,
1186
+ "total_flos": 0,
1187
+ "trial_name": null,
1188
+ "trial_params": null
1189
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c1c71efb6adb93df87f465dc27c83d6ccf6bf7c561771e4f59ccb252db4cd828
3
+ size 2607
vocab.json ADDED
@@ -0,0 +1 @@
1
+ {"<pad>": 0, "|": 1, "<unk>": 2, "a": 3, "b": 4, "c": 5, "d": 6, "e": 7, "f": 8, "g": 9, "h": 10, "i": 11, "j": 12, "k": 13, "l": 14, "m": 15, "n": 16, "o": 17, "p": 18, "q": 19, "r": 20, "s": 21, "t": 22, "u": 23, "v": 24, "w": 25, "x": 26, "y": 27, "z": 28, "ç": 29, "ã": 30, "à": 31, "á": 32, "â": 33, "ê": 34, "é": 35, "í": 36, "ó": 37, "ô": 38, "õ": 39, "ú": 40, "û": 41, "-": 42, "<s>": 43, "</s>": 44}