manifoldix
committed on
Commit ad9957b
1 Parent(s): 306a9bf
acoustic model and LM
- added_tokens.json +1 -0
- alphabet.json +1 -0
- config.json +108 -0
- language_model/5gram.bin +3 -0
- language_model/attrs.json +1 -0
- language_model/unigrams.txt +0 -0
- preprocessor_config.json +10 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- trainer_state.json +916 -0
- training_args.bin +3 -0
- vocab.json +1 -0
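Taken together these files are everything transformers needs for LM-boosted CTC decoding: the wav2vec2 acoustic model plus a KenLM 5-gram and its decoder settings. A minimal inference sketch, assuming transformers, torch, librosa, pyctcdecode and kenlm are installed; the repository id and audio file name below are placeholders, not part of this commit:

```python
# Hedged sketch: transcribe one 16 kHz clip with the acoustic model + 5-gram LM.
import torch
import librosa
from transformers import Wav2Vec2ForCTC, Wav2Vec2ProcessorWithLM

repo_id = "manifoldix/<this-repo>"          # placeholder repository id
processor = Wav2Vec2ProcessorWithLM.from_pretrained(repo_id)
model = Wav2Vec2ForCTC.from_pretrained(repo_id)

# preprocessor_config.json expects mono 16 kHz input
speech, _ = librosa.load("sample.wav", sr=16_000, mono=True)
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# batch_decode runs pyctcdecode beam search with language_model/5gram.bin
print(processor.batch_decode(logits.numpy()).text[0])
```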
added_tokens.json
ADDED
@@ -0,0 +1 @@
{"<s>": 38, "</s>": 39}
alphabet.json
ADDED
@@ -0,0 +1 @@
{"labels": [" ", "\u0622", "\u0626", "\u0627", "\u0628", "\u062a", "\u062b", "\u062c", "\u062d", "\u062e", "\u062f", "\u0630", "\u0631", "\u0632", "\u0633", "\u0634", "\u0635", "\u0636", "\u0637", "\u0638", "\u0639", "\u063a", "\u0641", "\u0642", "\u0644", "\u0645", "\u0646", "\u0647", "\u0648", "\u067e", "\u0686", "\u0698", "\u06a9", "\u06af", "\u06cc", "\u200c", "\u2047", "", "<s>", "</s>"], "is_bpe": false}
config.json
ADDED
@@ -0,0 +1,108 @@
{
  "_name_or_path": "facebook/wav2vec2-xls-r-300m",
  "activation_dropout": 0.0,
  "adapter_kernel_size": 3,
  "adapter_stride": 2,
  "add_adapter": false,
  "apply_spec_augment": true,
  "architectures": ["Wav2Vec2ForCTC"],
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "classifier_proj_size": 256,
  "codevector_dim": 768,
  "contrastive_logits_temperature": 0.1,
  "conv_bias": true,
  "conv_dim": [512, 512, 512, 512, 512, 512, 512],
  "conv_kernel": [10, 3, 3, 3, 3, 2, 2],
  "conv_stride": [5, 2, 2, 2, 2, 2, 2],
  "ctc_loss_reduction": "mean",
  "ctc_zero_infinity": false,
  "diversity_loss_weight": 0.1,
  "do_stable_layer_norm": true,
  "eos_token_id": 2,
  "feat_extract_activation": "gelu",
  "feat_extract_dropout": 0.0,
  "feat_extract_norm": "layer",
  "feat_proj_dropout": 0.0,
  "feat_quantizer_dropout": 0.0,
  "final_dropout": 0.0,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout": 0.0,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-05,
  "layerdrop": 0.0,
  "mask_feature_length": 10,
  "mask_feature_min_masks": 0,
  "mask_feature_prob": 0.0,
  "mask_time_length": 10,
  "mask_time_min_masks": 2,
  "mask_time_prob": 0.05,
  "model_type": "wav2vec2",
  "num_adapter_layers": 3,
  "num_attention_heads": 16,
  "num_codevector_groups": 2,
  "num_codevectors_per_group": 320,
  "num_conv_pos_embedding_groups": 16,
  "num_conv_pos_embeddings": 128,
  "num_feat_extract_layers": 7,
  "num_hidden_layers": 24,
  "num_negatives": 100,
  "output_hidden_size": 1024,
  "pad_token_id": 37,
  "proj_codevector_dim": 768,
  "tdnn_dilation": [1, 2, 3, 1, 1],
  "tdnn_dim": [512, 512, 512, 512, 1500],
  "tdnn_kernel": [5, 3, 3, 1, 1],
  "torch_dtype": "float32",
  "transformers_version": "4.16.0.dev0",
  "use_weighted_layer_sum": false,
  "vocab_size": 40,
  "xvector_output_dim": 512
}
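config.json describes a stock XLS-R 300M encoder (24 transformer layers, hidden size 1024, 7 convolutional feature-extractor layers) with a 40-token CTC head and pad/blank id 37. A short sketch, assuming a local clone of the repository, for reading the fields that matter downstream:

```python
# Hedged sketch: pull a few architecture facts out of config.json.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(".")             # "." = local clone of this repo
print(config.model_type)                             # "wav2vec2"
print(config.num_hidden_layers, config.hidden_size)  # 24, 1024
print(config.vocab_size, config.pad_token_id)        # 40 output tokens, blank id 37

# Frame rate of the CTC logits: the conv_stride entries multiply to 5*2*2*2*2*2*2 = 320,
# i.e. one logit every 320 samples, or 20 ms at the 16 kHz sampling rate.
total_stride = 1
for s in config.conv_stride:
    total_stride *= s
print(total_stride)                                  # 320
```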
language_model/5gram.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2827e996580d285c26c7e1fced3df2aca861a2507c3907f1fdcccbccf54e3291
size 1062932976
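The .bin files are committed as Git LFS pointers; the pointer above resolves to a roughly 1 GB KenLM binary. A hedged sketch of fetching just that file with huggingface_hub (the repository id is a placeholder):

```python
# Hedged sketch: download the LFS-backed KenLM binary by itself.
from huggingface_hub import hf_hub_download

lm_path = hf_hub_download(
    repo_id="manifoldix/<this-repo>",       # placeholder repository id
    filename="language_model/5gram.bin",
)
print(lm_path)  # local cache path of the ~1 GB 5-gram model
```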
language_model/attrs.json
ADDED
@@ -0,0 +1 @@
{"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
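attrs.json carries the beam-search hyper-parameters that Wav2Vec2ProcessorWithLM hands to pyctcdecode: LM weight alpha, word-insertion bonus beta, a score offset for out-of-vocabulary words, and whether LM scoring respects sentence boundaries. A sketch of rebuilding the same decoder by hand from the raw files, assuming pyctcdecode and kenlm are installed and the LFS files are present locally:

```python
# Hedged sketch: construct the pyctcdecode decoder exactly as parameterized here.
import json
from pyctcdecode import build_ctcdecoder

labels = json.load(open("alphabet.json", encoding="utf-8"))["labels"]
with open("language_model/unigrams.txt", encoding="utf-8") as f:
    unigrams = [line.strip() for line in f if line.strip()]
attrs = json.load(open("language_model/attrs.json", encoding="utf-8"))

decoder = build_ctcdecoder(
    labels,
    kenlm_model_path="language_model/5gram.bin",
    unigrams=unigrams,
    alpha=attrs["alpha"],                    # weight of the 5-gram LM score
    beta=attrs["beta"],                      # bonus per inserted word
    unk_score_offset=attrs["unk_score_offset"],
    lm_score_boundary=attrs["score_boundary"],
)
# decoder.decode(ctc_logits) then returns an LM-rescored transcription.
```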
language_model/unigrams.txt
ADDED
The diff for this file is too large to render.
See raw diff
preprocessor_config.json
ADDED
@@ -0,0 +1,10 @@
{
  "do_normalize": true,
  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
  "feature_size": 1,
  "padding_side": "right",
  "padding_value": 0.0,
  "processor_class": "Wav2Vec2ProcessorWithLM",
  "return_attention_mask": true,
  "sampling_rate": 16000
}
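The preprocessor config pins the audio front end: one input channel, 16 kHz sampling rate, zero-mean/unit-variance normalization, right padding with 0.0, and an attention mask over the padded samples. A small sketch of the corresponding feature-extraction step on dummy audio:

```python
# Hedged sketch: normalize and pad raw audio the way this checkpoint expects.
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor(
    feature_size=1,
    sampling_rate=16_000,
    padding_value=0.0,
    do_normalize=True,
    return_attention_mask=True,
)

clips = [np.random.randn(16_000), np.random.randn(24_000)]   # 1 s and 1.5 s of noise
inputs = feature_extractor(clips, sampling_rate=16_000, padding=True, return_tensors="pt")
print(inputs.input_values.shape)      # (2, 24000): batch padded to the longest clip
print(inputs.attention_mask.sum(-1))  # number of real (unpadded) samples per clip
```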
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:67f553a9f4b259d5f91ab7a99ae359581e926d384978566f6d7afacb4e15ad10
size 1262087665
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer", "processor_class": "Wav2Vec2ProcessorWithLM"}
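tokenizer_config.json wires up a character-level CTC tokenizer: `|` is the word delimiter, `[UNK]`/`[PAD]` come from vocab.json, and `<s>`/`</s>` are the two added special tokens. A sketch, assuming a local clone of the repository, of how text round-trips through it:

```python
# Hedged sketch: load the CTC tokenizer and encode/decode a short Persian phrase.
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(".")   # "." = local clone of this repo
print(len(tokenizer))                    # 40 = 38 vocab.json entries + 2 added tokens
print(tokenizer.pad_token_id)            # 37, also used as the CTC blank
print(tokenizer.word_delimiter_token)    # "|"

ids = tokenizer("سلام دنیا").input_ids   # spaces are mapped to the "|" delimiter
print(tokenizer.decode(ids))             # back to "سلام دنیا"
```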
trainer_state.json
ADDED
@@ -0,0 +1,916 @@
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 119.99875156054931,
  "global_step": 24000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 2.0, "learning_rate": 7.960000000000001e-05, "loss": 7.6701, "step": 400},
    {"epoch": 2.0, "eval_loss": 2.900460958480835, "eval_runtime": 252.4053, "eval_samples_per_second": 20.653, "eval_steps_per_second": 2.583, "eval_wer": 1.0, "step": 400},
    {"epoch": 4.0, "learning_rate": 9.873191489361703e-05, "loss": 1.0784, "step": 800},
    {"epoch": 4.0, "eval_loss": 0.4933229088783264, "eval_runtime": 254.2522, "eval_samples_per_second": 20.503, "eval_steps_per_second": 2.564, "eval_wer": 0.46976841118086854, "step": 800},
    {"epoch": 6.0, "learning_rate": 9.702978723404256e-05, "loss": 0.3128, "step": 1200},
    {"epoch": 6.0, "eval_loss": 0.3993886709213257, "eval_runtime": 252.398, "eval_samples_per_second": 20.654, "eval_steps_per_second": 2.583, "eval_wer": 0.37059421158204603, "step": 1200},
    {"epoch": 8.0, "learning_rate": 9.532765957446809e-05, "loss": 0.2158, "step": 1600},
    {"epoch": 8.0, "eval_loss": 0.3813767731189728, "eval_runtime": 260.377, "eval_samples_per_second": 20.021, "eval_steps_per_second": 2.504, "eval_wer": 0.3755438038919426, "step": 1600},
    {"epoch": 10.0, "learning_rate": 9.362553191489361e-05, "loss": 0.1716, "step": 2000},
    {"epoch": 10.0, "eval_loss": 0.3713369071483612, "eval_runtime": 256.1576, "eval_samples_per_second": 20.351, "eval_steps_per_second": 2.545, "eval_wer": 0.33745799359158046, "step": 2000},
    {"epoch": 12.0, "learning_rate": 9.192340425531915e-05, "loss": 0.1419, "step": 2400},
    {"epoch": 12.0, "eval_loss": 0.3995266556739807, "eval_runtime": 252.1996, "eval_samples_per_second": 20.67, "eval_steps_per_second": 2.585, "eval_wer": 0.35183786177612214, "step": 2400},
    {"epoch": 14.0, "learning_rate": 9.022127659574468e-05, "loss": 0.1236, "step": 2800},
    {"epoch": 14.0, "eval_loss": 0.41960370540618896, "eval_runtime": 256.7174, "eval_samples_per_second": 20.306, "eval_steps_per_second": 2.54, "eval_wer": 0.34042774897751843, "step": 2800},
    {"epoch": 16.0, "learning_rate": 8.851914893617021e-05, "loss": 0.1136, "step": 3200},
    {"epoch": 16.0, "eval_loss": 0.3961251676082611, "eval_runtime": 257.0042, "eval_samples_per_second": 20.284, "eval_steps_per_second": 2.537, "eval_wer": 0.32029072342199183, "step": 3200},
    {"epoch": 18.0, "learning_rate": 8.682127659574468e-05, "loss": 0.1018, "step": 3600},
    {"epoch": 18.0, "eval_loss": 0.4284366965293884, "eval_runtime": 253.1831, "eval_samples_per_second": 20.59, "eval_steps_per_second": 2.575, "eval_wer": 0.3373798421340558, "step": 3600},
    {"epoch": 20.0, "learning_rate": 8.511914893617021e-05, "loss": 0.0948, "step": 4000},
    {"epoch": 20.0, "eval_loss": 0.4246082305908203, "eval_runtime": 250.1867, "eval_samples_per_second": 20.836, "eval_steps_per_second": 2.606, "eval_wer": 0.32953864589574594, "step": 4000},
    {"epoch": 22.0, "learning_rate": 8.341702127659575e-05, "loss": 0.0837, "step": 4400},
    {"epoch": 22.0, "eval_loss": 0.41016092896461487, "eval_runtime": 255.5501, "eval_samples_per_second": 20.399, "eval_steps_per_second": 2.551, "eval_wer": 0.31726886706437074, "step": 4400},
    {"epoch": 24.0, "learning_rate": 8.171489361702128e-05, "loss": 0.0823, "step": 4800},
    {"epoch": 24.0, "eval_loss": 0.42129427194595337, "eval_runtime": 254.0415, "eval_samples_per_second": 20.52, "eval_steps_per_second": 2.567, "eval_wer": 0.3245109021283247, "step": 4800},
    {"epoch": 26.0, "learning_rate": 8.001276595744681e-05, "loss": 0.079, "step": 5200},
    {"epoch": 26.0, "eval_loss": 0.4289919435977936, "eval_runtime": 256.8225, "eval_samples_per_second": 20.298, "eval_steps_per_second": 2.539, "eval_wer": 0.3159923932581343, "step": 5200},
    {"epoch": 28.0, "learning_rate": 7.831063829787234e-05, "loss": 0.0736, "step": 5600},
    {"epoch": 28.0, "eval_loss": 0.45376530289649963, "eval_runtime": 250.495, "eval_samples_per_second": 20.811, "eval_steps_per_second": 2.603, "eval_wer": 0.3086982572224972, "step": 5600},
    {"epoch": 30.0, "learning_rate": 7.66127659574468e-05, "loss": 0.0702, "step": 6000},
    {"epoch": 30.0, "eval_loss": 0.4636918306350708, "eval_runtime": 251.4184, "eval_samples_per_second": 20.734, "eval_steps_per_second": 2.593, "eval_wer": 0.31151170969338576, "step": 6000},
    {"epoch": 32.0, "learning_rate": 7.491063829787235e-05, "loss": 0.0668, "step": 6400},
    {"epoch": 32.0, "eval_loss": 0.4827902615070343, "eval_runtime": 248.7074, "eval_samples_per_second": 20.96, "eval_steps_per_second": 2.622, "eval_wer": 0.3185192903847657, "step": 6400},
    {"epoch": 34.0, "learning_rate": 7.320851063829788e-05, "loss": 0.0646, "step": 6800},
    {"epoch": 34.0, "eval_loss": 0.4427362084388733, "eval_runtime": 252.8917, "eval_samples_per_second": 20.614, "eval_steps_per_second": 2.578, "eval_wer": 0.311381457264178, "step": 6800},
    {"epoch": 36.0, "learning_rate": 7.15063829787234e-05, "loss": 0.06, "step": 7200},
    {"epoch": 36.0, "eval_loss": 0.4571715295314789, "eval_runtime": 252.2048, "eval_samples_per_second": 20.67, "eval_steps_per_second": 2.585, "eval_wer": 0.311615911636752, "step": 7200},
    {"epoch": 38.0, "learning_rate": 6.980425531914893e-05, "loss": 0.0584, "step": 7600},
    {"epoch": 38.0, "eval_loss": 0.4944392740726471, "eval_runtime": 251.4845, "eval_samples_per_second": 20.729, "eval_steps_per_second": 2.593, "eval_wer": 0.30804699507645816, "step": 7600},
    {"epoch": 40.0, "learning_rate": 6.810212765957446e-05, "loss": 0.0545, "step": 8000},
    {"epoch": 40.0, "eval_loss": 0.4599160850048065, "eval_runtime": 253.7962, "eval_samples_per_second": 20.54, "eval_steps_per_second": 2.569, "eval_wer": 0.3099747310287337, "step": 8000},
    {"epoch": 42.0, "learning_rate": 6.64e-05, "loss": 0.0536, "step": 8400},
    {"epoch": 42.0, "eval_loss": 0.4586937427520752, "eval_runtime": 256.4427, "eval_samples_per_second": 20.328, "eval_steps_per_second": 2.542, "eval_wer": 0.30338395811081875, "step": 8400},
    {"epoch": 44.0, "learning_rate": 6.469787234042553e-05, "loss": 0.0506, "step": 8800},
    {"epoch": 44.0, "eval_loss": 0.46839669346809387, "eval_runtime": 253.5984, "eval_samples_per_second": 20.556, "eval_steps_per_second": 2.571, "eval_wer": 0.3020814338187407, "step": 8800},
    {"epoch": 46.0, "learning_rate": 6.299574468085107e-05, "loss": 0.0504, "step": 9200},
    {"epoch": 46.0, "eval_loss": 0.4694036841392517, "eval_runtime": 252.7537, "eval_samples_per_second": 20.625, "eval_steps_per_second": 2.58, "eval_wer": 0.30710917758616196, "step": 9200},
    {"epoch": 48.0, "learning_rate": 6.12936170212766e-05, "loss": 0.048, "step": 9600},
    {"epoch": 48.0, "eval_loss": 0.4677378535270691, "eval_runtime": 252.9941, "eval_samples_per_second": 20.605, "eval_steps_per_second": 2.577, "eval_wer": 0.2971578919946857, "step": 9600},
    {"epoch": 50.0, "learning_rate": 5.9591489361702134e-05, "loss": 0.045, "step": 10000},
    {"epoch": 50.0, "eval_loss": 0.46043330430984497, "eval_runtime": 249.2435, "eval_samples_per_second": 20.915, "eval_steps_per_second": 2.616, "eval_wer": 0.29778310365488314, "step": 10000},
    {"epoch": 52.0, "learning_rate": 5.7893617021276604e-05, "loss": 0.0436, "step": 10400},
    {"epoch": 52.0, "eval_loss": 0.4766680598258972, "eval_runtime": 251.6774, "eval_samples_per_second": 20.713, "eval_steps_per_second": 2.591, "eval_wer": 0.29731419490973504, "step": 10400},
    {"epoch": 54.0, "learning_rate": 5.619148936170213e-05, "loss": 0.0422, "step": 10800},
    {"epoch": 54.0, "eval_loss": 0.4668702185153961, "eval_runtime": 252.6671, "eval_samples_per_second": 20.632, "eval_steps_per_second": 2.58, "eval_wer": 0.2975486492823091, "step": 10800},
    {"epoch": 56.0, "learning_rate": 5.448936170212766e-05, "loss": 0.0404, "step": 11200},
    {"epoch": 56.0, "eval_loss": 0.4487351179122925, "eval_runtime": 250.621, "eval_samples_per_second": 20.8, "eval_steps_per_second": 2.602, "eval_wer": 0.2965066298486467, "step": 11200},
    {"epoch": 58.0, "learning_rate": 5.278723404255319e-05, "loss": 0.04, "step": 11600},
    {"epoch": 58.0, "eval_loss": 0.49005138874053955, "eval_runtime": 254.1559, "eval_samples_per_second": 20.511, "eval_steps_per_second": 2.565, "eval_wer": 0.2988511735743872, "step": 11600},
    {"epoch": 60.0, "learning_rate": 5.108510638297873e-05, "loss": 0.0375, "step": 12000},
    {"epoch": 60.0, "eval_loss": 0.46530571579933167, "eval_runtime": 255.1439, "eval_samples_per_second": 20.432, "eval_steps_per_second": 2.555, "eval_wer": 0.2963763774194389, "step": 12000},
    {"epoch": 62.0, "learning_rate": 4.938297872340426e-05, "loss": 0.0353, "step": 12400},
    {"epoch": 62.0, "eval_loss": 0.5175049901008606, "eval_runtime": 253.9576, "eval_samples_per_second": 20.527, "eval_steps_per_second": 2.567, "eval_wer": 0.2963242764477558, "step": 12400},
    {"epoch": 64.0, "learning_rate": 4.768085106382979e-05, "loss": 0.0346, "step": 12800},
    {"epoch": 64.0, "eval_loss": 0.5116600394248962, "eval_runtime": 256.8702, "eval_samples_per_second": 20.294, "eval_steps_per_second": 2.538, "eval_wer": 0.2914528355953839, "step": 12800},
    {"epoch": 66.0, "learning_rate": 4.597872340425532e-05, "loss": 0.0341, "step": 13200},
    {"epoch": 66.0, "eval_loss": 0.4671209454536438, "eval_runtime": 255.8006, "eval_samples_per_second": 20.379, "eval_steps_per_second": 2.549, "eval_wer": 0.2945528434105296, "step": 13200},
    {"epoch": 68.0, "learning_rate": 4.4280851063829785e-05, "loss": 0.0332, "step": 13600},
    {"epoch": 68.0, "eval_loss": 0.48402974009513855, "eval_runtime": 251.9224, "eval_samples_per_second": 20.693, "eval_steps_per_second": 2.588, "eval_wer": 0.2882746763227134, "step": 13600},
    {"epoch": 70.0, "learning_rate": 4.257872340425532e-05, "loss": 0.0307, "step": 14000},
    {"epoch": 70.0, "eval_loss": 0.4735446870326996, "eval_runtime": 251.6191, "eval_samples_per_second": 20.718, "eval_steps_per_second": 2.591, "eval_wer": 0.2853570219084586, "step": 14000},
    {"epoch": 72.0, "learning_rate": 4.0876595744680854e-05, "loss": 0.0296, "step": 14400},
    {"epoch": 72.0, "eval_loss": 0.49666687846183777, "eval_runtime": 255.1334, "eval_samples_per_second": 20.432, "eval_steps_per_second": 2.556, "eval_wer": 0.2870242530023185, "step": 14400},
    {"epoch": 74.0, "learning_rate": 3.917446808510639e-05, "loss": 0.0288, "step": 14800},
    {"epoch": 74.0, "eval_loss": 0.4831916093826294, "eval_runtime": 250.196, "eval_samples_per_second": 20.836, "eval_steps_per_second": 2.606, "eval_wer": 0.28111079271628414, "step": 14800},
    {"epoch": 76.0, "learning_rate": 3.747234042553192e-05, "loss": 0.0278, "step": 15200},
    {"epoch": 76.0, "eval_loss": 0.5073911547660828, "eval_runtime": 251.6593, "eval_samples_per_second": 20.715, "eval_steps_per_second": 2.591, "eval_wer": 0.2897595540156824, "step": 15200},
    {"epoch": 78.0, "learning_rate": 3.577021276595745e-05, "loss": 0.0264, "step": 15600},
    {"epoch": 78.0, "eval_loss": 0.49896690249443054, "eval_runtime": 253.2919, "eval_samples_per_second": 20.581, "eval_steps_per_second": 2.574, "eval_wer": 0.28066793445697763, "step": 15600},
    {"epoch": 80.0, "learning_rate": 3.406808510638298e-05, "loss": 0.0259, "step": 16000},
    {"epoch": 80.0, "eval_loss": 0.4933984577655792, "eval_runtime": 250.843, "eval_samples_per_second": 20.782, "eval_steps_per_second": 2.599, "eval_wer": 0.28478391121994423, "step": 16000},
    {"epoch": 82.0, "learning_rate": 3.2365957446808515e-05, "loss": 0.0256, "step": 16400},
    {"epoch": 82.0, "eval_loss": 0.5108290314674377, "eval_runtime": 254.0809, "eval_samples_per_second": 20.517, "eval_steps_per_second": 2.566, "eval_wer": 0.2834032354703415, "step": 16400},
    {"epoch": 84.0, "learning_rate": 3.066382978723404e-05, "loss": 0.0241, "step": 16800},
    {"epoch": 84.0, "eval_loss": 0.5010423064231873, "eval_runtime": 253.2137, "eval_samples_per_second": 20.587, "eval_steps_per_second": 2.575, "eval_wer": 0.28137129757469975, "step": 16800},
    {"epoch": 86.0, "learning_rate": 2.8961702127659574e-05, "loss": 0.0225, "step": 17200},
    {"epoch": 86.0, "eval_loss": 0.5098214745521545, "eval_runtime": 251.7862, "eval_samples_per_second": 20.704, "eval_steps_per_second": 2.589, "eval_wer": 0.2809544898012348, "step": 17200},
    {"epoch": 88.0, "learning_rate": 2.725957446808511e-05, "loss": 0.0214, "step": 17600},
    {"epoch": 88.0, "eval_loss": 0.5001631379127502, "eval_runtime": 251.7107, "eval_samples_per_second": 20.71, "eval_steps_per_second": 2.59, "eval_wer": 0.27485867611430953, "step": 17600},
    {"epoch": 90.0, "learning_rate": 2.5565957446808516e-05, "loss": 0.0212, "step": 18000},
    {"epoch": 90.0, "eval_loss": 0.5039480328559875, "eval_runtime": 256.5398, "eval_samples_per_second": 20.32, "eval_steps_per_second": 2.542, "eval_wer": 0.27517128194440826, "step": 18000},
    {"epoch": 92.0, "learning_rate": 2.3863829787234044e-05, "loss": 0.0198, "step": 18400},
    {"epoch": 92.0, "eval_loss": 0.5044199824333191, "eval_runtime": 250.6127, "eval_samples_per_second": 20.801, "eval_steps_per_second": 2.602, "eval_wer": 0.2775939771276734, "step": 18400},
    {"epoch": 94.0, "learning_rate": 2.2161702127659575e-05, "loss": 0.0194, "step": 18800},
    {"epoch": 94.0, "eval_loss": 0.5091202855110168, "eval_runtime": 252.5846, "eval_samples_per_second": 20.639, "eval_steps_per_second": 2.581, "eval_wer": 0.2784796936462865, "step": 18800},
    {"epoch": 96.0, "learning_rate": 2.0459574468085106e-05, "loss": 0.0193, "step": 19200},
    {"epoch": 96.0, "eval_loss": 0.5078316926956177, "eval_runtime": 254.2064, "eval_samples_per_second": 20.507, "eval_steps_per_second": 2.565, "eval_wer": 0.2763435538072785, "step": 19200},
    {"epoch": 98.0, "learning_rate": 1.875744680851064e-05, "loss": 0.0189, "step": 19600},
    {"epoch": 98.0, "eval_loss": 0.50649094581604, "eval_runtime": 250.3726, "eval_samples_per_second": 20.821, "eval_steps_per_second": 2.604, "eval_wer": 0.2715502644124313, "step": 19600},
    {"epoch": 100.0, "learning_rate": 1.7055319148936173e-05, "loss": 0.0169, "step": 20000},
    {"epoch": 100.0, "eval_loss": 0.5111807584762573, "eval_runtime": 251.2786, "eval_samples_per_second": 20.746, "eval_steps_per_second": 2.595, "eval_wer": 0.2712897595540157, "step": 20000},
    {"epoch": 102.0, "learning_rate": 1.5353191489361704e-05, "loss": 0.0162, "step": 20400},
    {"epoch": 102.0, "eval_loss": 0.5116817951202393, "eval_runtime": 249.9762, "eval_samples_per_second": 20.854, "eval_steps_per_second": 2.608, "eval_wer": 0.27011748769114546, "step": 20400},
    {"epoch": 104.0, "learning_rate": 1.3651063829787234e-05, "loss": 0.0155, "step": 20800},
    {"epoch": 104.0, "eval_loss": 0.5181553363800049, "eval_runtime": 252.5167, "eval_samples_per_second": 20.644, "eval_steps_per_second": 2.582, "eval_wer": 0.26858050902649333, "step": 20800},
    {"epoch": 106.0, "learning_rate": 1.1948936170212767e-05, "loss": 0.0162, "step": 21200},
    {"epoch": 106.0, "eval_loss": 0.5102177262306213, "eval_runtime": 253.88, "eval_samples_per_second": 20.533, "eval_steps_per_second": 2.568, "eval_wer": 0.26886706437075053, "step": 21200},
    {"epoch": 108.0, "learning_rate": 1.0246808510638298e-05, "loss": 0.015, "step": 21600},
    {"epoch": 108.0, "eval_loss": 0.5183060169219971, "eval_runtime": 256.9895, "eval_samples_per_second": 20.285, "eval_steps_per_second": 2.537, "eval_wer": 0.27050824497876885, "step": 21600},
    {"epoch": 110.0, "learning_rate": 8.54468085106383e-06, "loss": 0.0148, "step": 22000},
    {"epoch": 110.0, "eval_loss": 0.524269700050354, "eval_runtime": 251.4198, "eval_samples_per_second": 20.734, "eval_steps_per_second": 2.593, "eval_wer": 0.269075468257483, "step": 22000},
    {"epoch": 112.0, "learning_rate": 6.846808510638299e-06, "loss": 0.0142, "step": 22400},
    {"epoch": 112.0, "eval_loss": 0.5115846991539001, "eval_runtime": 252.9673, "eval_samples_per_second": 20.607, "eval_steps_per_second": 2.577, "eval_wer": 0.269075468257483, "step": 22400},
    {"epoch": 114.0, "learning_rate": 5.14468085106383e-06, "loss": 0.0133, "step": 22800},
    {"epoch": 114.0, "eval_loss": 0.5274414420127869, "eval_runtime": 253.5713, "eval_samples_per_second": 20.558, "eval_steps_per_second": 2.571, "eval_wer": 0.2676166410503556, "step": 22800},
    {"epoch": 116.0, "learning_rate": 3.4425531914893614e-06, "loss": 0.0138, "step": 23200},
    {"epoch": 116.0, "eval_loss": 0.5216977596282959, "eval_runtime": 253.8491, "eval_samples_per_second": 20.536, "eval_steps_per_second": 2.568, "eval_wer": 0.26722588376273215, "step": 23200},
    {"epoch": 118.0, "learning_rate": 1.7404255319148937e-06, "loss": 0.0135, "step": 23600},
    {"epoch": 118.0, "eval_loss": 0.5230008363723755, "eval_runtime": 257.6327, "eval_samples_per_second": 20.234, "eval_steps_per_second": 2.531, "eval_wer": 0.2673300857060984, "step": 23600},
    {"epoch": 120.0, "learning_rate": 3.8297872340425535e-08, "loss": 0.0134, "step": 24000},
    {"epoch": 120.0, "eval_loss": 0.5197370648384094, "eval_runtime": 253.9198, "eval_samples_per_second": 20.53, "eval_steps_per_second": 2.568, "eval_wer": 0.2666527730742178, "step": 24000}
  ],
  "max_steps": 24000,
  "num_train_epochs": 120,
  "total_flos": 1.951590305315055e+20,
  "trial_name": null,
  "trial_params": null
}
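trainer_state.json is the standard Hugging Face Trainer log for this run: 24 000 optimization steps over 120 epochs, a learning rate that warms up and then decays, training loss falling from 7.67 to 0.013, and eval WER improving from 1.0 at step 400 to about 0.267 at step 24 000. A small sketch for extracting that WER curve from the file:

```python
# Hedged sketch: read the eval WER trajectory out of trainer_state.json.
import json

state = json.load(open("trainer_state.json", encoding="utf-8"))
eval_points = [(e["step"], e["eval_wer"]) for e in state["log_history"] if "eval_wer" in e]

for step, wer in eval_points[:3]:
    print(step, wer)                     # steps 400, 800, 1200 with WER ~1.0, ~0.470, ~0.371

best_step, best_wer = min(eval_points, key=lambda p: p[1])
print(best_step, round(best_wer, 4))     # 24000 0.2667
```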
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fbd4d80b91239ccb59f3fd4c3d9a0971fbd7a93d783fbf38499a7e2b8aef470e
size 2991
vocab.json
ADDED
@@ -0,0 +1 @@
{"آ": 1, "ئ": 2, "ا": 3, "ب": 4, "ت": 5, "ث": 6, "ج": 7, "ح": 8, "خ": 9, "د": 10, "ذ": 11, "ر": 12, "ز": 13, "س": 14, "ش": 15, "ص": 16, "ض": 17, "ط": 18, "ظ": 19, "ع": 20, "غ": 21, "ف": 22, "ق": 23, "ل": 24, "م": 25, "ن": 26, "ه": 27, "و": 28, "پ": 29, "چ": 30, "ژ": 31, "ک": 32, "گ": 33, "ی": 34, "\u200c": 35, "|": 0, "[UNK]": 36, "[PAD]": 37}
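vocab.json holds the 38 base entries of the CTC vocabulary (the `|` word delimiter at index 0, the Persian letters, the zero-width non-joiner `\u200c` at 35, `[UNK]` at 36 and `[PAD]` at 37); added_tokens.json contributes `<s>` and `</s>`, matching the 40 labels in alphabet.json and `vocab_size: 40` in config.json. A quick consistency-check sketch over the committed JSON files, assuming they sit in the current directory:

```python
# Hedged sketch: cross-check vocabulary sizes and the blank/pad index across files.
import json

vocab = json.load(open("vocab.json", encoding="utf-8"))
added = json.load(open("added_tokens.json", encoding="utf-8"))
labels = json.load(open("alphabet.json", encoding="utf-8"))["labels"]
config = json.load(open("config.json", encoding="utf-8"))

assert len(vocab) + len(added) == len(labels) == config["vocab_size"] == 40
assert vocab["[PAD]"] == config["pad_token_id"] == 37   # CTC blank
assert vocab["|"] == 0                                   # word delimiter
print("vocab.json, added_tokens.json, alphabet.json and config.json agree")
```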