tyoc213 committed
Commit 71c1843
1 Parent(s): c3b675f

update to 0.404281 WER

Files changed (3)
  1. config.json +8 -8
  2. pytorch_model.bin +2 -2
  3. vocab.json +1 -1
config.json CHANGED
@@ -1,11 +1,11 @@
 {
   "_name_or_path": "facebook/wav2vec2-large-xlsr-53",
-  "activation_dropout": 0.0,
+  "activation_dropout": 0.055,
   "apply_spec_augment": true,
   "architectures": [
     "Wav2Vec2ForCTC"
   ],
-  "attention_dropout": 0.1,
+  "attention_dropout": 0.094,
   "bos_token_id": 1,
   "conv_bias": true,
   "conv_dim": [
@@ -42,16 +42,16 @@
   "feat_extract_activation": "gelu",
   "feat_extract_dropout": 0.0,
   "feat_extract_norm": "layer",
-  "feat_proj_dropout": 0.0,
+  "feat_proj_dropout": 0.04,
   "final_dropout": 0.0,
   "gradient_checkpointing": true,
   "hidden_act": "gelu",
-  "hidden_dropout": 0.1,
+  "hidden_dropout": 0.047,
   "hidden_size": 1024,
   "initializer_range": 0.02,
   "intermediate_size": 4096,
   "layer_norm_eps": 1e-05,
-  "layerdrop": 0.1,
+  "layerdrop": 0.041,
   "mask_channel_length": 10,
   "mask_channel_min_space": 1,
   "mask_channel_other": 0.0,
@@ -62,7 +62,7 @@
   "mask_time_length": 10,
   "mask_time_min_space": 1,
   "mask_time_other": 0.0,
-  "mask_time_prob": 0.05,
+  "mask_time_prob": 0.082,
   "mask_time_selection": "static",
   "model_type": "wav2vec2",
   "num_attention_heads": 16,
@@ -70,7 +70,7 @@
   "num_conv_pos_embeddings": 128,
   "num_feat_extract_layers": 7,
   "num_hidden_layers": 24,
-  "pad_token_id": 44,
+  "pad_token_id": 51,
   "transformers_version": "4.5.0.dev0",
-  "vocab_size": 45
+  "vocab_size": 52
 }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f05c8cadb48e7e8aa1177ab3193ad36b7871134416dbaccd5c773220cf44dcc2
-size 1262118359
+oid sha256:307e18fcc2bbe0e93b45a0bfa7c5bdd71495368497148238bdef7062eb8deed6
+size 1262147031
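pytorch_model.bin is stored via Git LFS, so the diff only replaces the pointer's object hash and byte size. A small sketch for checking a downloaded copy against the new pointer (the path is an assumption: a local copy of the resolved weights, not the pointer file):

    import hashlib

    # Hash the downloaded weights and compare with the oid in the new LFS pointer.
    sha = hashlib.sha256()
    with open("pytorch_model.bin", "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
    print(sha.hexdigest())  # expect 307e18fc... for the 1262147031-byte file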
vocab.json CHANGED
@@ -1 +1 @@
-{"x": 0, "v": 1, "]": 2, "í": 3, ":": 4, "k": 5, "y": 6, "ö": 7, "'": 8, "h": 9, "¿": 11, "ñ": 12, "n": 13, "ü": 14, "ä": 15, "t": 16, "m": 17, "s": 18, "g": 19, "á": 20, "z": 21, "o": 22, "w": 23, "[": 24, "r": 25, "b": 26, "ß": 27, "d": 28, "ó": 29, "i": 30, "e": 31, "": 32, "ú": 33, "c": 34, "f": 35, "p": 36, "a": 37, "l": 38, "q": 39, "j": 40, "u": 41, "é": 42, "|": 10, "[UNK]": 43, "[PAD]": 44}
+{"\u00df": 1, "t": 2, "h": 3, "/": 4, "d": 5, "y": 6, "a": 7, "*": 8, "\u00a1": 9, "w": 10, "m": 11, "\u00fc": 12, "\u00e4": 13, "i": 14, "\u00e9": 15, "'": 16, "$": 17, "g": 18, "l": 19, "]": 20, ">": 21, "k": 22, "c": 23, "n": 24, "\u00e6": 25, "o": 26, "f": 27, "e": 28, "u": 29, "r": 30, "v": 31, "s": 32, "\u00f1": 33, "[": 34, "z": 35, "q": 36, "<": 37, "b": 38, "\u00ed": 39, "\u00fa": 40, "j": 41, "\u00bf": 42, "\u00f6": 43, "p": 44, "x": 45, "\u201e": 46, ":": 47, "\u00f3": 48, "\u00e1": 49, "|": 0, "[UNK]": 50, "[PAD]": 51}