Santiago Hincapie committed on
Commit e6b6953
1 Parent(s): b78894c

feat: update model config

added_tokens.json DELETED
@@ -1 +0,0 @@
- {"<s>": 37, "</s>": 38}
 
 
config.json DELETED
@@ -1,107 +0,0 @@
- {
-   "_name_or_path": "facebook/wav2vec2-xls-r-300m",
-   "activation_dropout": 0.0,
-   "adapter_kernel_size": 3,
-   "adapter_stride": 2,
-   "add_adapter": false,
-   "apply_spec_augment": true,
-   "architectures": [
-     "Wav2Vec2ForCTC"
-   ],
-   "attention_dropout": 0.0,
-   "bos_token_id": 1,
-   "classifier_proj_size": 256,
-   "codevector_dim": 768,
-   "contrastive_logits_temperature": 0.1,
-   "conv_bias": true,
-   "conv_dim": [
-     512,
-     512,
-     512,
-     512,
-     512,
-     512,
-     512
-   ],
-   "conv_kernel": [
-     10,
-     3,
-     3,
-     3,
-     3,
-     2,
-     2
-   ],
-   "conv_stride": [
-     5,
-     2,
-     2,
-     2,
-     2,
-     2,
-     2
-   ],
-   "ctc_loss_reduction": "mean",
-   "ctc_zero_infinity": false,
-   "diversity_loss_weight": 0.1,
-   "do_stable_layer_norm": true,
-   "eos_token_id": 2,
-   "feat_extract_activation": "gelu",
-   "feat_extract_dropout": 0.0,
-   "feat_extract_norm": "layer",
-   "feat_proj_dropout": 0.0,
-   "feat_quantizer_dropout": 0.0,
-   "final_dropout": 0.0,
-   "hidden_act": "gelu",
-   "hidden_dropout": 0.0,
-   "hidden_size": 1024,
-   "initializer_range": 0.02,
-   "intermediate_size": 4096,
-   "layer_norm_eps": 1e-05,
-   "layerdrop": 0.0,
-   "mask_feature_length": 64,
-   "mask_feature_min_masks": 0,
-   "mask_feature_prob": 0.25,
-   "mask_time_length": 10,
-   "mask_time_min_masks": 2,
-   "mask_time_prob": 0.75,
-   "model_type": "wav2vec2",
-   "num_adapter_layers": 3,
-   "num_attention_heads": 16,
-   "num_codevector_groups": 2,
-   "num_codevectors_per_group": 320,
-   "num_conv_pos_embedding_groups": 16,
-   "num_conv_pos_embeddings": 128,
-   "num_feat_extract_layers": 7,
-   "num_hidden_layers": 24,
-   "num_negatives": 100,
-   "output_hidden_size": 1024,
-   "pad_token_id": 36,
-   "proj_codevector_dim": 768,
-   "tdnn_dilation": [
-     1,
-     2,
-     3,
-     1,
-     1
-   ],
-   "tdnn_dim": [
-     512,
-     512,
-     512,
-     512,
-     1500
-   ],
-   "tdnn_kernel": [
-     5,
-     3,
-     3,
-     1,
-     1
-   ],
-   "torch_dtype": "float32",
-   "transformers_version": "4.16.0.dev0",
-   "use_weighted_layer_sum": false,
-   "vocab_size": 39,
-   "xvector_output_dim": 512
- }
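For context only, here is a minimal sketch (assuming the Hugging Face transformers API; it is not part of this commit) of how a config.json like the one deleted above is normally consumed: Wav2Vec2Config parses the JSON and Wav2Vec2ForCTC builds the matching architecture, with the CTC head sized by vocab_size=39 and padding handled via pad_token_id=36. The trained weights themselves lived in the pytorch_model.bin removed further down.

# Sketch only: load the deleted config.json from a local checkout (hypothetical
# path) and instantiate the architecture it describes, with random weights.
from transformers import Wav2Vec2Config, Wav2Vec2ForCTC

config = Wav2Vec2Config.from_json_file("config.json")
assert config.model_type == "wav2vec2"
assert config.vocab_size == 39 and config.pad_token_id == 36

model = Wav2Vec2ForCTC(config)  # 24 transformer layers, hidden_size 1024, 39 CTC labels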
preprocessor_config.json DELETED
@@ -1,9 +0,0 @@
- {
-   "do_normalize": true,
-   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
-   "feature_size": 1,
-   "padding_side": "right",
-   "padding_value": 0,
-   "return_attention_mask": true,
-   "sampling_rate": 16000
- }
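A brief sketch of the same preprocessor settings expressed in code (again assuming the transformers API; not part of the commit). Saving the extractor would regenerate a preprocessor_config.json like the one deleted above.

# Sketch only: the deleted preprocessor_config.json corresponds to a
# Wav2Vec2FeatureExtractor built with these arguments.
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor(
    feature_size=1,              # raw waveform: one float per sample
    sampling_rate=16000,         # audio is expected at 16 kHz
    padding_value=0.0,
    do_normalize=True,           # zero-mean / unit-variance normalisation
    return_attention_mask=True,  # recommended for layer-norm models such as XLS-R
)
feature_extractor.save_pretrained("./")  # writes preprocessor_config.json back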
pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:122d5480924adf4705e1f37e28c0c798df35e76a41767b73474d3d5df8c1062f
- size 1262083569
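As an aside, a small sanity-check sketch (not part of the commit): the Git LFS pointer above stores the checkpoint by its sha256 oid, and its size field is consistent with a float32 checkpoint of roughly 315M parameters.

# Sketch only: rough parameter count implied by the LFS pointer's size field.
size_bytes = 1_262_083_569            # "size" value above
approx_params = size_bytes / 4        # float32 weights, 4 bytes each
print(f"{approx_params / 1e6:.0f}M")  # ~316M float32 parameters (ignoring container overhead)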
 
 
 
 
run.sh CHANGED
@@ -1,36 +1,38 @@
  #!/bin/sh

  export CUDA_VISIBLE_DEVICES=1,2
  python src/run_speech_recognition_ctc_bnb.py \
  --dataset_name="mozilla-foundation/common_voice_7_0" \
- --model_name_or_path="facebook/wav2vec2-xls-r-300m" \
  --dataset_config_name="et" \
  --output_dir="./" \
  --overwrite_output_dir \
- --num_train_epochs="50" \
- --per_device_train_batch_size="16" \
- --per_device_eval_batch_size="16" \
- --gradient_accumulation_steps="2" \
- --learning_rate="7.5e-5" \
- --save_total_limit="3" \
- --warmup_steps="2000" \
- --evaluation_strategy="steps" \
- --text_column_name="sentence" \
- --length_column_name="input_length" \
- --save_steps="500" \
- --eval_steps="500" \
- --logging_steps="100" \
- --layerdrop="0.0" \
  --freeze_feature_encoder \
- --feat_proj_dropout="0.0" \
- --mask_time_prob="0.75" \
- --mask_time_length="10" \
- --mask_feature_prob="0.25" \
- --mask_feature_length="64" \
  --chars_to_ignore , ? . ! \- \; \: \" “ % ‘ ” � — ’ … – \
  --gradient_checkpointing \
- --use_auth_token \
  --fp16 \
  --group_by_length \
  --do_train --do_eval \
- --push_to_hub

  #!/bin/sh

  export CUDA_VISIBLE_DEVICES=1,2
+
  python src/run_speech_recognition_ctc_bnb.py \
  --dataset_name="mozilla-foundation/common_voice_7_0" \
+ --model_name_or_path="facebook/wav2vec2-xls-r-1b" \
  --dataset_config_name="et" \
  --output_dir="./" \
  --overwrite_output_dir \
+ --num_train_epochs=100 \
+ --per_device_train_batch_size=32 \
+ --per_device_eval_batch_size=32 \
+ --gradient_accumulation_steps=2 \
+ --learning_rate=3e-4 \
+ --save_total_limit=3 \
+ --warmup_steps=500 \
+ --evaluation_strategy=steps \
+ --text_column_name=sentence \
+ --length_column_name=input_length \
+ --save_steps=500 \
+ --eval_steps=500 \
+ --logging_steps=100 \
+ --layerdrop=0.0 \
  --freeze_feature_encoder \
+ --feat_proj_dropout=0.1 \
  --chars_to_ignore , ? . ! \- \; \: \" “ % ‘ ” � — ’ … – \
  --gradient_checkpointing \
+ --lr_scheduler_type=cosine \
  --fp16 \
  --group_by_length \
+ --mask_time_prob=0.1 \
+ --mask_time_length=10 \
+ --report_to=wandb \
+ --run_name="cosine+drop_proj+low_specaugment-1b" \
  --do_train --do_eval \
+ --use_auth_token \
+ --push_to_hub
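A back-of-the-envelope sketch (not part of the commit) of what the batch-size changes above amount to, assuming the two GPUs exposed via CUDA_VISIBLE_DEVICES=1,2: the effective batch per optimizer step doubles, alongside the higher peak learning rate.

# Sketch only: effective (global) batch size per optimizer step,
# assuming 2 visible GPUs as set by CUDA_VISIBLE_DEVICES=1,2.
def effective_batch(per_device: int, grad_accum: int, n_gpus: int = 2) -> int:
    return per_device * grad_accum * n_gpus

old = effective_batch(per_device=16, grad_accum=2)  # 64
new = effective_batch(per_device=32, grad_accum=2)  # 128
print(old, new)  # the peak learning rate also rises from 7.5e-5 to 3e-4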
special_tokens_map.json DELETED
@@ -1 +0,0 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
 
 
tokenizer_config.json DELETED
@@ -1 +0,0 @@
- {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
 
 
training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:f3e82d9fe00fea24d2f00c4b477dbd5b91e33cea6ba3cf8903f298d8f42fefbe
- size 2991
 
 
 
 
vocab.json DELETED
@@ -1 +0,0 @@
- {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6, "g": 7, "h": 8, "i": 9, "j": 10, "k": 11, "l": 12, "m": 13, "n": 14, "o": 15, "p": 16, "q": 17, "r": 18, "s": 19, "t": 20, "u": 21, "v": 22, "w": 23, "x": 24, "y": 25, "z": 26, "ä": 27, "õ": 28, "ö": 29, "ü": 30, "š": 31, "ž": 32, "̇": 33, "„": 34, "|": 0, "[UNK]": 35, "[PAD]": 36}
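Finally, a minimal sketch (assuming the transformers Wav2Vec2CTCTokenizer; not part of the commit) of how the deleted vocab.json, tokenizer_config.json, special_tokens_map.json and added_tokens.json fit together.

# Sketch only: rebuilding the deleted tokenizer from vocab.json plus the
# special-token settings recorded in the other deleted tokenizer files.
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer(
    "vocab.json",              # 37 characters: "|" is id 0, "[UNK]" 35, "[PAD]" 36
    unk_token="[UNK]",
    pad_token="[PAD]",
    bos_token="<s>",           # <s>/</s> are appended as ids 37/38,
    eos_token="</s>",          # which is what added_tokens.json recorded
    word_delimiter_token="|",
    do_lower_case=False,
)
print(len(tokenizer))  # expected 39, matching vocab_size in the deleted config.json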