dragonSwing committed
Commit: 4277bc7
1 Parent(s): f060713

commit from $USER

README.md ADDED
@@ -0,0 +1,17 @@
+ ---
+ language: vi
+ datasets:
+ - vlsp
+ tags:
+ - speech
+ - automatic-speech-recognition
+ license: apache-2.0
+ ---
+ # Wav2Vec2-Base-Pretrain-Vietnamese
+ The base model is pre-trained on 16 kHz-sampled speech audio from 100 hours of unlabelled Vietnamese data in the [VLSP dataset](https://drive.google.com/file/d/1vUSxdORDxk-ePUt-bUVDahpoXiqKchMx/view?usp=sharing). When using the model, make sure that your speech input is also sampled at 16 kHz. Note that this model should be fine-tuned on a downstream task, such as Vietnamese Automatic Speech Recognition.
+ [Facebook's Wav2Vec2 blog](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/)
+ [Paper](https://arxiv.org/abs/2006.11477)
+
+
+ # Usage
+ See [this notebook](https://colab.research.google.com/drive/1FjTsqbYKphl9kL-eILgUc-bl4zVThL8F?usp=sharing) for more information on how to fine-tune an English pre-trained model; the same procedure applies to this Vietnamese checkpoint.
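As a quick sanity check before fine-tuning, the pre-trained encoder can be loaded with Transformers and run on a 16 kHz waveform. This is a minimal sketch, not part of the original card: `MODEL_PATH` and `example.wav` are placeholders for this repository's id (or a local clone) and your own audio file, and `torchaudio` is just one of several ways to load and resample audio.

```python
import torch
import torchaudio
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Model

MODEL_PATH = "path/to/this/repo"  # placeholder: repo id on the Hub or a local clone

feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(MODEL_PATH)
model = Wav2Vec2Model.from_pretrained(MODEL_PATH)  # encoder only; the pre-training quantizer head is discarded
model.eval()

# Load audio and resample to 16 kHz, the rate the model was pre-trained on.
waveform, sample_rate = torchaudio.load("example.wav")  # placeholder file
if sample_rate != 16_000:
    waveform = torchaudio.functional.resample(waveform, sample_rate, 16_000)

inputs = feature_extractor(waveform.squeeze().numpy(), sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state  # (batch, frames, 768)
print(hidden_states.shape)
```

For ASR fine-tuning you would instead load `Wav2Vec2ForCTC` with a CTC head sized to the vocabulary shipped in this repo and train on labelled transcripts, as in the linked notebook.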
config.json ADDED
@@ -0,0 +1,74 @@
+ {
+ "activation_dropout": 0.1,
+ "apply_spec_augment": true,
+ "architectures": [
+ "Wav2Vec2ForPreTraining"
+ ],
+ "attention_dropout": 0.1,
+ "bos_token_id": 1,
+ "codevector_dim": 256,
+ "contrastive_logits_temperature": 0.1,
+ "conv_bias": false,
+ "conv_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512
+ ],
+ "conv_kernel": [
+ 10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2
+ ],
+ "conv_stride": [
+ 5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2
+ ],
+ "ctc_loss_reduction": "sum",
+ "ctc_zero_infinity": false,
+ "diversity_loss_weight": 0.1,
+ "do_stable_layer_norm": false,
+ "eos_token_id": 2,
+ "feat_extract_activation": "gelu",
+ "feat_extract_norm": "group",
+ "feat_proj_dropout": 0.1,
+ "feat_quantizer_dropout": 0.0,
+ "final_dropout": 0.1,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.1,
+ "mask_feature_length": 10,
+ "mask_feature_prob": 0.0,
+ "mask_time_length": 10,
+ "mask_time_prob": 0.05,
+ "model_type": "wav2vec2",
+ "num_attention_heads": 12,
+ "num_codevector_groups": 2,
+ "num_codevectors_per_group": 320,
+ "num_conv_pos_embedding_groups": 16,
+ "num_conv_pos_embeddings": 128,
+ "num_feat_extract_layers": 7,
+ "num_hidden_layers": 12,
+ "num_negatives": 100,
+ "pad_token_id": 0,
+ "proj_codevector_dim": 256,
+ "transformers_version": "4.8.2",
+ "vocab_size": 32
+ }
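As an illustrative calculation (not part of the repo), this config implies that the seven convolutional layers defined by `conv_kernel`/`conv_stride` downsample the 16 kHz waveform by a factor of 320, so the transformer sees roughly 50 frames per second, and `hidden_size`/`num_hidden_layers` identify this as a wav2vec 2.0 "base"-sized model. The path below is a placeholder for this repository.

```python
from transformers import Wav2Vec2Config

config = Wav2Vec2Config.from_pretrained("path/to/this/repo")  # placeholder path

# Total downsampling of the convolutional feature encoder: 5 * 2**6 = 320 samples per frame.
total_stride = 1
for s in config.conv_stride:
    total_stride *= s

print(total_stride)                                   # 320
print(16_000 / total_stride)                          # 50.0 encoder frames per second of audio
print(config.hidden_size, config.num_hidden_layers)   # 768 12 -> "base" transformer
```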
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "do_normalize": true,
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+ "feature_size": 1,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "return_attention_mask": true,
+ "sampling_rate": 16000
+ }
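For illustration only (not from the original repo): these fields map directly onto `Wav2Vec2FeatureExtractor`'s constructor, so the preprocessor can be rebuilt from this file or simply loaded with `from_pretrained`. The dummy zero waveforms below are placeholders for real audio.

```python
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor(
    feature_size=1,
    sampling_rate=16000,
    padding_value=0.0,
    do_normalize=True,           # zero-mean, unit-variance normalisation of the raw waveform
    return_attention_mask=True,  # lets padded samples be masked out downstream
)

# Two dummy 16 kHz waveforms of different lengths; the shorter one is right-padded.
batch = feature_extractor(
    [[0.0] * 16_000, [0.0] * 8_000],
    sampling_rate=16_000,
    padding=True,
    return_tensors="pt",
)
print(batch.input_values.shape)    # torch.Size([2, 16000])
print(batch.attention_mask.shape)  # torch.Size([2, 16000])
```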
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:249ccbc8d972e557c129ff9f403fb94fc3f688a2050e0c67adbba9dccf98cce3
+ size 380267417
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": false, "word_delimiter_token": "|"}
vocab.json ADDED
@@ -0,0 +1 @@
+ {"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "|": 4, "j": 5, "ũ": 6, "f": 7, "v": 8, "ỗ": 9, "ạ": 10, "ể": 11, "é": 12, "è": 13, "ọ": 14, "s": 15, "ẻ": 16, "b": 17, "ữ": 18, "w": 19, "g": 20, "ì": 21, "k": 22, "ứ": 23, "ố": 24, "ở": 25, "e": 26, "à": 27, "â": 28, "á": 29, "ẵ": 30, "í": 31, "ử": 32, "ớ": 33, "ằ": 34, "ẩ": 35, "ẽ": 36, "ủ": 37, "ả": 38, "ệ": 39, "i": 40, "ă": 41, "ặ": 42, "d": 43, "ờ": 44, "ề": 45, "ồ": 46, "ừ": 47, "ổ": 48, "o": 49, "h": 50, "ấ": 51, "ẳ": 52, "ỳ": 53, "n": 54, "ụ": 55, "y": 56, "r": 57, "đ": 58, "ẫ": 59, "ỏ": 60, "ẹ": 61, "ễ": 62, "ĩ": 63, "ế": 64, "ỹ": 65, "p": 66, "ị": 67, "ộ": 68, "ã": 69, "ý": 70, "ắ": 71, "z": 72, "ô": 73, "ù": 74, "m": 75, "õ": 76, "c": 77, "t": 78, "ự": 79, "ợ": 80, "u": 81, "ê": 82, "ậ": 83, "ỡ": 84, "ỵ": 85, "ư": 86, "x": 87, "a": 88, "ó": 89, "ỉ": 90, "ỷ": 91, "l": 92, "ầ": 93, "q": 94, "ú": 95, "ò": 96, "ơ": 97}
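The vocabulary is character-level Vietnamese (98 entries), with `|` standing in for the space between words, as declared by `word_delimiter_token` in `tokenizer_config.json`. A minimal sketch, assuming `vocab.json` from this repo has been downloaded to the working directory, of how a CTC tokenizer for fine-tuning could be built from these files:

```python
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer(
    "vocab.json",               # assumes the file from this repo is in the working directory
    unk_token="<unk>",
    pad_token="<pad>",
    bos_token="<s>",
    eos_token="</s>",
    word_delimiter_token="|",
)

ids = tokenizer("xin chào").input_ids  # characters -> ids; the space becomes "|"
print(ids)
print(tokenizer.decode(ids))           # "xin chào" (CTC-style decode collapses repeated tokens)
```

Note that a CTC head created for fine-tuning should use this 98-entry vocabulary size rather than the `vocab_size: 32` default left in `config.json`.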