kingabzpro committed
Commit 5aeb352
1 Parent(s): 8bacf2d

adding all files
README.md ADDED
@@ -0,0 +1,68 @@
+ ---
+ language: wo
+ datasets:
+ - AI4D Baamtu Datamation - Automatic Speech Recognition in WOLOF
+ tags:
+ - speech
+ - audio
+ - automatic-speech-recognition
+ license: apache-2.0
+ ---
+
+ ## Evaluation on the Wolof Test Set
+ ```python
+ import pandas as pd
+ import torch
+ import soundfile as sf
+ from datasets import Dataset
+ from tqdm import tqdm
+ from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
+
+ model_name = "kingabzpro/wav2vec2-large-xlsr-53-wolof"
+ device = "cuda"
+
+ model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device)
+ processor = Wav2Vec2Processor.from_pretrained(model_name)
+
+ # Load the Zindi test split and point each row at its denoised .wav file
+ val = pd.read_csv("../input/automatic-speech-recognition-in-wolof/Test.csv")
+ val["path"] = "../input/automatic-speech-recognition-in-wolof/Noise Removed/tmp/WOLOF_ASR_dataset/noise_remove/" + val["ID"] + ".wav"
+ val.rename(columns={"transcription": "sentence"}, inplace=True)
+ common_voice_val = Dataset.from_pandas(val)
+
+ def speech_file_to_array_fn_test(batch):
+     # Read the 16 kHz .wav file into a float array
+     speech_array, sampling_rate = sf.read(batch["path"])
+     batch["speech"] = speech_array
+     batch["sampling_rate"] = sampling_rate
+     return batch
+
+ def prepare_dataset_test(batch):
+     # Check that all files in the batch share the expected sampling rate
+     assert (
+         len(set(batch["sampling_rate"])) == 1
+     ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
+     batch["input_values"] = processor(
+         batch["speech"], padding=True, sampling_rate=batch["sampling_rate"][0]
+     ).input_values
+     return batch
+
+ # Drop metadata columns, then apply the two preprocessing steps
+ common_voice_val = common_voice_val.remove_columns(["ID", "age", "down_votes", "gender", "up_votes"])
+ common_voice_val = common_voice_val.map(speech_file_to_array_fn_test, remove_columns=common_voice_val.column_names)
+ common_voice_val = common_voice_val.map(prepare_dataset_test, remove_columns=common_voice_val.column_names, batch_size=8, num_proc=4, batched=True)
+
+ # Greedy CTC decoding over the Wolof test set
+ final_pred = []
+ for i in tqdm(range(common_voice_val.shape[0])):
+     input_dict = processor(common_voice_val[i]["input_values"], return_tensors="pt", padding=True)
+     with torch.no_grad():
+         logits = model(input_dict.input_values.to(device)).logits
+     pred_ids = torch.argmax(logits, dim=-1)[0]
+     prediction = processor.decode(pred_ids)
+     final_pred.append(prediction)
+ ```
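The score reported below can be reproduced by comparing `final_pred` with the reference texts. A minimal sketch using the `wer` metric, continuing from the script above and assuming the `transcription` column renamed to `sentence` holds the references:

```python
from datasets import load_metric

# Word error rate between reference transcriptions and greedy predictions;
# `val` and `final_pred` come from the evaluation script above
wer = load_metric("wer")
score = wer.compute(predictions=final_pred, references=val["sentence"].tolist())
print(f"WER: {score:.4f}")
```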
+ *You can check my result on [Zindi](https://zindi.africa/competitions/ai4d-baamtu-datamation-automatic-speech-recognition-in-wolof/leaderboard); I ranked 8th in AI4D Baamtu Datamation - Automatic Speech Recognition in WOLOF.*
+
+ **Result**: 7.88 %
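For quick single-file inference outside the competition setup, a minimal sketch (`audio.wav` is a placeholder for any 16 kHz mono recording):

```python
import torch
import soundfile as sf
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model_name = "kingabzpro/wav2vec2-large-xlsr-53-wolof"
device = "cuda" if torch.cuda.is_available() else "cpu"

model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device)
processor = Wav2Vec2Processor.from_pretrained(model_name)

# "audio.wav" is a placeholder path for a 16 kHz mono recording
speech, sampling_rate = sf.read("audio.wav")
inputs = processor(speech, sampling_rate=sampling_rate, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values.to(device)).logits

pred_ids = torch.argmax(logits, dim=-1)[0]
print(processor.decode(pred_ids))
```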
config.json ADDED
@@ -0,0 +1,76 @@
+ {
+   "_name_or_path": "facebook/wav2vec2-large-xlsr-53",
+   "activation_dropout": 0.0,
+   "apply_spec_augment": true,
+   "architectures": [
+     "Wav2Vec2ForCTC"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "conv_bias": true,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "mean",
+   "ctc_zero_infinity": false,
+   "do_stable_layer_norm": true,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_dropout": 0.0,
+   "feat_extract_norm": "layer",
+   "feat_proj_dropout": 0.0,
+   "final_dropout": 0.0,
+   "gradient_checkpointing": true,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.1,
+   "mask_channel_length": 10,
+   "mask_channel_min_space": 1,
+   "mask_channel_other": 0.0,
+   "mask_channel_prob": 0.0,
+   "mask_channel_selection": "static",
+   "mask_feature_length": 10,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_space": 1,
+   "mask_time_other": 0.0,
+   "mask_time_prob": 0.05,
+   "mask_time_selection": "static",
+   "model_type": "wav2vec2",
+   "num_attention_heads": 16,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 24,
+   "pad_token_id": 40,
+   "transformers_version": "4.4.2",
+   "vocab_size": 41
+ }
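The seven `conv_stride` values above determine how much the convolutional feature extractor downsamples the 16 kHz waveform before the transformer layers. A small sketch that derives the resulting CTC frame rate (stride values copied from config.json):

```python
import math

# Strides of the seven feature-extractor conv layers (from config.json)
conv_stride = [5, 2, 2, 2, 2, 2, 2]

# Each layer divides the time resolution by its stride: 5 * 2**6 = 320
total_stride = math.prod(conv_stride)

# At the 16 kHz input rate, one CTC frame covers 320 samples = 20 ms
print(total_stride)          # 320
print(16000 / total_stride)  # 50.0 frames per second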
preprocessor_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "do_normalize": true,
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
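These settings say the processor normalizes each waveform to zero mean and unit variance, right-pads with 0.0, and expects 16 kHz audio. A sketch that loads them from the repo and checks a couple of fields (repo id as above):

```python
from transformers import Wav2Vec2FeatureExtractor

# Loads preprocessor_config.json from the model repo
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
    "kingabzpro/wav2vec2-large-xlsr-53-wolof"
)

print(feature_extractor.sampling_rate)  # 16000
print(feature_extractor.do_normalize)   # True
print(feature_extractor.padding_value)  # 0.0
```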
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6cb3f79f2a782ea5fff2ce30239c0fc9d0c35b29d31f2169befed677244854d5
+ size 1262101911
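This file is only a Git LFS pointer; the actual ~1.26 GB checkpoint lives in LFS storage and is resolved on clone or download. A sketch for fetching it programmatically, assuming the `huggingface_hub` client is available:

```python
from huggingface_hub import hf_hub_download

# Downloads the actual checkpoint that the LFS pointer above references
weights_path = hf_hub_download(
    repo_id="kingabzpro/wav2vec2-large-xlsr-53-wolof",
    filename="pytorch_model.bin",
)
print(weights_path)
```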
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|"}
vocab.json ADDED
@@ -0,0 +1 @@
+ {"’": 0, "o": 1, "f": 2, "n": 3, "b": 4, "ç": 5, "m": 6, "é": 7, "(": 8, "c": 9, "s": 10, "w": 11, "z": 12, "h": 13, "â": 14, "œ": 15, "q": 16, "l": 17, "u": 18, "r": 19, "i": 20, "v": 21, "î": 22, "t": 23, "ë": 24, "x": 25, "a": 26, "'": 27, ")": 28, "e": 29, "ô": 30, "è": 31, "g": 33, "j": 34, "y": 35, "d": 36, "p": 37, "k": 38, "|": 32, "[UNK]": 39, "[PAD]": 40}
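The vocabulary holds 41 symbols, matching `vocab_size: 41` and `pad_token_id: 40` in config.json, with `|` (id 32) standing in for the space between words. A sketch that builds the CTC tokenizer from this file and decodes a few ids (`vocab.json` assumed to be in the working directory; the sample ids are illustrative):

```python
from transformers import Wav2Vec2CTCTokenizer

# Builds the CTC tokenizer from the vocabulary shown above
tokenizer = Wav2Vec2CTCTokenizer(
    "vocab.json",
    unk_token="[UNK]",
    pad_token="[PAD]",
    word_delimiter_token="|",
)

print(tokenizer.vocab_size)  # 41
# "|" (id 32) is rendered as a space between words when decoding
print(tokenizer.decode([11, 26, 17, 32, 11, 26]))  # "wal wa"
```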