mostafaashahin committed
Commit f067cca
Parent(s): 184ecd2
Upload 8 files
- README.md +25 -0
- config.json +79 -0
- pipeline.py +88 -0
- preprocessor_config.json +9 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +1 -0
README.md
ADDED
@@ -0,0 +1,25 @@
---
tags:
- automatic-speech-recognition
library_name: generic
---

# Automatic Speech Recognition repository template

This is a template repository for Automatic Speech Recognition to support generic inference with the Hugging Face Hub generic Inference API. There are two required steps:

1. Specify the requirements by defining a `requirements.txt` file.
2. Implement the `pipeline.py` `__init__` and `__call__` methods. These methods are called by the Inference API. The `__init__` method should load the model and preload all the elements needed for inference (model, processors, tokenizers, etc.). This is only called once. The `__call__` method performs the actual inference. Make sure to follow the same input/output specifications defined in the template for the pipeline to work.

Example repos:
* https://huggingface.co/osanseviero/pyctcdecode_asr

## How to start

First create a repo at https://hf.co/new.
Then clone this template and push it to your repo.

```
git clone https://huggingface.co/templates/automatic-speech-recognition
cd automatic-speech-recognition
git remote set-url origin https://huggingface.co/$YOUR_USER/$YOUR_REPO_NAME
git push --force
```
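Once pushed, the model can be queried through the hosted Inference API by posting raw audio bytes. A minimal sketch, assuming the placeholder repo id `$YOUR_USER/$YOUR_REPO_NAME`, a placeholder token in `$HF_API_TOKEN`, and a local `sample.flac`:

```
import requests

API_URL = "https://api-inference.huggingface.co/models/$YOUR_USER/$YOUR_REPO_NAME"
headers = {"Authorization": "Bearer $HF_API_TOKEN"}

# The API decodes the audio bytes and hands the waveform to pipeline.py's __call__.
with open("sample.flac", "rb") as f:
    response = requests.post(API_URL, headers=headers, data=f.read())
print(response.json())  # expected shape: {"text": "..."}
```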
config.json
ADDED
@@ -0,0 +1,79 @@
{
  "_name_or_path": "facebook/wav2vec2-large-robust",
  "activation_dropout": 0.1,
  "apply_spec_augment": true,
  "architectures": [
    "Wav2Vec2ForCTC"
  ],
  "attention_dropout": 0.1,
  "bos_token_id": 1,
  "classifier_proj_size": 256,
  "codevector_dim": 768,
  "contrastive_logits_temperature": 0.1,
  "conv_bias": true,
  "conv_dim": [512, 512, 512, 512, 512, 512, 512],
  "conv_kernel": [10, 3, 3, 3, 3, 2, 2],
  "conv_stride": [5, 2, 2, 2, 2, 2, 2],
  "ctc_loss_reduction": "mean",
  "ctc_zero_infinity": false,
  "diversity_loss_weight": 0.1,
  "do_stable_layer_norm": true,
  "eos_token_id": 2,
  "feat_extract_activation": "gelu",
  "feat_extract_dropout": 0.0,
  "feat_extract_norm": "layer",
  "feat_proj_dropout": 0.1,
  "feat_quantizer_dropout": 0.0,
  "final_dropout": 0.1,
  "hidden_act": "gelu",
  "hidden_dropout": 0.1,
  "hidden_dropout_prob": 0.1,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-05,
  "layerdrop": 0.1,
  "mask_feature_length": 10,
  "mask_feature_prob": 0.0,
  "mask_time_length": 10,
  "mask_time_prob": 0.05,
  "model_type": "wav2vec2",
  "num_attention_heads": 16,
  "num_codevector_groups": 2,
  "num_codevectors_per_group": 320,
  "num_conv_pos_embedding_groups": 16,
  "num_conv_pos_embeddings": 128,
  "num_feat_extract_layers": 7,
  "num_hidden_layers": 24,
  "num_negatives": 100,
  "pad_token_id": 0,
  "proj_codevector_dim": 768,
  "torch_dtype": "float32",
  "transformers_version": "4.12.5",
  "use_weighted_layer_sum": false,
  "vocab_size": 71
}
pipeline.py
ADDED
@@ -0,0 +1,88 @@
from typing import Dict
import numpy as np
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
import torch


# Define the binary phonological-feature groups (p_ = feature present, n_ = absent).
# Make sure that every phoneme is covered by each group.
g1 = ['p_alveolar', 'n_alveolar']
g2 = ['p_palatal', 'n_palatal']
g3 = ['p_dental', 'n_dental']
g4 = ['p_glottal', 'n_glottal']
g5 = ['p_labial', 'n_labial']
g6 = ['p_velar', 'n_velar']
g7 = ['p_anterior', 'n_anterior']
g8 = ['p_posterior', 'n_posterior']
g9 = ['p_retroflex', 'n_retroflex']
g10 = ['p_mid', 'n_mid']
g11 = ['p_high_v', 'n_high_v']
g12 = ['p_low', 'n_low']
g13 = ['p_front', 'n_front']
g14 = ['p_back', 'n_back']
g15 = ['p_central', 'n_central']
g16 = ['p_consonant', 'n_consonant']
g17 = ['p_sonorant', 'n_sonorant']
g18 = ['p_long', 'n_long']
g19 = ['p_short', 'n_short']
g20 = ['p_vowel', 'n_vowel']
g21 = ['p_semivowel', 'n_semivowel']
g22 = ['p_fricative', 'n_fricative']
g23 = ['p_nasal', 'n_nasal']
g24 = ['p_stop', 'n_stop']
g25 = ['p_approximant', 'n_approximant']
g26 = ['p_affricate', 'n_affricate']
g27 = ['p_liquid', 'n_liquid']
g28 = ['p_continuant', 'n_continuant']
g29 = ['p_monophthong', 'n_monophthong']
g30 = ['p_diphthong', 'n_diphthong']
g31 = ['p_round', 'n_round']
g32 = ['p_voiced', 'n_voiced']
g33 = ['p_bilabial', 'n_bilabial']
g34 = ['p_coronal', 'n_coronal']
g35 = ['p_dorsal', 'n_dorsal']
groups = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, g11, g12, g13, g14, g15,
          g16, g17, g18, g19, g20, g21, g22, g23, g24, g25, g26, g27, g28,
          g29, g30, g31, g32, g33, g34, g35]


class PreTrainedPipeline():
    def __init__(self, path=""):
        # Preload everything needed at inference (model, processor, tokenizer).
        # This is only called once, so do all the heavy processing and I/O here.
        self.sampling_rate = 16000

        # The Inference API passes the repo directory as `path`; fall back to
        # the current directory when it is empty.
        self.processor = Wav2Vec2Processor.from_pretrained(path or '.')
        self.model = Wav2Vec2ForCTC.from_pretrained(path or '.')
        # For each group, collect the vocabulary ids of its two tokens, sorted.
        self.group_ids = [sorted(self.processor.tokenizer.convert_tokens_to_ids(group)) for group in groups]
        # Map position-in-group (1-based; 0 is reserved for the CTC blank) back to
        # the original vocabulary id. This is the inversion of the mapping used in
        # training, as here we need to map predictions back to the original tokens.
        self.group_ids = [{i + 1: token_id for i, token_id in enumerate(g)} for g in self.group_ids]

    def __call__(self, inputs: np.ndarray) -> Dict[str, str]:
        """
        Args:
            inputs (:obj:`np.ndarray`):
                The raw waveform of the received audio, sampled at 16 kHz by default.
        Return:
            A :obj:`dict` like {"text": "XXX"} containing the detected text
            for the input audio.
        """
        input_values = self.processor(audio=inputs, sampling_rate=self.sampling_rate, return_tensors="pt").input_values

        if torch.cuda.is_available():
            self.model.to("cuda")
            input_values = input_values.to("cuda")

        with torch.no_grad():
            logits = self.model(input_values).logits

        # Restrict the logits to the CTC blank (id 0) plus the two tokens of
        # groups[31] (g32, the 'voiced' group), then decode within that
        # reduced vocabulary.
        mask = torch.zeros(logits.size()[2], dtype=torch.bool)
        mask[0] = True
        mask[list(self.group_ids[31].values())] = True
        logits_g = logits[:, :, mask]
        pred_ids = torch.argmax(logits_g, dim=-1)
        # Map the reduced indices back to the original vocabulary ids.
        pred_ids = pred_ids.cpu().apply_(lambda x: self.group_ids[31].get(x, x))
        pred = self.processor.batch_decode(pred_ids, spaces_between_special_tokens=True)[0]
        # Render feature tokens as +feature / -feature.
        pred = pred.replace('p_', '+').replace('n_', '-')
        return {"text": pred}
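For local testing, the pipeline can be exercised directly without the Inference API. A minimal sketch, assuming a 16 kHz mono file `sample.wav` and the `soundfile` package:

```
import soundfile as sf
from pipeline import PreTrainedPipeline

pipe = PreTrainedPipeline(path=".")   # load model + processor from the repo root
waveform, sr = sf.read("sample.wav")  # assumed to already be 16 kHz mono
assert sr == pipe.sampling_rate
print(pipe(waveform))                 # e.g. {"text": "+voiced -voiced ..."}
```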
preprocessor_config.json
ADDED
@@ -0,0 +1,9 @@
{
  "do_normalize": true,
  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
  "feature_size": 1,
  "padding_side": "right",
  "padding_value": 0.0,
  "return_attention_mask": false,
  "sampling_rate": 16000
}
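The feature extractor expects 16 kHz mono input (`sampling_rate: 16000`, `feature_size: 1`), so audio at other rates must be resampled before calling the pipeline. A sketch using `librosa` (one of several possible resamplers; the filename is a placeholder):

```
import librosa

# librosa.load resamples to the requested rate and downmixes to mono float32.
waveform, sr = librosa.load("speech_44k.wav", sr=16000, mono=True)
assert sr == 16000
```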
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c0275f1c9c019e5543cf3a95143aa762f1500b75b661bdf970b4ec7e221d363c
size 1262214769
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": false, "word_delimiter_token": "", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
vocab.json
ADDED
@@ -0,0 +1 @@
{"<pad>": 0, "n_affricate": 1, "n_alveolar": 2, "n_anterior": 3, "n_approximant": 4, "n_back": 5, "n_bilabial": 6, "n_central": 7, "n_consonant": 8, "n_continuant": 9, "n_coronal": 10, "n_dental": 11, "n_diphthong": 12, "n_dorsal": 13, "n_fricative": 14, "n_front": 15, "n_glottal": 16, "n_high_v": 17, "n_labial": 18, "n_liquid": 19, "n_long": 20, "n_low": 21, "n_mid": 22, "n_monophthong": 23, "n_nasal": 24, "n_palatal": 25, "n_posterior": 26, "n_retroflex": 27, "n_round": 28, "n_semivowel": 29, "n_short": 30, "n_sonorant": 31, "n_stop": 32, "n_velar": 33, "n_voiced": 34, "n_vowel": 35, "p_affricate": 36, "p_alveolar": 37, "p_anterior": 38, "p_approximant": 39, "p_back": 40, "p_bilabial": 41, "p_central": 42, "p_consonant": 43, "p_continuant": 44, "p_coronal": 45, "p_dental": 46, "p_diphthong": 47, "p_dorsal": 48, "p_fricative": 49, "p_front": 50, "p_glottal": 51, "p_high_v": 52, "p_labial": 53, "p_liquid": 54, "p_long": 55, "p_low": 56, "p_mid": 57, "p_monophthong": 58, "p_nasal": 59, "p_palatal": 60, "p_posterior": 61, "p_retroflex": 62, "p_round": 63, "p_semivowel": 64, "p_short": 65, "p_sonorant": 66, "p_stop": 67, "p_velar": 68, "p_voiced": 69, "p_vowel": 70}
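The vocabulary pairs every phonological feature as `p_`/`n_` tokens (feature present / absent), with `<pad>` at id 0 doubling as the CTC blank. The inverted group mapping built in `pipeline.py` can be reproduced straight from this file; a small sketch, assuming `vocab.json` sits in the current directory:

```
import json

with open("vocab.json") as f:
    vocab = json.load(f)

# Rebuild the mapping used for groups[31] ('voiced'): reduced index
# (1-based; 0 stays the CTC blank) -> original vocabulary id.
ids = sorted(vocab[t] for t in ["p_voiced", "n_voiced"])
group_map = {i + 1: token_id for i, token_id in enumerate(ids)}
print(group_map)  # {1: 34, 2: 69}: 34 = n_voiced, 69 = p_voiced
```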