Upload 9 files
Browse files
- .gitattributes +34 -35
- README.md +54 -0
- config.json +75 -0
- model.safetensors +3 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +15 -0
- tokenizer.json +0 -0
- tokenizer_config.json +16 -0
- vocab.json +0 -0
.gitattributes
CHANGED
@@ -1,35 +1,34 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,54 @@
---
license: gpl-3.0
language:
- en
pipeline_tag: text2text-generation
tags:
- code
- asr
- inverse text normalization
datasets:
- text-normalization-challenge-english-language
---
# asr_inverse_text_normalization

Fine-tuned the facebook/bart-base pretrained model on the ASR inverse text normalization dataset, treating it as a seq2seq task. Other approaches worth considering are framing it as a TokenClassification task, or the one described at https://machinelearning.apple.com/research/inverse-text-normal.
## Model description

BART (Bidirectional and Auto-Regressive Transformers) is a pre-trained transformer-based neural network model developed by Facebook AI Research (FAIR) for a range of natural language processing (NLP) tasks.

The BART architecture is based on the Transformer model, which processes sequential input such as text by applying self-attention mechanisms to capture the relationships between words in the input sequence. BART pairs a bidirectional encoder with an auto-regressive decoder, which enables it to perform both prediction and generation tasks.

BART was trained on a diverse range of NLP tasks, including machine translation, summarization, and question answering, and has shown strong performance across multiple benchmarks. Its pre-training corrupts text with different types of noise and trains the model to reconstruct the original text, which improves the model's ability to generalize to new tasks and has allowed it to outperform other pre-trained language models such as GPT and BERT.

The "facebook/bart-base" flavour of the model was chosen; the dataset's "after" column is used as the source and its "before" column as the target.
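For illustration, here is a minimal sketch of how such source/target pairs could be built from the Kaggle training CSV. The use of pandas and the file name `en_train.csv` are assumptions; the repository does not document its exact preprocessing:

```python
import pandas as pd

# Hypothetical local path; the Kaggle challenge distributes the training data
# as en_train.csv with (among others) "before" and "after" columns.
df = pd.read_csv("en_train.csv")

# Inverse text normalization reverses the original challenge's direction:
# the spoken form ("after") is the source, the written form ("before") the target.
sources = df["after"].astype(str).tolist()
targets = df["before"].astype(str).tolist()
```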
## Intended uses & limitations

This model can be used as an out-of-the-box solution for inverse text normalization, converting un-normalized ASR output such as

"my c v v for my card is five six seven and it expires on november twenty three" -> "my CVV for my card is 567 and it expires on November 23"

The model should be explored with various min and max length settings at generation time for your specific use case (see the sketch after the quick-start example below).
### How to use

```python
>>> from transformers import pipeline

>>> generator = pipeline(model="pavanBuduguppa/asr_inverse_text_normalization")
>>> generator("my c v v for my card is five six seven and it expires on november twenty three")
```
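As noted under limitations, generation length settings are worth sweeping for your domain. A minimal sketch follows; the specific values are illustrative assumptions, not tuned recommendations (the text2text-generation pipeline forwards these keyword arguments to `model.generate`):

```python
from transformers import pipeline

generator = pipeline(model="pavanBuduguppa/asr_inverse_text_normalization")
text = "my c v v for my card is five six seven and it expires on november twenty three"

# Sweep max_length (and optionally min_length / num_beams) and inspect outputs.
for max_len in (32, 64, 128):
    out = generator(text, max_length=max_len, min_length=1, num_beams=4)
    print(max_len, out[0]["generated_text"])
```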
## Training data

All credits and rights for the training data belong to Google. The data was merely obtained and processed for this model; the original data can be found at https://www.kaggle.com/competitions/text-normalization-challenge-english-language/data
config.json
ADDED
@@ -0,0 +1,75 @@
{
  "_name_or_path": ".",
  "activation_dropout": 0.1,
  "activation_function": "gelu",
  "add_bias_logits": false,
  "add_final_layer_norm": false,
  "architectures": [
    "BartForConditionalGeneration"
  ],
  "attention_dropout": 0.1,
  "bos_token_id": 0,
  "classif_dropout": 0.1,
  "classifier_dropout": 0.0,
  "d_model": 768,
  "decoder_attention_heads": 12,
  "decoder_ffn_dim": 3072,
  "decoder_layerdrop": 0.0,
  "decoder_layers": 6,
  "decoder_start_token_id": 2,
  "dropout": 0.1,
  "early_stopping": true,
  "encoder_attention_heads": 12,
  "encoder_ffn_dim": 3072,
  "encoder_layerdrop": 0.0,
  "encoder_layers": 6,
  "eos_token_id": 2,
  "forced_bos_token_id": 0,
  "forced_eos_token_id": 2,
  "gradient_checkpointing": false,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2"
  },
  "init_std": 0.02,
  "is_encoder_decoder": true,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2
  },
  "max_position_embeddings": 1024,
  "model_type": "bart",
  "no_repeat_ngram_size": 3,
  "normalize_before": false,
  "normalize_embedding": true,
  "num_beams": 4,
  "num_hidden_layers": 6,
  "pad_token_id": 1,
  "scale_embedding": false,
  "task_specific_params": {
    "summarization": {
      "length_penalty": 1.0,
      "max_length": 128,
      "min_length": 12,
      "num_beams": 4
    },
    "summarization_cnn": {
      "length_penalty": 2.0,
      "max_length": 142,
      "min_length": 56,
      "num_beams": 4
    },
    "summarization_xsum": {
      "length_penalty": 1.0,
      "max_length": 62,
      "min_length": 11,
      "num_beams": 6
    }
  },
  "torch_dtype": "float32",
  "transformers_version": "4.20.1",
  "use_cache": true,
  "vocab_size": 50265
}
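The generation defaults baked into this config (num_beams, no_repeat_ngram_size, early_stopping) apply to `model.generate()` unless overridden per call; a quick way to inspect them, as a sketch:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("pavanBuduguppa/asr_inverse_text_normalization")
# Defaults used by model.generate() unless overridden per call.
print(config.num_beams, config.no_repeat_ngram_size, config.early_stopping)
```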
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:85d7ba93950c2349251eb2d0c67293260cd6ec61df0f5d48067695ee2e01cbb0
size 557912620
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4eca52066dc572f8a6ccd81ddff510bc93e4a68e83399164ef115b4610facff3
size 557965433
special_tokens_map.json
ADDED
@@ -0,0 +1,15 @@
{
  "bos_token": "<s>",
  "cls_token": "<s>",
  "eos_token": "</s>",
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "unk_token": "<unk>"
}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,16 @@
{
  "add_prefix_space": false,
  "bos_token": "<s>",
  "cls_token": "<s>",
  "eos_token": "</s>",
  "errors": "replace",
  "mask_token": "<mask>",
  "model_max_length": 1024,
  "name_or_path": "facebook/bart-base",
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "special_tokens_map_file": null,
  "tokenizer_class": "BartTokenizer",
  "trim_offsets": true,
  "unk_token": "<unk>"
}
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff
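The tokenizer files above (tokenizer.json, tokenizer_config.json, special_tokens_map.json, vocab.json) are what AutoTokenizer reads when instantiating the BART tokenizer; a quick sanity check, as a sketch:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("pavanBuduguppa/asr_inverse_text_normalization")
# Special tokens as declared in special_tokens_map.json above.
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.mask_token)
```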