Upload folder using huggingface_hub

Files changed:
- .gitattributes +10 -27
- README.md +123 -0
- added_tokens.json +4 -0
- alphabet.json +1 -0
- config.json +76 -0
- flax_model.msgpack +3 -0
- language_model/attrs.json +1 -0
- language_model/unigrams.txt +0 -0
- preprocessor_config.json +10 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +6 -0
- tokenizer_config.json +48 -0
- training_args.bin +3 -0
- vocab.json +82 -0
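Per the commit title, the files below were pushed with `huggingface_hub`. A minimal sketch of the kind of call that produces such a commit; the local folder path is a placeholder, not taken from this repo:

```python
from huggingface_hub import HfApi

# A minimal sketch of the call that produces an "Upload folder using
# huggingface_hub" commit. The local path is a hypothetical placeholder.
api = HfApi()
api.upload_folder(
    folder_path="./wav2vec2-large-xlsr-gu",  # hypothetical local checkpoint dir
    repo_id="gchhablani/wav2vec2-large-xlsr-gu",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```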
.gitattributes
CHANGED
```diff
@@ -1,35 +1,18 @@
-*.
-*.
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tar.gz filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
-*.
-
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+language_model/gujarati_try.binary filter=lfs diff=lfs merge=lfs -text
```
README.md
ADDED
@@ -0,0 +1,123 @@
---
language: gu
datasets:
- openslr
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Large 53 Gujarati by Gunjan Chhablani
  results:
  - task:
      name: Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: OpenSLR gu
      type: openslr
    metrics:
    - name: Test WER
      type: wer
      value: 23.55
---

# Wav2Vec2-Large-XLSR-53-Gujarati

Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Gujarati using the [OpenSLR SLR78](http://openslr.org/78/) dataset. When using this model, make sure that your speech input is sampled at 16 kHz.

## Usage

The model can be used directly (without a language model) as follows, assuming you have a dataset with Gujarati `sentence` and `path` fields:

```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# test_dataset = #TODO: WRITE YOUR CODE TO LOAD THE TEST DATASET.
# For a sample, see the Colab link in the Training section.

processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-gu")
model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-gu")

# The original data has a 48,000 Hz sampling rate; change this to match your input.
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays.
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)

print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```

## Evaluation

The model can be evaluated as follows on 10% of the Gujarati data on OpenSLR.

```python
import re

import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# test_dataset = #TODO: WRITE YOUR CODE TO LOAD THE TEST DATASET. For a sample, see the Colab link in the Training section.

wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("gchhablani/wav2vec2-large-xlsr-gu")
model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-gu")
model.to("cuda")

chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�\–\…\'\_\’]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays.
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Run batched inference on the preprocessed dataset.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"),
                       attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)

print("WER: {:.2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```

**Test Result**: 23.55 %

## Training

90% of the OpenSLR Gujarati Male+Female dataset was used for training, after removing a few examples that contained Roman characters.
The Colab notebook used for training can be found [here](https://colab.research.google.com/drive/1fRQlgl4EPR4qKGScgza3MpWgbL5BeWtn?usp=sharing).
added_tokens.json
ADDED
@@ -0,0 +1,4 @@
```json
{
  "</s>": 81,
  "<s>": 80
}
```
alphabet.json
ADDED
@@ -0,0 +1 @@
```json
{"labels": ["\u0a8f", "\u0a82", "\u0a88", "\u0ac8", "\u0ac0", "\u0aa3", "\u0ab9", "\u0ae6", "\u0a93", "\u0ac5", "\u0aaf", "\u0aa4", "\u0a8d", "\u0ab7", "\u0a94", "\u0aa0", "\u0a89", "\u0a9b", "\u0aae", "\u0a86", "\u0aa8", "\u0abc", "\u0a95", "\u0ab2", "\u0acb", "\u0aea", "\u0ae8", "\u0aeb", "\u0aa1", "\u0aab", "\u0a98", "\u0aed", "\u0aee", "\u0abe", "\u0acd", "\u0a81", "\u0a90", "\u0a9d", "\u0ac3", "\u200d", "\u0aa2", "\u0a83", "\u0ab5", " ", "\u0a9c", "\u0aaa", "\u0abf", "\u0a97", "\u0aa5", "\u0aef", "\u0acc", "\u0a91", "\u0ac7", "\u0ae0", "\u0a96", "\u0a87", "\u0a85", "\u0a8b", "\u0ae9", "\u0ac1", "\u0aac", "\u0ab3", "\u0ac9", "\u0aec", "\u200c", "\u0aa6", "\u0ac2", "\u0a8a", "\u0ae2", "\u0ab8", "\u0ab6", "\u0ae7", "\u0a9a", "\u0aa7", "\u0ab0", "\u0a9e", "\u0a9f", "\u0aad", "\u2047", "", "<s>", "</s>"], "is_bpe": false}
```
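`alphabet.json` is the serialized pyctcdecode alphabet. A quick sanity-check sketch, assuming a local clone of this repo:

```python
import json

# The pyctcdecode alphabet should line up with the CTC head: 80 base vocab
# entries (with [UNK] rendered as "\u2047" and [PAD] as "") plus the
# <s>/</s> tokens from added_tokens.json.
with open("alphabet.json", encoding="utf-8") as f:
    alphabet = json.load(f)

print(len(alphabet["labels"]))  # 82
print(alphabet["is_bpe"])       # False: a character-level alphabet
```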
config.json
ADDED
@@ -0,0 +1,76 @@
```json
{
  "_name_or_path": "facebook/wav2vec2-large-xlsr-53",
  "activation_dropout": 0.0,
  "apply_spec_augment": true,
  "architectures": [
    "Wav2Vec2ForCTC"
  ],
  "attention_dropout": 0.1,
  "bos_token_id": 1,
  "conv_bias": true,
  "conv_dim": [
    512,
    512,
    512,
    512,
    512,
    512,
    512
  ],
  "conv_kernel": [
    10,
    3,
    3,
    3,
    3,
    2,
    2
  ],
  "conv_stride": [
    5,
    2,
    2,
    2,
    2,
    2,
    2
  ],
  "ctc_loss_reduction": "mean",
  "ctc_zero_infinity": false,
  "do_stable_layer_norm": true,
  "eos_token_id": 2,
  "feat_extract_activation": "gelu",
  "feat_extract_dropout": 0.0,
  "feat_extract_norm": "layer",
  "feat_proj_dropout": 0.0,
  "final_dropout": 0.0,
  "gradient_checkpointing": true,
  "hidden_act": "gelu",
  "hidden_dropout": 0.1,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-05,
  "layerdrop": 0.1,
  "mask_channel_length": 10,
  "mask_channel_min_space": 1,
  "mask_channel_other": 0.0,
  "mask_channel_prob": 0.0,
  "mask_channel_selection": "static",
  "mask_feature_length": 10,
  "mask_feature_prob": 0.0,
  "mask_time_length": 10,
  "mask_time_min_space": 1,
  "mask_time_other": 0.0,
  "mask_time_prob": 0.05,
  "mask_time_selection": "static",
  "model_type": "wav2vec2",
  "num_attention_heads": 16,
  "num_conv_pos_embedding_groups": 16,
  "num_conv_pos_embeddings": 128,
  "num_feat_extract_layers": 7,
  "num_hidden_layers": 24,
  "pad_token_id": 79,
  "transformers_version": "4.5.0.dev0",
  "vocab_size": 80
}
```
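Since this is a standard `transformers` Wav2Vec2 config, a few of the values above can be cross-checked directly from the Hub:

```python
from transformers import Wav2Vec2Config

# A small cross-check of the values above, loaded straight from the Hub.
config = Wav2Vec2Config.from_pretrained("gchhablani/wav2vec2-large-xlsr-gu")
print(config.vocab_size)    # 80: size of the CTC output layer
print(config.hidden_size)   # 1024: the "large" XLSR-53 encoder width
print(config.pad_token_id)  # 79: the [PAD] id from vocab.json, used as the CTC blank
```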
flax_model.msgpack
ADDED
@@ -0,0 +1,3 @@
```
version https://git-lfs.github.com/spec/v1
oid sha256:cfc30369cb6d5846ce73104b7f6867bc00352eea0318cfdc3cbbe860881f8b84
size 135
```
language_model/attrs.json
ADDED
@@ -0,0 +1 @@
```json
{"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
```
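These attributes store the decoder settings for pyctcdecode. A sketch of rebuilding an equivalent decoder by hand, assuming local copies of `alphabet.json` and the LFS-tracked KenLM binary; the argument names are pyctcdecode's, with `score_boundary` assumed to map onto `lm_score_boundary`:

```python
import json
from pyctcdecode import build_ctcdecoder

# Sketch assuming local copies of alphabet.json and the LFS-tracked KenLM
# binary (language_model/gujarati_try.binary).
labels = json.load(open("alphabet.json", encoding="utf-8"))["labels"]
attrs = json.load(open("language_model/attrs.json"))

decoder = build_ctcdecoder(
    labels,
    kenlm_model_path="language_model/gujarati_try.binary",
    alpha=attrs["alpha"],                        # LM weight
    beta=attrs["beta"],                          # word-insertion bonus
    unk_score_offset=attrs["unk_score_offset"],  # penalty for OOV words
    lm_score_boundary=attrs["score_boundary"],
)
```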
language_model/unigrams.txt
ADDED
File without changes
preprocessor_config.json
ADDED
@@ -0,0 +1,10 @@
```json
{
  "do_normalize": true,
  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
  "feature_size": 1,
  "padding_side": "right",
  "padding_value": 0.0,
  "processor_class": "Wav2Vec2ProcessorWithLM",
  "return_attention_mask": true,
  "sampling_rate": 16000
}
```
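The `processor_class` above is `Wav2Vec2ProcessorWithLM`, which pairs this feature extractor with the `language_model/` folder for beam-search decoding. A hedged sketch, assuming `pyctcdecode` and `kenlm` are installed and the repo's LM files load cleanly:

```python
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2ProcessorWithLM

# A sketch of LM-boosted decoding; assumes pyctcdecode and kenlm are installed
# and that the repo's language_model/ files load cleanly.
processor = Wav2Vec2ProcessorWithLM.from_pretrained("gchhablani/wav2vec2-large-xlsr-gu")
model = Wav2Vec2ForCTC.from_pretrained("gchhablani/wav2vec2-large-xlsr-gu")

# `speech` stands in for a 16 kHz mono waveform (a 1-D float array).
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values).logits

# Unlike the plain processor's argmax decode, batch_decode here takes the raw
# logits as numpy and runs a beam search against the KenLM model.
print(processor.batch_decode(logits.numpy()).text)
```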
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
```
version https://git-lfs.github.com/spec/v1
oid sha256:14d3e31303a48bced2db54cad006a078f9955ee42b9fabc7a9ca51da9e82d4ff
size 1262261847
```
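The three lines above are a Git LFS pointer rather than the weights themselves; per the `size` field the real checkpoint is about 1.26 GB. A minimal way to fetch the actual file:

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer above and downloads the ~1.26 GB checkpoint.
path = hf_hub_download("gchhablani/wav2vec2-large-xlsr-gu", "pytorch_model.bin")
print(path)
```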
special_tokens_map.json
ADDED
@@ -0,0 +1,6 @@
```json
{
  "bos_token": "<s>",
  "eos_token": "</s>",
  "pad_token": "[PAD]",
  "unk_token": "[UNK]"
}
```
tokenizer_config.json
ADDED
@@ -0,0 +1,48 @@
```json
{
  "added_tokens_decoder": {
    "78": {
      "content": "[UNK]",
      "lstrip": true,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "79": {
      "content": "[PAD]",
      "lstrip": true,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "80": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "81": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "do_lower_case": false,
  "eos_token": "</s>",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "[PAD]",
  "processor_class": "Wav2Vec2ProcessorWithLM",
  "replace_word_delimiter_char": " ",
  "target_lang": null,
  "tokenizer_class": "Wav2Vec2CTCTokenizer",
  "unk_token": "[UNK]",
  "word_delimiter_token": "|"
}
```
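A quick check that the tokenizer files above fit together, loading straight from the Hub:

```python
from transformers import Wav2Vec2CTCTokenizer

# Sketch: confirm the special-token wiring declared above.
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("gchhablani/wav2vec2-large-xlsr-gu")
print(tokenizer.pad_token, tokenizer.pad_token_id)  # [PAD] 79, doubles as the CTC blank
print(tokenizer.unk_token, tokenizer.unk_token_id)  # [UNK] 78
print(tokenizer.word_delimiter_token)               # "|" stands in for spaces
```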
training_args.bin
ADDED
@@ -0,0 +1,3 @@
```
version https://git-lfs.github.com/spec/v1
oid sha256:c9cedeed8072ec2cf45c9812dafcd6855a08d7f57102501d8921dbe318e40fdc
size 2287
```
vocab.json
ADDED
@@ -0,0 +1,82 @@
```json
{
  "[PAD]": 79,
  "[UNK]": 78,
  "|": 43,
  "ઁ": 35,
  "ં": 1,
  "ઃ": 41,
  "અ": 56,
  "આ": 19,
  "ઇ": 55,
  "ઈ": 2,
  "ઉ": 16,
  "ઊ": 67,
  "ઋ": 57,
  "ઍ": 12,
  "એ": 0,
  "ઐ": 36,
  "ઑ": 51,
  "ઓ": 8,
  "ઔ": 14,
  "ક": 22,
  "ખ": 54,
  "ગ": 47,
  "ઘ": 30,
  "ચ": 72,
  "છ": 17,
  "જ": 44,
  "ઝ": 37,
  "ઞ": 75,
  "ટ": 76,
  "ઠ": 15,
  "ડ": 28,
  "ઢ": 40,
  "ણ": 5,
  "ત": 11,
  "થ": 48,
  "દ": 65,
  "ધ": 73,
  "ન": 20,
  "પ": 45,
  "ફ": 29,
  "બ": 60,
  "ભ": 77,
  "મ": 18,
  "ય": 10,
  "ર": 74,
  "લ": 23,
  "ળ": 61,
  "વ": 42,
  "શ": 70,
  "ષ": 13,
  "સ": 69,
  "હ": 6,
  "઼": 21,
  "ા": 33,
  "િ": 46,
  "ી": 4,
  "ુ": 59,
  "ૂ": 66,
  "ૃ": 38,
  "ૅ": 9,
  "ે": 52,
  "ૈ": 3,
  "ૉ": 62,
  "ો": 24,
  "ૌ": 50,
  "્": 34,
  "ૠ": 53,
  "ૢ": 68,
  "૦": 7,
  "૧": 71,
  "૨": 26,
  "૩": 58,
  "૪": 25,
  "૫": 27,
  "૬": 63,
  "૭": 31,
  "૮": 32,
  "૯": 49,
  "\u200c": 64,
  "\u200d": 39
}
```
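The vocabulary is character-level Gujarati with `|` as the word delimiter; the last two entries are the zero-width non-joiner and joiner (`\u200c`, `\u200d`), which render invisibly. A small round-trip sketch, assuming a local copy of `vocab.json`:

```python
import json

# Invert the mapping above and decode a few ids by hand, restoring spaces
# from the "|" word delimiter.
vocab = json.load(open("vocab.json", encoding="utf-8"))
id2char = {i: c for c, i in vocab.items()}

ids = [22, 52, 18]  # ક + ે + મ
print("".join(id2char[i] for i in ids).replace("|", " "))  # કેમ
```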