---
tags:
- wikibio
- multilingual
- nlp
- indicnlp
datasets:
- ai4bharat/IndicWikiBio
language:
- as
- bn
- hi
- kn
- ml
- or
- pa
- ta
- te
licenses:
- cc-by-nc-4.0
---

# MultiIndicWikiBioSS

This repository contains the [IndicBARTSS](https://huggingface.co/ai4bharat/IndicBARTSS) checkpoint fine-tuned on the 9 languages of the [IndicWikiBio](https://huggingface.co/datasets/ai4bharat/IndicWikiBio) dataset. For fine-tuning details, see the [paper](https://arxiv.org/abs/2203.05437).

## Using this model in `transformers`

```
from transformers import MBartForConditionalGeneration, AutoModelForSeq2SeqLM
from transformers import AlbertTokenizer, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("ai4bharat/MultiIndicWikiBioSS", do_lower_case=False, use_fast=False, keep_accents=True)
# Or use tokenizer = AlbertTokenizer.from_pretrained("ai4bharat/MultiIndicWikiBioSS", do_lower_case=False, use_fast=False, keep_accents=True)
model = AutoModelForSeq2SeqLM.from_pretrained("ai4bharat/MultiIndicWikiBioSS")
# Or use model = MBartForConditionalGeneration.from_pretrained("ai4bharat/MultiIndicWikiBioSS")

# Some initial mapping
bos_id = tokenizer._convert_token_to_id_with_added_voc("<s>")
eos_id = tokenizer._convert_token_to_id_with_added_voc("</s>")
pad_id = tokenizer._convert_token_to_id_with_added_voc("<pad>")
# To get lang_id use any of ['<2as>', '<2bn>', '<2en>', '<2gu>', '<2hi>', '<2kn>', '<2ml>', '<2mr>', '<2or>', '<2pa>', '<2ta>', '<2te>']

# First tokenize the input and outputs. The format below is how IndicBARTSS was trained, so the input should be "Sentence </s> <2xx>" where xx is the language code. Similarly, the output should be "<2yy> Sentence </s>".
inp = tokenizer("I am a boy </s> <2en>", add_special_tokens=False, return_tensors="pt", padding=True).input_ids # tensor([[ 466, 1981, 80, 25573, 64001, 64004]])

# For generation. Pardon the messiness. Note the decoder_start_token_id.
model.eval() # Set dropouts to zero
model_output = model.generate(inp, use_cache=True, no_repeat_ngram_size=3, encoder_no_repeat_ngram_size=3, num_beams=4, max_length=20, min_length=1, early_stopping=True, pad_token_id=pad_id, bos_token_id=bos_id, eos_token_id=eos_id, decoder_start_token_id=tokenizer._convert_token_to_id_with_added_voc("<2en>"))

# Decode to get output strings
decoded_output = tokenizer.decode(model_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
print(decoded_output) # I am a boy
# Note that if your output language is not Hindi or Marathi, you should convert its script from Devanagari to the desired language using the Indic NLP Library.

# What if we mask?
inp = tokenizer("I am [MASK] </s> <2en>", add_special_tokens=False, return_tensors="pt", padding=True).input_ids
model_output = model.generate(inp, use_cache=True, no_repeat_ngram_size=3, encoder_no_repeat_ngram_size=3, num_beams=4, max_length=20, min_length=1, early_stopping=True, pad_token_id=pad_id, bos_token_id=bos_id, eos_token_id=eos_id, decoder_start_token_id=tokenizer._convert_token_to_id_with_added_voc("<2en>"))
decoded_output = tokenizer.decode(model_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
print(decoded_output) # I am happy

inp = tokenizer("मैं [MASK] हूँ </s> <2hi>", add_special_tokens=False, return_tensors="pt", padding=True).input_ids
model_output = model.generate(inp, use_cache=True, no_repeat_ngram_size=3, encoder_no_repeat_ngram_size=3, num_beams=4, max_length=20, min_length=1, early_stopping=True, pad_token_id=pad_id, bos_token_id=bos_id, eos_token_id=eos_id, decoder_start_token_id=tokenizer._convert_token_to_id_with_added_voc("<2hi>"))
decoded_output = tokenizer.decode(model_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
print(decoded_output) # मैं जानता हूँ

inp = tokenizer("मला [MASK] पाहिजे </s> <2mr>", add_special_tokens=False, return_tensors="pt", padding=True).input_ids
model_output = model.generate(inp, use_cache=True, no_repeat_ngram_size=3, encoder_no_repeat_ngram_size=3, num_beams=4, max_length=20, min_length=1, early_stopping=True, pad_token_id=pad_id, bos_token_id=bos_id, eos_token_id=eos_id, decoder_start_token_id=tokenizer._convert_token_to_id_with_added_voc("<2mr>"))
decoded_output = tokenizer.decode(model_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
print(decoded_output) # मला ओळखलं पाहिजे
```
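
The note in the block above mentions converting Devanagari output to the target script with the Indic NLP Library. A minimal sketch of that conversion, assuming the `indic-nlp-library` package and its `UnicodeIndicTransliterator.transliterate(text, source_lang, target_lang)` API (the sample string and language pair are purely illustrative):

```
# Minimal sketch: map Devanagari-script output to another Indic script.
# Assumes: pip install indic-nlp-library
from indicnlp.transliterate.unicode_transliterate import UnicodeIndicTransliterator

devanagari_output = "मैं जानता हूँ" # illustrative model output in Devanagari script
# transliterate(text, source_lang, target_lang) maps between Indic scripts.
print(UnicodeIndicTransliterator.transliterate(devanagari_output, "hi", "ta"))
```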

## Benchmarks

Scores on the `IndicWikiBio` test sets are as follows:

Language | RougeL
---------|-------
as | 56.50
bn | 56.58
hi | 67.34
kn | 39.37
ml | 38.42
or | 70.71
pa | 52.78
ta | 51.11
te | 51.72
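
Here ROUGE-L is the longest-common-subsequence F-score between generated and reference biographies, reported on a 0-100 scale. The numbers above come from the evaluation setup described in the paper; purely as an illustration of the metric, a ROUGE-L F-score can be computed with the `rouge-score` package (an assumed stand-in, not the paper's scripts; its default tokenizer is English-centric, so Indic-script text would need a language-appropriate tokenizer):

```
# Rough illustration of ROUGE-L; NOT the exact setup behind the table above.
# Assumes: pip install rouge-score
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rougeL"], use_stemmer=False)
reference = "john smith is an english footballer"
prediction = "john smith is a footballer"
rougeL_f1 = scorer.score(reference, prediction)["rougeL"].fmeasure
print(round(rougeL_f1 * 100, 2)) # scaled to the table's 0-100 range
```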

## Citation

If you use this model, please cite the following paper:
```
@inproceedings{Kumar2022IndicNLGSM,
  title={IndicNLG Suite: Multilingual Datasets for Diverse NLG Tasks in Indic Languages},
  author={Aman Kumar and Himani Shrotriya and Prachi Sahu and Raj Dabre and Ratish Puduppully and Anoop Kunchukuttan and Amogh Mishra and Mitesh M. Khapra and Pratyush Kumar},
  year={2022},
  url={https://arxiv.org/abs/2203.05437}
}
```

## License

The model is available under the MIT License.