ZhangCheng committed on
Commit
9084332
1 Parent(s): 71be6a5

Create README.md

Browse files
Files changed (1) hide show
  1. README.md +63 -0
README.md ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language: en
3
+ datasets:
4
+ - squad
5
+ tags:
6
+ - Question Generation
7
+ widget:
8
+ - text: "<answer> T5v1.1 <context> Cheng fine-tuned T5v1.1 on SQuAD for question generation."
9
+ example_title: "Example 1"
10
+ - text: "<answer> SQuAD <context> Cheng fine-tuned T5v1.1 on SQuAD dataset for question generation."
11
+ example_title: "Example 2"
12
+ - text: "<answer> thousands <context> Transformers provides thousands of pre-trained models to perform tasks on different modalities such as text, vision, and audio."
13
+ example_title: "Example 3"
14
+ ---
15
+
16
+ # T5v1.1-Base Fine-Tuned on SQuAD for Question Generation
17
+
18
+ ### Model in Action:
19
+
20
+ ```python
21
import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration

# Hub repository that holds both the fine-tuned weights and the tokenizer.
trained_model_path = 'ZhangCheng/T5v1.1-Base-Fine-Tuned-for-Question-Generation'
trained_tokenizer_path = 'ZhangCheng/T5v1.1-Base-Fine-Tuned-for-Question-Generation'
26
+
27
class QuestionGeneration:
    """Generate a question from an (answer, context) pair using a
    T5v1.1-Base model fine-tuned on SQuAD for question generation.
    """

    def __init__(self):
        # Load the fine-tuned model and its tokenizer from the Hub.
        self.model = T5ForConditionalGeneration.from_pretrained(trained_model_path)
        self.tokenizer = T5Tokenizer.from_pretrained(trained_tokenizer_path)
        # Run on GPU when available, otherwise fall back to CPU.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model = self.model.to(self.device)
        # Inference-only usage: switch off dropout etc.
        self.model.eval()

    def generate(self, answer: str, context: str) -> dict:
        """Return ``{'question': ..., 'answer': answer}`` for the pair.

        Args:
            answer: The answer span the question should target.
            context: The passage containing the answer.

        The model was trained on prompts of the form
        ``<answer> ... <context> ... `` — keep that exact template,
        including the trailing space.
        """
        input_text = '<answer> %s <context> %s ' % (answer, context)
        # tokenizer(...) replaces the deprecated encode_plus; the
        # returned mapping has the same 'input_ids'/'attention_mask' keys.
        encoding = self.tokenizer(input_text, return_tensors='pt')
        input_ids = encoding['input_ids'].to(self.device)
        attention_mask = encoding['attention_mask'].to(self.device)
        # Disable gradient tracking: this is pure inference, so there is
        # no need to build an autograd graph.
        with torch.no_grad():
            outputs = self.model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
            )
        question = self.tokenizer.decode(
            outputs[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=True,
        )
        return {'question': question, 'answer': answer}
54
+
55
if __name__ == "__main__":
    # Quick demo: generate a question whose answer is 'ZhangCheng'.
    context = 'ZhangCheng fine-tuned T5v1.1 on SQuAD dataset for question generation.'
    answer = 'ZhangCheng'
    generator = QuestionGeneration()
    result = generator.generate(answer, context)
    print(result['question'])
    # Expected output:
    # Who fine-tuned T5v1.1 on SQuAD?
63
+ ```