mohamedemam
committed on
Commit
•
0c143aa
1
Parent(s):
4496951
Update README.md
Browse files
README.md
CHANGED
@@ -141,4 +141,44 @@ these are the prompts I use
|
|
141 |
>
|
142 |
# original model info
|
143 |
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/flan2_architecture.jpg"
|
144 |
-
alt="drawing" width="600"/>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
141 |
>
|
142 |
# original model info
|
143 |
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/flan2_architecture.jpg"
|
144 |
+
alt="drawing" width="600"/>
|
145 |
+
# Code
|
146 |
+
```python
|
147 |
+
import torch

from transformers import AutoTokenizer, AutoModelForQuestionAnswering
|
148 |
+
# Default checkpoint used throughout this example.
model_name = "mohamedemam/Question_generator"
|
149 |
+
def generate_question_answer(context, prompt, model_name="mohamedemam/Question_generator"):
    """
    Generates a question-answer pair using the provided context, prompt, and model.

    Args:
        context: String containing the source text to extract an answer from.
        prompt: String starting with a question word (e.g., "what," "who");
            it is prefixed to the extracted answer to form the question.
        model_name: Optional string specifying the model checkpoint
            (default: "mohamedemam/Question_generator").

    Returns:
        A tuple containing the generated question and answer strings.
    """

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForQuestionAnswering.from_pretrained(model_name)

    inputs = tokenizer(context, return_tensors="pt")
    with torch.no_grad():  # inference only — no gradients needed
        outputs = model(**inputs)

    # Pick the most likely extractive answer span from the start/end logits.
    start_scores, end_scores = outputs.start_logits, outputs.end_logits
    answer_start = torch.argmax(start_scores)
    answer_end = torch.argmax(end_scores) + 1  # account for inclusive end index

    # input_ids is batched (shape [1, seq_len]); index batch 0 before slicing
    # by token position. convert_tokens_to_string (singular) is the correct
    # tokenizer API and already returns a plain string, so no [0] is needed.
    answer = tokenizer.convert_tokens_to_string(
        tokenizer.convert_ids_to_tokens(inputs["input_ids"][0][answer_start:answer_end])
    )
    question = f"{prompt} {answer}"  # formulate the question using answer

    return question, answer
|
177 |
+
|
178 |
+
# Example usage: derive a question/answer pair from a one-sentence context.
example_context = "The capital of France is Paris."
example_prompt = "What"
question, answer = generate_question_answer(example_context, example_prompt)
print(f"Question: {question}")
print(f"Answer: {answer}")
|
184 |
+
```
|