BusinessDev committed · Commit b46f4f3
1 Parent(s): a7a7e44
finalmaybe
app.py CHANGED
@@ -1,4 +1,5 @@
 import gradio as gr
+import torch
 
 # Import libraries from transformers
 from transformers import AutoTokenizer, AutoModelForQuestionAnswering
@@ -13,6 +14,15 @@ def answer_question(context, question):
     # Encode the context and question
     inputs = tokenizer(context, question, return_tensors="pt")
 
+    # Perform question answering
+    outputs = model(**inputs)
+
+    # Get the predicted start and end token positions
+    start_scores, end_scores = outputs.start_logits, outputs.end_logits
+
+    # Decode the answer based on predicted positions
+    answer_start = torch.argmax(start_scores)
+    answer_end = torch.argmax(end_scores) + 1
 
     # Get answer tokens and convert them to string
     answer = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
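
For context, here is a minimal sketch of how the added lines could fit into the full app.py. The diff does not show which checkpoint the Space loads, how the answer string is finally produced, or how the Gradio interface is wired, so the model name, the span-to-string decoding, and the gr.Interface setup below are assumptions rather than the Space's actual code:

import gradio as gr
import torch

# Import libraries from transformers
from transformers import AutoTokenizer, AutoModelForQuestionAnswering

# Assumed checkpoint: the diff does not reveal which QA model the Space uses
MODEL_NAME = "distilbert-base-cased-distilled-squad"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForQuestionAnswering.from_pretrained(MODEL_NAME)


def answer_question(context, question):
    # Encode the context and question (argument order kept as in the diff)
    inputs = tokenizer(context, question, return_tensors="pt")

    # Perform question answering; no gradients are needed at inference time
    with torch.no_grad():
        outputs = model(**inputs)

    # Get the predicted start and end token positions
    start_scores, end_scores = outputs.start_logits, outputs.end_logits
    answer_start = torch.argmax(start_scores)
    answer_end = torch.argmax(end_scores) + 1

    # Convert the input ids to tokens, slice out the predicted span,
    # and join it back into a readable answer string
    tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
    answer = tokenizer.convert_tokens_to_string(tokens[answer_start:answer_end])
    return answer


# Hypothetical Gradio wiring; not shown in the diff
demo = gr.Interface(
    fn=answer_question,
    inputs=[gr.Textbox(label="Context"), gr.Textbox(label="Question")],
    outputs=gr.Textbox(label="Answer"),
)

if __name__ == "__main__":
    demo.launch()

The slice tokens[answer_start:answer_end] is what turns the argmax positions from the start and end logits into the answer span itself; convert_tokens_to_string then merges subword pieces back into plain text.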