Spaces:
Sleeping
Sleeping
Update modeles.py
Browse files- modeles.py +7 -29
modeles.py
CHANGED
|
@@ -1,41 +1,19 @@
|
|
| 1 |
-
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
|
| 2 |
-
import torch
|
| 3 |
-
|
| 4 |
-
def load_and_answer(question, context, model_name):
    """Run extractive question answering with the model named *model_name*.

    Loads the tokenizer and QA model from the hub, scores every token of the
    encoded pair as a potential answer start/end, and decodes the
    highest-scoring span.

    Returns a dict with the decoded "answer" text plus the "start" and "end"
    token indices of that span.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForQuestionAnswering.from_pretrained(model_name)

    # Encode the question/context pair, truncated to a 512-token window.
    encoded = tokenizer.encode_plus(
        question,
        context,
        max_length=512,
        truncation=True,
        padding=True,
        return_tensors='pt',
    )
    # Move every input tensor onto whatever device the model lives on.
    encoded = {name: tensor.to(model.device) for name, tensor in encoded.items()}

    # Inference only — no gradients needed.
    with torch.no_grad():
        result = model(**encoded)

    # Most likely start token, and most likely end token (+1 so the slice
    # below includes the end token).
    start_idx = torch.argmax(result.start_logits)
    end_idx = torch.argmax(result.end_logits) + 1

    # Decode the selected token span back into text.
    span_ids = encoded['input_ids'][0, start_idx:end_idx]
    answer_text = tokenizer.decode(span_ids, skip_special_tokens=True)
    return {"answer": answer_text, "start": start_idx.item(), "end": end_idx.item()}
|
| 27 |
|
| 28 |
def squeezebert(context, question):
|
| 29 |
# Define the specific model and tokenizer for SqueezeBERT
|
| 30 |
model_name = "ALOQAS/squeezebert-uncased-finetuned-squad-v2"
|
| 31 |
-
|
|
|
|
| 32 |
|
| 33 |
def bert(context, question):
|
| 34 |
# Define the specific model and tokenizer for BERT
|
| 35 |
model_name = "ALOQAS/bert-large-uncased-finetuned-squad-v2"
|
| 36 |
-
|
|
|
|
| 37 |
|
| 38 |
def deberta(context, question):
|
| 39 |
# Define the specific model and tokenizer for DeBERTa
|
| 40 |
model_name = "ALOQAS/deberta-large-finetuned-squad-v2"
|
| 41 |
-
|
|
|
|
|
|
| 1 |
+
from transformers import pipeline
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
|
| 3 |
def squeezebert(context, question):
    """Answer *question* from *context* with the SqueezeBERT SQuAD v2 model.

    Returns the transformers question-answering pipeline result (a dict
    with 'answer', 'score', 'start' and 'end' keys, per the pipeline docs).
    """
    # Define the specific model and tokenizer for SqueezeBERT
    model_name = "ALOQAS/squeezebert-uncased-finetuned-squad-v2"
    # Build the pipeline only once and cache it on the function object:
    # downloading/loading the model on every call is very expensive.
    qa_pipeline = getattr(squeezebert, "_qa_pipeline", None)
    if qa_pipeline is None:
        qa_pipeline = pipeline('question-answering', model=model_name, tokenizer=model_name)
        squeezebert._qa_pipeline = qa_pipeline
    return qa_pipeline(context=context, question=question)
|
| 8 |
|
| 9 |
def bert(context, question):
    """Answer *question* from *context* with the BERT-large SQuAD v2 model.

    Returns the transformers question-answering pipeline result (a dict
    with 'answer', 'score', 'start' and 'end' keys, per the pipeline docs).
    """
    # Define the specific model and tokenizer for BERT
    model_name = "ALOQAS/bert-large-uncased-finetuned-squad-v2"
    # Build the pipeline only once and cache it on the function object:
    # downloading/loading the model on every call is very expensive.
    qa_pipeline = getattr(bert, "_qa_pipeline", None)
    if qa_pipeline is None:
        qa_pipeline = pipeline('question-answering', model=model_name, tokenizer=model_name)
        bert._qa_pipeline = qa_pipeline
    return qa_pipeline(context=context, question=question)
|
| 14 |
|
| 15 |
def deberta(context, question):
    """Answer *question* from *context* with the DeBERTa-large SQuAD v2 model.

    Returns the transformers question-answering pipeline result (a dict
    with 'answer', 'score', 'start' and 'end' keys, per the pipeline docs).
    """
    # Define the specific model and tokenizer for DeBERTa
    model_name = "ALOQAS/deberta-large-finetuned-squad-v2"
    # Build the pipeline only once and cache it on the function object:
    # downloading/loading the model on every call is very expensive.
    qa_pipeline = getattr(deberta, "_qa_pipeline", None)
    if qa_pipeline is None:
        qa_pipeline = pipeline('question-answering', model=model_name, tokenizer=model_name)
        deberta._qa_pipeline = qa_pipeline
    return qa_pipeline(context=context, question=question)
|