import streamlit as st
from transformers import AutoModelForQuestionAnswering, AutoTokenizer
import torch
# Load the saved model and tokenizer (cached so Streamlit reruns don't reload them)
@st.cache_resource
def load_model():
    model_path = "bert_qa_model"
    model = AutoModelForQuestionAnswering.from_pretrained(model_path)
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    return model, tokenizer

model, tokenizer = load_model()
def answer_question(context, question):
    # Tokenize the question/context pair (question first, as BERT QA models expect)
    inputs = tokenizer(question, context, return_tensors="pt")

    # Run the model without tracking gradients (inference only)
    with torch.no_grad():
        outputs = model(**inputs)

    # Pick the most likely start and end token positions from the logits
    answer_start = torch.argmax(outputs.start_logits)
    answer_end = torch.argmax(outputs.end_logits) + 1

    # Decode the predicted span back to text, dropping [CLS]/[SEP] tokens
    answer = tokenizer.decode(
        inputs["input_ids"][0][answer_start:answer_end], skip_special_tokens=True
    )
    return answer
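
# A quick sanity check, assuming the checkpoint in bert_qa_model was fine-tuned
# on SQuAD-style extractive QA (the strings below are illustrative, not from this repo):
#   context = "The Eiffel Tower was completed in 1889 and stands in Paris."
#   question = "When was the Eiffel Tower completed?"
#   answer_question(context, question)  # expected to return a span like "1889"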
# Streamlit app
st.title("Question Answering System")
st.write("Enter a context and a question, and the model will provide an answer based on the context.")

context = st.text_area("Context", height=200)
question = st.text_input("Question")

if st.button("Get Answer"):
    if context and question:
        answer = answer_question(context, question)
        st.success(f"Answer: {answer}")
    else:
        st.error("Please provide both a context and a question.")

st.markdown("---")
st.write("Powered by Hugging Face Transformers and Streamlit")