Spaces:
Sleeping
Sleeping
File size: 1,438 Bytes
05281d2 72fc123 05281d2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 |
import streamlit as st
from transformers import AutoModelForQuestionAnswering, AutoTokenizer
import torch
# Model loading -------------------------------------------------------------
@st.cache_resource
def load_model():
    """Load the fine-tuned QA model and tokenizer saved under ``bert_qa_model``.

    Decorated with ``st.cache_resource`` so the heavyweight model/tokenizer
    pair is created once and reused across Streamlit reruns.
    """
    saved_dir = "bert_qa_model"
    qa_model = AutoModelForQuestionAnswering.from_pretrained(saved_dir)
    qa_tokenizer = AutoTokenizer.from_pretrained(saved_dir)
    return qa_model, qa_tokenizer


model, tokenizer = load_model()
def answer_question(context, question):
    """Extract the answer span for *question* from *context*.

    Tokenizes the (question, context) pair, runs the QA model, and decodes
    the token span between the argmax start and end logits.

    Returns:
        The decoded answer text, or an empty string when the model's
        predicted end position precedes its predicted start (no coherent
        span found).
    """
    # Tokenize the question/context pair into a single model input.
    inputs = tokenizer(question, context, return_tensors="pt")

    # Inference only — no gradients needed.
    with torch.no_grad():
        outputs = model(**inputs)

    # Most likely start/end token positions; end is made exclusive (+1).
    answer_start = torch.argmax(outputs.start_logits)
    answer_end = torch.argmax(outputs.end_logits) + 1

    # The two argmaxes are taken independently, so the end can land before
    # the start; slicing would then yield an empty/garbled answer.
    if answer_end <= answer_start:
        return ""

    # skip_special_tokens=True keeps [CLS]/[SEP] out of the displayed text,
    # which the original decode would leak into the answer.
    answer = tokenizer.decode(
        inputs["input_ids"][0][answer_start:answer_end],
        skip_special_tokens=True,
    )
    return answer.strip()
# --- Streamlit UI ---
st.title("Question Answering System")
st.write("Enter a context and a question, and the model will provide an answer based on the context.")

# Free-text inputs for the passage and the question to answer from it.
context = st.text_area("Context", height=200)
question = st.text_input("Question")

if st.button("Get Answer"):
    # Require both fields before running the model.
    if not (context and question):
        st.error("Please provide both context and question.")
    else:
        st.success(f"Answer: {answer_question(context, question)}")

st.markdown("---")
st.write("Powered by Hugging Face Transformers and Streamlit")