Spaces:
Sleeping
Sleeping
File size: 1,697 Bytes
e28f0eb 8705b8d 4265c8b 8705b8d 4265c8b f1c96e4 4265c8b 8705b8d 4265c8b e32de15 8705b8d 4265c8b e32de15 8705b8d 4265c8b 8705b8d 4265c8b e32de15 8705b8d 4265c8b e32de15 8705b8d 4265c8b 8705b8d e32de15 8705b8d e32de15 8705b8d e32de15 8705b8d e32de15 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 |
import torch
import streamlit as st
from transformers import BertTokenizer, BertForQuestionAnswering
# Utilize BertForQuestionAnswering model for direct start/end logits
# NOTE(review): 'bert-base-uncased' is a *pretrained base* checkpoint — its
# question-answering head is randomly initialized, not fine-tuned on SQuAD,
# so predicted spans will likely be poor. Consider a QA-finetuned checkpoint
# such as 'bert-large-uncased-whole-word-masking-finetuned-squad' — confirm.
# Both objects are downloaded/cached on first run (network required).
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForQuestionAnswering.from_pretrained("bert-base-uncased")
def answer_query(question, context):
    """Answer *question* from *context* via extractive QA with BERT.

    Args:
        question: The user's question string.
        context: The passage to search for an answer.

    Returns:
        The decoded answer span as a string (empty if the model predicts
        a zero-length span).
    """
    # Encode the (question, context) pair into model inputs.
    inputs = tokenizer(question, context, return_tensors="pt")
    # Inference only — no gradient tracking needed.
    with torch.no_grad():
        outputs = model(**inputs)
    start_logits = outputs.start_logits
    end_logits = outputs.end_logits
    # Pick the most likely start token, then the most likely end token at or
    # after it. (The original took two independent argmaxes, which could
    # yield end < start and an invalid span.)
    answer_start = torch.argmax(start_logits)
    answer_end = answer_start + torch.argmax(end_logits[0, answer_start:]) + 1
    # BUG FIX: the original detokenized the *entire* input into one string
    # and then sliced that string by *character* position using *token*
    # indices, returning garbage. Slice the token ids first, then decode.
    answer = tokenizer.decode(
        inputs["input_ids"][0][answer_start:answer_end],
        skip_special_tokens=True,
    )
    return answer
# --- Streamlit UI ---
st.title("Question Answering App")

# Textbox for the user's question.
user_query = st.text_input("Enter your question:")

# Optional context file; fall back to a built-in demo context.
uploaded_file = st.file_uploader("Upload a context file (txt):")
if uploaded_file is not None:
    # Uploaded files arrive as bytes; decode as UTF-8 text.
    context = uploaded_file.read().decode("utf-8")
else:
    # Use default context if no file uploaded
    context = "This is a sample context for demonstration purposes. You can upload your own text file for context."

# Run QA only once a question has been entered.
# (Also removes the stray trailing "|" that made the original line a
# syntax error — extraction artifact from the page gutter.)
if user_query:
    answer = answer_query(user_query, context)
    st.write(f"Answer: {answer}")
else:
    st.write("Please enter a question.")