"""Streamlit extractive question-answering app backed by a BERT QA model."""

import streamlit as st
import torch
from transformers import BertForQuestionAnswering, BertTokenizer

# BUG FIX: the original loaded plain `BertModel`, which has no span-prediction
# head, then faked "logits" by slicing `outputs.hidden_states[-1]` — hidden
# states are not even returned unless output_hidden_states=True, and arg-maxing
# feature dimensions is meaningless.  Use a QA model fine-tuned on SQuAD so the
# output actually contains start/end span logits.
MODEL_NAME = "bert-large-uncased-whole-word-masking-finetuned-squad"


@st.cache_resource
def _load_model():
    """Load the tokenizer and model once.

    Streamlit re-executes the whole script on every user interaction;
    caching avoids re-downloading/re-instantiating the model each rerun.
    """
    tokenizer = BertTokenizer.from_pretrained(MODEL_NAME)
    model = BertForQuestionAnswering.from_pretrained(MODEL_NAME)
    model.eval()  # inference only — disable dropout
    return tokenizer, model


tokenizer, model = _load_model()


def answer_query(question: str, context: str) -> str:
    """Return the most likely answer span for `question` within `context`.

    Encodes the (question, context) pair, runs the QA model, and decodes the
    token span between the argmax of the start logits and the argmax of the
    end logits (inclusive).

    Args:
        question: Natural-language question.
        context: Passage of text to search for the answer.

    Returns:
        The decoded answer string, or "" if the model predicts an
        empty/inverted span.
    """
    # Truncate to the model's maximum sequence length (512 for BERT) so long
    # uploaded contexts don't crash the encoder.
    inputs = tokenizer(question, context, return_tensors="pt", truncation=True)

    with torch.no_grad():
        outputs = model(**inputs)

    # BUG FIX: read the model's real span logits instead of slicing hidden
    # states.  These are token-position scores of shape (1, seq_len).
    answer_start = torch.argmax(outputs.start_logits)
    answer_end = torch.argmax(outputs.end_logits) + 1  # exclusive end index

    if answer_end <= answer_start:
        # Model predicted an inverted/empty span — no confident answer.
        return ""

    # BUG FIX: the original sliced raw `context` *characters* using *token*
    # indices.  Decode the predicted token span from the encoded input instead.
    answer_tokens = inputs["input_ids"][0][answer_start:answer_end]
    return tokenizer.decode(answer_tokens, skip_special_tokens=True)


# --- Streamlit UI -----------------------------------------------------------
st.title("Question Answering App")

# Textbox for the user's question.
user_query = st.text_input("Enter your question:")

# Optional context file; falls back to a built-in sample passage.
uploaded_file = st.file_uploader("Upload a context file (txt):")
if uploaded_file is not None:
    context = uploaded_file.read().decode("utf-8")
else:
    context = "This is a sample context for demonstration purposes. You can upload your own text file for context."

# Answer the query only once a question has been entered.
if user_query:
    answer = answer_query(user_query, context)
    st.write(f"Answer: {answer}")
else:
    st.write("Please enter a question.")