# Captured from a Hugging Face Space that was in a "Runtime error" state.
import gradio as gr

import langchain.llms as llms
import langchain.llms.prompts as prompts
import langchain.pipelines as pipelines
from langchain.llms.responses import ResponseItem
def process_pdf(pdf_file):
    """Extract structured information from an uploaded PDF.

    Args:
        pdf_file (bytes): Raw content of the uploaded PDF file.

    Returns:
        dict: The information extracted from the document.
    """
    # Delegate to the configured extraction pipeline — swap in your
    # preferred backend (e.g. Camelot, PyMuPDF, PDFMiner.Six).
    return extract_information_from_pdf(pdf_file)
def answer_question(question, context, llm):
    """Answer *question* grounded in *context* using the given LLaMA3 model.

    Args:
        question (str): The user's question.
        context (dict): Information previously extracted from the PDF.
        llm (llms.BaseLLM): The LLaMA3 language model instance.

    Returns:
        ResponseItem: The first generation — answer, score, and retrieval.
    """
    # Build the RAG prompt; substitute your preferred template, e.g.
    # "The document says [RETRIEVAL]. Can you answer this question based on it: {question}?"
    rag_prompt = prompts.get_rag_answering_prompt(
        question, context, retrieval_template=""
    )
    generations = llm.run(rag_prompt, wait_for_sequences=True)
    return generations[0]
def chatbot(pdf_file, message, chat_history):
    """Handle one chat turn: optionally ingest a PDF, then answer a question.

    Args:
        pdf_file (bytes, optional): Uploaded PDF content, or None.
        message (str): The user's question (may be empty).
        chat_history (list): Previous chat messages; None is treated as empty.

    Returns:
        list: The updated chat history including this turn's messages.
    """
    # Tolerate a missing history (e.g. first call from a fresh session).
    chat_history = chat_history or []
    # BUG FIX: `context` was previously only bound inside the PDF branch,
    # so asking a question without uploading a PDF raised NameError.
    context = None
    if pdf_file is not None:
        context = process_pdf(pdf_file)
        chat_history.append("**You uploaded a PDF.**")
    if message:
        chat_history.append(f"**User:** {message}")
        if context is None:
            # No document to ground the answer in — reply gracefully
            # instead of crashing on an unbound variable.
            chat_history.append(
                "**Chatbot:** Please upload a PDF first so I have context to answer from."
            )
        else:
            # Access the LLaMA3 model (replace with your setup).
            llm = llms.get_llm("facebook/bart-base")  # Example model (replace with your access)
            response = answer_question(message, context, llm)
            chat_history.append(f"**Chatbot:** {response.generated_text}")
    return chat_history
# Gradio interface setup.
# BUG FIXES vs. the original:
#   * `chatbot` takes three parameters but only two inputs were declared —
#     Gradio rejects the mismatched arity at launch; gr.State carries the history.
#   * gr.File has no type="pdf"; "binary" delivers bytes, matching process_pdf.
#   * gr.Textbox has no `multiline` kwarg (it uses `lines`); since `chatbot`
#     returns a list, gr.JSON is the matching output component.
# NOTE(review): gr.State in inputs only is not written back by Interface, so
# history does not persist across turns here — use gr.Blocks or
# gr.ChatInterface for a fully stateful chat. TODO confirm desired UX.
interface = gr.Interface(
    fn=chatbot,
    inputs=[
        gr.File(type="binary", label="Upload PDF (optional)"),
        gr.Textbox(label="Ask a question"),
        gr.State([]),  # chat history passed through to chatbot()
    ],
    outputs=gr.JSON(label="Chat history"),
    title="PDF Q&A Chatbot with LLaMA3",
    description="Ask questions about the uploaded PDF or provide an empty file to use example content.",
)
interface.launch()