# ChatBot / app.py
# (Hugging Face Spaces page header preserved as a comment; upload b9bc5cd "Update app.py" by chatbytes.
#  The raw header lines were not valid Python and broke parsing.)
import gradio as gr
import PyPDF2
from secret1 import GOOGLE_API as google_api
from langchain.llms import GooglePalm
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import GooglePalmEmbeddings
from langchain.vectorstores import FAISS
from langchain.document_loaders import PyPDFLoader
from langchain.chains import RetrievalQA
import google.generativeai as genai
# Define chatbot response function
def chatbot_response(user_input):
    """Return a simple echo reply for the chat Send button.

    Placeholder chat logic: echoes the user's message. The original built
    ``bot_response`` and then discarded it, returning the literal "hii";
    the computed response is now actually returned.

    Args:
        user_input: The raw text the user typed.

    Returns:
        str: "You said: " followed by the user's message.
    """
    bot_response = "You said: " + user_input
    # history.append((user_input, bot_response))  # history/state wiring not implemented yet
    return bot_response
# Define text splitter function
def text_splitter_function(text):
    """Split *text* into overlapping chunks for embedding/indexing.

    Uses a newline separator with 1000-character chunks and a 40-character
    overlap so sentence boundaries are less likely to be cut mid-thought.

    Args:
        text: The full document text to split.

    Returns:
        list[str]: The chunked pieces of the input text.
    """
    splitter = CharacterTextSplitter(
        separator='\n',
        chunk_size=1000,
        chunk_overlap=40,
        length_function=len,
    )
    return splitter.split_text(text)
# Helper function for text processing
def helper(text_splitter):
    """Build a FAISS vector store from pre-split text chunks.

    Args:
        text_splitter: A list of text chunks (e.g. the output of
            ``text_splitter_function``).

    Returns:
        The FAISS vector store built from the chunks. The original built
        the store and then discarded it, returning the literal 'hi'.

    Note:
        Relies on a module-level ``embeddings`` object being defined
        before this is called — TODO: confirm it is initialized somewhere,
        as it is not visible in this file.
    """
    db = FAISS.from_texts(text_splitter, embeddings)  # Use 'embeddings' for FAISS
    return db
# PDF text extraction function
def text_extract(file):
    """Extract all text from an uploaded PDF and send it to Gemini.

    Args:
        file: A Gradio file object; ``file.name`` is the path to the
            uploaded PDF on disk.

    Returns:
        str: The model's generated answer text. (The original did
        ``return print(...)``, which always returns None, so the Gradio
        output box stayed empty.)
    """
    # Gather the text of every page; extract_text() can return None for
    # image-only pages, hence the `or ""` guard.
    pdf_reader = PyPDF2.PdfReader(file.name)
    text = ""
    for page in pdf_reader.pages:
        text += page.extract_text() or ""

    # SECURITY: never hard-code API keys in source. Use the key imported
    # from secret1 (the original embedded a literal key here).
    #
    # The original called genai.Client(...), which belongs to the newer
    # `google-genai` SDK; this file imports the legacy `google.generativeai`
    # SDK, whose API is configure() + GenerativeModel.
    genai.configure(api_key=google_api)
    model = genai.GenerativeModel("gemini-2.0-flash")
    response = model.generate_content(
        f"you will be given the input data you have to answer the question according to the user input : {text}"
    )
    return response.text
# Define Gradio interface: a shared output box above two columns —
# one for free-form chat, one for PDF upload.
with gr.Blocks() as demo:
    gr.Markdown("# Chat with ChatGPT-like Interface")
    result_box = gr.Textbox(label="Output Box")
    state = gr.State([])  # reserved for future chat history
    with gr.Row():
        with gr.Column():
            message_box = gr.Textbox(show_label=False, placeholder="Type your message here...")
            btn_send = gr.Button("Send")
        with gr.Column():
            pdf_file = gr.File(label="Upload PDF", file_count="single")
            btn_submit = gr.Button("Submit")
    # Wire the PDF Submit button to the extraction pipeline.
    btn_submit.click(text_extract, inputs=pdf_file, outputs=result_box)
    # Wire the chat Send button to the echo responder.
    btn_send.click(chatbot_response, inputs=message_box, outputs=result_box)

# Launch the app when run as a script.
if __name__ == "__main__":
    demo.launch()