import gradio as gr
import PyPDF2
from secret1 import GOOGLE_API as google_api
from langchain.llms import GooglePalm
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import GooglePalmEmbeddings
from langchain.vectorstores import FAISS
from langchain.document_loaders import PyPDFLoader
from langchain.chains import RetrievalQA
from google import genai  # google-genai SDK, which provides genai.Client
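# Gradio app: upload a PDF, extract its text, and answer questions about it with Gemini.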
# Define chatbot response function
def chatbot_response(user_input):
    # Placeholder response; replace with actual chatbot logic
    bot_response = "You said: " + user_input
    # history.append((user_input, bot_response))
    return bot_response
# Define text splitter function
def text_splitter_function(text):
    text_splitter = CharacterTextSplitter(
        separator='\n',
        chunk_size=1000,
        chunk_overlap=40,
        length_function=len,
    )
    texts = text_splitter.split_text(text)
    return texts
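# Example (illustrative): text_splitter_function(long_text) returns a list of roughly
# 1000-character chunks, split on newlines, with 40 characters of overlap between chunks.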
# Helper function: build a FAISS index from text chunks using the given embeddings
def helper(text_splitter, embeddings):
    db = FAISS.from_texts(text_splitter, embeddings)  # Index the chunks for similarity search
    return db
# PDF text extraction function
def text_extract(file):
    pdf_reader = PyPDF2.PdfReader(file.name)
    num_pages = len(pdf_reader.pages)
    text = ""
    for page_num in range(num_pages):
        page = pdf_reader.pages[page_num]
        text += page.extract_text() or ""
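    # The commented-out block below sketches the original LangChain retrieval pipeline:
    # split the text, embed the chunks with GooglePalmEmbeddings, index them in FAISS,
    # and answer questions with a RetrievalQA chain over a GooglePalm LLM.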
    # text_splitter = text_splitter_function(text)
    # embeddings = GooglePalmEmbeddings(google_api_key=google_api)
    # print(embeddings)
    # db = FAISS.from_texts(text_splitter, embeddings)
    # print(db)
    # retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": 2})
    # llm = GooglePalm(google_api_key=google_api)
    # qa = RetrievalQA.from_chain_type(
    #     llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=True
    # )
    # result1 = qa.invoke({"query": "how r u"})
    # print("FitBot:", result1['result'])
    # Split extracted text into chunks
    # result = helper(text_splitter, embeddings)  # Call helper to index the text chunks
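    # Current approach: skip retrieval and send the full extracted text to Gemini,
    # letting the model answer directly from the PDF contents.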
    client = genai.Client(api_key=google_api)  # Read the key from secret1 instead of hardcoding it
    response = client.models.generate_content(
        model="gemini-2.0-flash",
        contents=f"You will be given input data; answer the question according to the user's input: {text}",
    )
    return response.text
# Define Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Chat with ChatGPT-like Interface")
    output = gr.Textbox(label="Output Box")
    state = gr.State([])
    with gr.Row():
        with gr.Column():
            user_input = gr.Textbox(show_label=False, placeholder="Type your message here...")
            send_btn = gr.Button("Send")
        with gr.Column():
            input_file = gr.File(label="Upload PDF", file_count="single")
            submit_btn = gr.Button("Submit")
    # Connect submit button to text_extract function
    submit_btn.click(text_extract, inputs=input_file, outputs=output)
    # Connect send button to chatbot_response function
    send_btn.click(chatbot_response, inputs=user_input, outputs=output)
# Launch the app (the Google API key is read from secret1.GOOGLE_API)
if __name__ == "__main__":
    demo.launch()