"""Gradio chat UI backed by a LangChain ZeroShotAgent using Gemini Pro.

Wires a Google Generative AI chat model to a conversational ReAct agent
with Wikipedia and LLM-math tools, keeps per-session history in a
ConversationBufferMemory, and exposes the whole thing through a simple
Gradio interface.
"""
import os

import gradio as gr
# Tool / ConversationChain / GoogleSearchAPIWrapper are kept for the optional
# Google-search tool variant (requires SERPAPI_API_KEY / Google CSE setup).
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent, load_tools
from langchain.chains import ConversationChain, LLMChain
from langchain.memory import ConversationBufferMemory
from langchain_community.utilities import GoogleSearchAPIWrapper
from langchain_google_genai import ChatGoogleGenerativeAI

# API keys come from the environment. GOOGLE_API_KEY is required for the
# Gemini model; SERPAPI_API_KEY is only needed for the disabled search tool.
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
SERPAPI_API_KEY = os.getenv("SERPAPI_API_KEY")

llm = ChatGoogleGenerativeAI(
    google_api_key=GOOGLE_API_KEY,
    model="gemini-pro",
    temperature=0.7,
)

# Tools the agent may invoke: Wikipedia lookup and an LLM-powered calculator.
# (A GoogleSearchAPIWrapper-based "Search" Tool can be substituted here.)
tools = load_tools(["wikipedia", "llm-math"], llm=llm)

# Prompt pieces for the zero-shot ReAct agent. The suffix threads the chat
# history and the agent's scratchpad into every turn.
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"

{chat_history}
Question: {input}
{agent_scratchpad}"""

agent_prompt = ZeroShotAgent.create_prompt(
    tools,
    prefix=prefix,
    suffix=suffix,
    input_variables=["input", "chat_history", "agent_scratchpad"],
)

# Buffer memory stores the running conversation under the "chat_history" key
# referenced by the prompt above.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
llm_chain = LLMChain(llm=llm, prompt=agent_prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_chain = AgentExecutor.from_agent_and_tools(
    agent=agent,
    tools=tools,
    verbose=True,
    memory=memory,
)


def chat(message):
    """Run one agent turn for *message*.

    Returns a tuple of (agent answer, human-readable conversation history)
    matching the two output Textboxes of the Gradio interface.
    """
    # NOTE(review): Chain.run is deprecated in newer LangChain releases in
    # favour of .invoke(); kept because this file uses the legacy-style API.
    answer = agent_chain.run(input=message)
    # Render the buffered messages as plain text — the raw dict of Message
    # objects returned by load_memory_variables is not display-friendly.
    history = memory.load_memory_variables({}).get("chat_history", [])
    history_text = "\n".join(
        f"{getattr(m, 'type', '?')}: {getattr(m, 'content', m)}" for m in history
    )
    return answer, history_text


iface = gr.Interface(
    fn=chat,
    inputs=[gr.Textbox(lines=2, placeholder="Type your message here")],
    outputs=[
        gr.Textbox(label="Response"),
        gr.Textbox(label="Conversation History", lines=10),
    ],
    title="Chat with Gemini-Pro",
)

# Guard the launch so importing this module (e.g. for tests) does not start
# the web server as a side effect.
if __name__ == "__main__":
    iface.launch(debug=True)