# NOTE(review): the following lines are platform/log residue from the paste,
# not code — kept as comments so the file parses:
#   Spaces:
#   Runtime error
#   Runtime error
# from langchain import OpenAI, SQLDatabase, SQLDatabaseChain
import json
import os

import boto3
import gradio as gr
import openai
from langchain import OpenAI, AgentExecutor, Tool, load_tools, initialize_agent
from langchain.agents import AgentType
from langchain.agents import create_sql_agent
# from langchain.agents import AgentExecutor
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.agents.tools import SQLAgent  # NOTE(review): no SQLAgent in langchain.agents.tools — confirm this import
from langchain.llms.openai import OpenAI
from langchain.memory import ReadOnlySharedMemory
from langchain.sql_database import SQLDatabase
# --- Environment & service configuration --------------------------------
# Required environment variables; a missing one fails fast with KeyError.
postgres_connection_str = os.environ['POSTGRES_CONNECTION_STR']
access_key = os.environ['AWS_ACCESS_KEY_ID']
secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
openai_api_key = os.environ['OPENAI_API_KEY']
region = 'us-east-1'  # AWS region for the S3 client

# Postgres-backed SQLDatabase restricted to the langchain_testing schema.
db = SQLDatabase.from_uri(postgres_connection_str, schema='langchain_testing')

# S3 client (used for persisting flagged text; currently only referenced
# from the commented-out code elsewhere in this file).
s3_client = boto3.client(
    's3',
    aws_access_key_id=access_key,
    aws_secret_access_key=secret_key,
    region_name=region,
)

# Deterministic (temperature=0) OpenAI completion LLM with an explicit key.
llm = OpenAI(temperature=0, verbose=True, openai_api_key=openai_api_key)
# toolkit = SQLDatabaseToolkit(db=db, llm=llm) | |
# agent_executor = create_sql_agent( | |
# llm=OpenAI(temperature=0), | |
# toolkit=toolkit, | |
# verbose=True | |
# ) | |
# def generate_response(question): | |
# prompt_template = """ | |
# Keep in mind that any site, building, or property related question should be routed to the real estate portal. | |
# Any Local or city or city-sector incentives programs are asking about the local incentives program table. | |
# Any State incentives programs are asking about the state incentives program table. | |
# When the user asks about a state incentive, don't query the local_incentives_catalog table. | |
# When the user asks about a local/city incentive, don't query the state_incentives_catalog table.
# If you can't find the answer, make sure to look up the program field in the local and state incentives catalogs. | |
# If your final answer is "I don't know", then respond with "Please adjust your question and try asking again." | |
# """ | |
# chain_response = agent_executor.run(question + prompt_template) | |
# # bucket_name = 'your-bucket-name' | |
# # file_name = 'flagged_text.txt' | |
# # s3_client.put_object(Body=input_text, Bucket=bucket_name, Key=file_name) | |
# return chain_response | |
# iface = gr.Interface( | |
# fn=generate_response, | |
# inputs=gr.inputs.Textbox(label='Enter your question:', default='What is the Neighborhood Credit Fund in New York City?'), | |
# outputs=gr.outputs.Textbox(label="EDai Analyst's Response:") | |
# ) | |
# iface.launch(share=True) | |
# Load the tools and build the agent.
# NOTE(review): the original re-created ``llm = OpenAI()`` here, silently
# discarding the configured deterministic client above and falling back to
# the default sampling temperature; the configured ``llm`` is reused instead.
# NOTE(review): "llama_index" is not a built-in langchain tool name — confirm
# a custom tool registers under it, otherwise load_tools raises.
tools = load_tools(["llama_index"], llm=llm)

# Zero-shot ReAct agent over the loaded tools.
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
# Define the Gradio interface | |
def chatbot(user_name):
    """Answer one chat message per call (Gradio handler).

    NOTE(review): the original body ran a ``while True`` loop calling
    ``gr.text_area`` inside the handler, which is not how ``gr.Interface``
    works — gradio invokes this function once per submission with the
    textbox value and renders the return value. It also instantiated
    ``ReadOnlySharedMemory()`` without its required backing memory and a
    nonexistent ``SQLAgent``. Rewritten as a single request/response turn
    that delegates to the module-level ``agent``.

    Parameters
    ----------
    user_name : str
        The raw textbox input. Despite the name, in this single-input
        gradio flow it carries the user's message.

    Returns
    -------
    str
        The agent's answer, a greeting for empty input, or a farewell for
        the original conversation-ending keywords.
    """
    message = (user_name or "").strip()
    if not message:
        return "Hi, how may I help you?"
    # Honor the original conversation-ending keywords.
    if message.lower() in ("bye", "goodbye", "exit"):
        return "Goodbye!"
    try:
        # ``agent`` is the zero-shot agent initialized at module level.
        return agent.run(message)
    except Exception as exc:
        # Surface the failure in the UI instead of crashing the app.
        return f"Sorry, I could not answer that: {exc}"
# Wire the handler into a one-text-in / one-text-out web UI and serve it.
iface = gr.Interface(
    fn=chatbot,
    inputs=["text"],
    outputs="text",
    title="LangChain Chatbot",
    description="A chatbot powered by LangChain and Gradio.",
)

# Start the local Gradio server (blocks until shut down).
iface.launch()