import os

from langchain import LLMChain
from langchain.agents.agent import AgentExecutor
from langchain.chains import create_sql_query_chain
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts.prompt import PromptTemplate
from langchain.sql_database import SQLDatabase
from langchain_community.agent_toolkits import SQLDatabaseToolkit
from langchain_community.agent_toolkits import create_sql_agent
from langchain_community.agent_toolkits.sql.prompt import SQL_FUNCTIONS_SUFFIX
from langchain_core.messages import AIMessage, SystemMessage
from langchain_core.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)
from langchain_experimental.sql import SQLDatabaseChain
from utils.QwenLM import QwenLM

from config.index import (
    MODEL_NAME
)
# Instantiate the local Qwen LLM from the configured model path.
llm = QwenLM(model_path=MODEL_NAME)

# SECURITY: the MySQL password was hard-coded in source. Prefer the
# DATABASE_URL environment variable; the original literal remains only as a
# backward-compatible fallback so existing deployments keep working.
# TODO: rotate this credential and remove the fallback.
_db_uri = os.environ.get(
    "DATABASE_URL", "mysql://root:GXJKbb001@localhost:3306/prisma"
)
db = SQLDatabase.from_uri(_db_uri)

# Prompt contract for the SQL chain. The chain fills in three variables:
#   {dialect}    - SQL dialect of the connected database
#   {table_info} - schema of the tables the model is allowed to use
#   {input}      - the user's natural-language question
# The template text itself is in Chinese and is runtime data; it must stay
# byte-identical to what the model was tuned/tested against.
_DEFAULT_TEMPLATE = """你是一个sql大神，给定一个输入问题, 根据输入的问题，分布拆解成若干个子任务，输出子任务，最终创建一个正确的 {dialect} 来运行, 然后查看查询结果并返回答案.
使用以下格式来回答我的问题:

问题: ""
SQL语句: ""
返回结果: ""
回答: ""

只允许用以下表:

{table_info}

问题: {input}"""

# from_template infers the input variables ({input}, {table_info}, {dialect})
# directly from the f-string-style placeholders above.
PROMPT = PromptTemplate.from_template(_DEFAULT_TEMPLATE)

# Dump the introspected schema so we can eyeball which tables/columns the
# chain will expose to the model.
print('db===================================', db.get_table_info())

# Assemble the natural-language -> SQL chain around the prompt defined above.
# verbose/intermediate steps are enabled for debugging; the query checker asks
# the LLM to sanity-check generated SQL before execution.
_chain_kwargs = dict(
    verbose=True,
    prompt=PROMPT,
    use_query_checker=True,
    return_intermediate_steps=True,
)
db_chain = SQLDatabaseChain.from_llm(llm, db, **_chain_kwargs)


# Interactive REPL: read a question, run it through the SQL chain, print the
# raw response dict (includes intermediate steps). Type "exit" to quit.
while True:
    try:
        query = input("\n你说: ")
    except (EOFError, KeyboardInterrupt):
        # Ctrl-D / Ctrl-C previously crashed with a traceback; exit cleanly.
        break
    query = query.strip()
    if not query:
        # Nothing to ask the model; re-prompt instead of sending empty input.
        continue
    if query == "exit":
        break
    response = db_chain.invoke(input=query)
    print(response)