import os

import yaml

from langchain import FewShotPromptTemplate, PromptTemplate
from langchain.chains.sql_database.prompt import (
    PROMPT_SUFFIX,
    _mysql_prompt,
    _sqlite_prompt,
)
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts.example_selector.semantic_similarity import SemanticSimilarityExampleSelector
from langchain.prompts.prompt import PromptTemplate
from langchain.sql_database import SQLDatabase
from langchain.vectorstores import Chroma
from langchain_core.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)
from langchain_experimental.sql import SQLDatabaseChain

from config.index import (
    EMBEDDING_MODEL_NAME,
    MODEL_NAME,
)
from utils.QwenLM import QwenLM
from utils.index import (
    load_yaml,
    parse_example,
    write_yaml,
)
# Instantiate the local LLM that drives the SQL chain.
llm = QwenLM(model_path=MODEL_NAME)

# SECURITY: the DB password was hard-coded in the source. Prefer supplying
# the full connection string via the DATABASE_URL environment variable; the
# original URI is kept as a fallback so existing deployments keep working.
db = SQLDatabase.from_uri(
    os.environ.get("DATABASE_URL", "mysql://root:GXJKbb001@localhost:3306/prisma")
)


# Prompt used to render a single few-shot example: the table schema,
# then the question, the generated SQL, its result, and the final answer.
_EXAMPLE_FIELDS = ["table_info", "input", "sql_cmd", "sql_result", "answer"]
example_prompt = PromptTemplate(
    template="{table_info}\n\nQuestion: {input}\nSQLQuery: {sql_cmd}\nSQLResult: {sql_result}\nAnswer: {answer}",
    input_variables=_EXAMPLE_FIELDS,
)

# Load previously recorded (question, SQL, result, answer) examples; the
# REPL loop below appends to this file after every query, so the example
# pool grows over time.
examples_dict = load_yaml('./data.yaml')
# FIX: replaced a leftover debug dump of the entire example list with a
# concise summary line.
print(f'Loaded {len(examples_dict)} few-shot examples from ./data.yaml')

# Local embedding model used to index the examples for similarity search.
local_embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)

# Pick the examples most semantically similar to the incoming question.
# Chroma serves as the vector store over the example embeddings; at most
# three examples are returned (fewer when the pool is smaller).
example_selector = SemanticSimilarityExampleSelector.from_examples(
    examples_dict,
    local_embeddings,
    Chroma,  # type: ignore
    k=min(3, len(examples_dict)),
)

# Few-shot prompt for the SQL chain: base dialect instructions, then the
# retrieved examples, then the standard suffix requesting the next query.
# FIX: the database configured above is MySQL, so use the MySQL base prompt
# rather than the SQLite one, and separate the examples header from the
# instructions with a newline so the prompt does not run together.
few_shot_prompt = FewShotPromptTemplate(
    example_selector=example_selector,
    example_prompt=example_prompt,
    prefix=_mysql_prompt + "\nHere are some examples:",
    suffix=PROMPT_SUFFIX,
    input_variables=["table_info", "input", "top_k"],
)

# Chain that turns a natural-language question into SQL, executes it, and
# answers from the result. intermediate_steps are returned so successful
# runs can be recorded as new few-shot examples by the loop below.
db_chain = SQLDatabaseChain.from_llm(
    llm,
    db,
    prompt=few_shot_prompt,
    use_query_checker=True,
    verbose=True,
    return_intermediate_steps=True,
)

# Interactive REPL: read a question, run it through the chain, and append
# the (question, SQL, result) trace to data.yaml so it can be reused as a
# few-shot example on later runs. Type "exit" to quit.
while True:
    query = input("\n你说: ")
    if query == "exit":
        break
    try:
        # BUG FIX: the original passed the *builtin* `input` function to the
        # chain (and into the error record below) instead of the user's
        # question string.
        result = db_chain(query)
        print("*** Query succeeded")
    except Exception as exc:
        print("*** Query failed")
        # Not every exception carries intermediate_steps; default to an
        # empty trace instead of raising AttributeError inside the handler.
        result = {
            "query": query,
            "intermediate_steps": getattr(exc, "intermediate_steps", []),
        }
    # Record the run (successful or not) as a reusable example; the
    # success/failure paths previously duplicated this append logic.
    example = parse_example(result)
    yaml_example = yaml.dump(example, allow_unicode=True)
    write_yaml(yaml_example, './data.yaml', 'a')

