from operator import itemgetter

import torch
from langchain.llms import HuggingFacePipeline
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableLambda, RunnableMap
from langchain.utilities import SQLDatabase
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

"""
本地LLM加载,使用HuggingFacePipeline连接到langchain
"""
localmodels = [
                r'E:\llama\text-generation-webui\models\Baichuan2-7B-Base',
                r'E:\llama\text-generation-webui\models\Baichuan2-7B-Chat',
                r'E:\llama\text-generation-webui\models\mistralai_Mistral-7B-v0.1',
                r'E:\llama\text-generation-webui\models\Qwen_Qwen-7B',
                r'E:\llama\text-generation-webui\models\Qwen_Qwen-14B'
                ]
modeid = localmodels[1]
print(modeid)
tokenizer = AutoTokenizer.from_pretrained(
    modeid, use_fast=False, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    modeid, trust_remote_code=True, device_map='cuda:0',torch_dtype=torch.bfloat16)

taskid = "text2text-generation"

pipe = pipeline(
    task=taskid,
    model=model,
    tokenizer=tokenizer,
    max_length=4*1024
    # device=0
)

llm = HuggingFacePipeline(pipeline=pipe)

# Local SQLite database holding the tables we want to query.
sqlite_db_path = r'D:\sync\private\allinone.db'
db = SQLDatabase.from_uri(f"sqlite:///{sqlite_db_path}")

# Prompt: given a schema and a question, produce the SQL that answers it.
template = """Based on the table schema below, write a SQL query that would answer the user's question:
{schema}

Question: {question}
SQL Query:"""

prompt = ChatPromptTemplate.from_template(template)

def get_schema(_):
    """Return the table info (DDL) of the connected database.

    The single argument is ignored; it only exists so this function can be
    dropped into a chain as a RunnableLambda.
    """
    schema_info = db.get_table_info()
    return schema_info

def run_query(query):
    """Execute a SQL query string against the database and return its result."""
    result = db.run(query)
    return result

# Inputs for the SQL-generation prompt: look up the live schema and pass
# the user's question through unchanged.
inputs = {
    "question": itemgetter("question"),
    "schema": RunnableLambda(get_schema),
}

# Chain: gather inputs -> fill the prompt -> LLM (stopped before it can
# hallucinate a "SQLResult:" section) -> plain string.
sql_response = (
    RunnableMap(inputs)
    | prompt
    | llm.bind(stop=["\nSQLResult:"])
    | StrOutputParser()
)

question = "How many date are there?"
question = "有多少次约会"  # NOTE: overrides the English question above
# print(sql_response.invoke({"question": question}))

template = """Based on the table schema below, question, sql query, and sql response, write a natural language response:
{schema}

Question: {question}
SQL Query: {query}
SQL Response: {response}"""
prompt_response = ChatPromptTemplate.from_template(template)

full_chain = (
    RunnableMap({
        "question": itemgetter("question"),
        "query": sql_response.invoke,
    }) 
    | {
        "schema": RunnableLambda(get_schema),
        "question": itemgetter("question"),
        "query": itemgetter("query"),
        "response": run_query(itemgetter("query"))    
    } 
    | prompt_response 
)

print(full_chain.invoke({"question": question}))