
from  langchain_core.tools import tool
from  langchain.tools.render import render_text_description
from langchain_core.output_parsers import JsonOutputParser,StrOutputParser
from langchain_openai import ChatOpenAI,OpenAIEmbeddings
from langchain_core.prompts import ChatPromptTemplate,PromptTemplate
from operator import itemgetter
from langchain_core.runnables import RunnablePassthrough,RunnableLambda,Runnable
from typing import  Union
from langchain_core.messages import messages_from_dict, messages_to_dict, AIMessage
from langchain.memory import ConversationBufferMemory,ConversationSummaryMemory
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain.chains.conversation.base import ConversationChain
from langchain.chains.conversational_retrieval.base import ConversationalRetrievalChain
from langchain_community.document_loaders import PyPDFLoader,TextLoader,WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains.retrieval import create_retrieval_chain


import requests
import json
import time
import pickle
import os
import numpy as np



### 数据库操作

# 创建llm模型
def init_model():
    """Create the chat LLM client.

    Returns a ``ChatOpenAI`` instance pointed at ZhipuAI's OpenAI-compatible
    endpoint, using the ``glm-4`` model at temperature 0.7.

    The API key is read from the ``ZHIPUAI_API_KEY`` environment variable.
    SECURITY: a live key was previously hard-coded here and has therefore
    leaked into version control — it is kept only as a fallback for
    backward compatibility and MUST be rotated; prefer setting the env var.
    """
    ZHIPUAI_API_KEY = os.environ.get(
        'ZHIPUAI_API_KEY',
        # TODO(security): rotate this leaked key, then delete the fallback.
        '794380a4cee054a0f96bb2844b41fd12.X4t70kph1CfmoKfT',
    )
    BASE_PATH = 'https://open.bigmodel.cn/api/paas/v4/'
    return ChatOpenAI(model_name='glm-4', temperature=.7,
                      openai_api_key=ZHIPUAI_API_KEY, base_url=BASE_PATH)
 
# 文档向量化
def init_embeddings():
    """Build the document-embedding model.

    Uses the Chinese BERT checkpoint ``google-bert/bert-base-chinese``
    through ``HuggingFaceEmbeddings`` (requires ``sentence_transformers``).
    Hub downloads are routed through the hf-mirror.com mirror, which helps
    where huggingface.co is slow or unreachable.
    """
    # Redirect HuggingFace Hub traffic to the mirror before the model loads.
    os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
    embedding_model = HuggingFaceEmbeddings(model_name="google-bert/bert-base-chinese")
    return embedding_model

from langchain_community.utilities import SQLDatabase
# Module-level connection to the local SQLite database queried by the chains below.
db = SQLDatabase.from_uri("sqlite:///ali_langchain.db")
def get_schema(_):
    """Return the database schema as text.

    The single argument is ignored; it exists only so this function can be
    used as a value in an LCEL dict map, which passes the chain input along.
    """
    schema_text = db.get_table_info()
    return schema_text

def run_query(query):
    """Execute a SQL statement against the module-level ``db`` and return the result."""
    result = db.run(query)
    return result

def get_sql(x):
    """Extract the SQL statement from an LLM reply.

    The prompt asks the model to wrap its answer in a ```sql ... ``` fence,
    but models sometimes omit the language tag or the fence entirely.  The
    original implementation raised ``IndexError`` in those cases; this
    version falls back gracefully.

    Returns the text between the fences (verbatim, including surrounding
    newlines, matching the original behavior), or the whole reply when no
    fence is present.
    """
    if "```sql" in x:
        return x.split("```sql")[1].split("```")[0]
    # Fence present but no language tag (e.g. "```\nSELECT ...\n```").
    if "```" in x:
        return x.split("```")[1]
    # No fence at all: assume the reply is already bare SQL.
    return x

# --- Build and run the text-to-SQL question-answering pipeline (LCEL) ---
llm = init_model()
parser = StrOutputParser()
# Prompt 1 (Chinese): given the schema {info} and the question {question},
# produce ONLY a SQL statement inside a ```sql ... ``` fence.
tmpl_sql=  PromptTemplate.from_template("请通过写sql代码来回答对应问题，并且要基于如下数据库信息：{info}\n，需要回答的问题是{question}， 注意只需要sql语句不需要其他文字，代码形式如```sql\n...\n```")
 
# Stage 1: question -> SQL string.
# The dict literal is coerced to a RunnableParallel: get_schema supplies the
# schema, RunnablePassthrough forwards the raw question; get_sql strips the
# code fence from the model reply.
chain_sql = {"info":get_schema,"question":RunnablePassthrough()} |tmpl_sql|llm|   parser | RunnableLambda(get_sql)
# Example (question: "how many rows does the table have"):
# res=chain_sql.invoke("表有多少行")
# print(res)

# Prompt 2 (Chinese): given schema, question, the generated SQL and its
# execution result, answer in plain natural language with no extra text.
tmpl = PromptTemplate.from_template("请综合如下数据库信息、问题、sql代码、sql代码执行结果给出自然语言的回答，只需要给出回答，无需加其他描述性文字，数据库信息：{info}， 问题：{question},sql代码：{query},sql代码执行结果：{res}")
# Stage 2: run the generated SQL against the db (assign adds "res" to the
# mapping), then ask the LLM to phrase the final answer.
chain = {"info":get_schema,"question":RunnablePassthrough(),"query":chain_sql}|RunnablePassthrough.assign(res=lambda x:run_query(x["query"])) | tmpl | llm |parser 

# Demo question (Chinese): "which brand has the most records".
# NOTE(review): this runs at import time (LLM + DB calls); consider guarding
# with `if __name__ == "__main__":` if the module is ever imported.
res = chain.invoke("记录数最多的brand")

print(res)
 