
from  langchain_core.tools import tool
from  langchain.tools.render import render_text_description
from langchain_core.output_parsers import JsonOutputParser,StrOutputParser
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from operator import itemgetter
from langchain_core.runnables import RunnablePassthrough,RunnableLambda,Runnable
from typing import  Union
from langchain_core.messages import messages_from_dict, messages_to_dict, AIMessage
from langchain.memory import ConversationBufferMemory,ConversationSummaryMemory
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain.chains.conversation.base import ConversationChain
from langchain.chains.conversational_retrieval.base import ConversationalRetrievalChain
from langchain_community.document_loaders import PyPDFLoader,TextLoader,WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains.retrieval import create_retrieval_chain


import requests
import json
import time
import pickle
import os
import numpy as np



 

# Create the LLM client
def init_model():
    """Create the chat LLM client (GLM-4 behind an OpenAI-compatible endpoint).

    Returns:
        ChatOpenAI: client configured against the ZhipuAI API gateway.
    """
    # SECURITY: the API key used to be hard-coded in source. Prefer the
    # ZHIPUAI_API_KEY environment variable; the literal is kept only as a
    # fallback so existing setups keep working. Rotate this leaked key and
    # delete the fallback as soon as possible.
    api_key = os.environ.get(
        "ZHIPUAI_API_KEY",
        "794380a4cee054a0f96bb2844b41fd12.X4t70kph1CfmoKfT",
    )
    base_url = "https://open.bigmodel.cn/api/paas/v4/"
    return ChatOpenAI(
        model_name="glm-4",
        temperature=0.7,
        openai_api_key=api_key,
        base_url=base_url,
    )
 

# Document loading

def load_doc(filename):
    """Load a document from a local .pdf/.txt file or from a web URL.

    Args:
        filename: local file path or an http(s) URL.

    Returns:
        list: loaded Documents, or None when the source type is unsupported.
    """
    # Check the URL case first: a remote path such as "https://host/a.pdf"
    # must not be mistaken for a local PDF by the extension check below.
    if filename.startswith(("http://", "https://")):
        return WebBaseLoader(filename).load()
    suffix = os.path.splitext(filename)[1].lower()
    if suffix == ".pdf":
        return PyPDFLoader(file_path=filename).load()
    if suffix == ".txt":
        return TextLoader(filename, encoding="utf8").load()
    if suffix == "":
        # Backward compatibility: extension-less names were previously
        # treated as web pages.
        return WebBaseLoader(filename).load()
    return None
        

# Document splitting
# chunk count: len(doc_splited)
# character count: sum(len(d.page_content) for d in doc_splited)
def doc_splitter(doc):
    """Split documents into chunks of at most 3000 characters, no overlap."""
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=3000,
        chunk_overlap=0,
        separators=["\n\n", "\n"],
    )
    return splitter.split_documents(documents=doc)

# Document embedding model
def embedding_doc():
    """Return a HuggingFace embedding model (Chinese BERT).

    Side effect: points HF downloads at the hf-mirror.com endpoint.
    Requires: pip install sentence_transformers
    """
    os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
    return HuggingFaceEmbeddings(model_name="google-bert/bert-base-chinese")


# Persist documents into the vector database (pip install chromadb)
def save_db(embeddings, doc, db_name):
    """Embed *doc* and persist it to a Chroma store under ./sys_cache/<db_name>.

    Args:
        embeddings: embedding function used to vectorize the documents.
        doc: iterable of (already split) Documents.
        db_name: subdirectory name inside ./sys_cache/.

    Returns:
        Chroma: the populated vector store (previously built and discarded;
        returning it lets callers skip a separate load_db round-trip).
    """
    vectors = Chroma.from_documents(
        documents=doc,
        embedding=embeddings,
        persist_directory='./sys_cache/' + db_name,
    )
    # NOTE(review): persist() is deprecated/no-op on Chroma >= 0.4 which
    # auto-persists; kept for compatibility with older versions.
    vectors.persist()
    return vectors

# Open an existing vector database
def load_db(embeddings, db_name):
    """Open the persisted Chroma store under ./sys_cache/<db_name>."""
    persist_dir = './sys_cache/' + db_name
    return Chroma(persist_directory=persist_dir, embedding_function=embeddings)

# Build a retrieval QA chain and answer one question
def create_ask_answer_chain(llm, vector_db, retriever, input: str):
    """Answer *input* using a stuff-documents retrieval chain.

    Args:
        llm: chat model that generates the answer.
        vector_db: unused; kept for backward compatibility with callers.
        retriever: retriever supplying the {context} documents.
        input: the user question.

    Returns:
        str: the chain's answer text.
    """
    # The original prompt contained Chinese typos that garbled the
    # instructions (为例→为了, 星好→星号, 街上→接上, 结束与→结束语, 一堆→一对,
    # swapped quotation marks); they are corrected here.
    prompt = ChatPromptTemplate.from_template("""使用下面的语料来回答本模板最末尾的问题。如果你不知道问题的答案，直接回答不知道，禁止随意编造答案，为了保证答案尽可能简洁，你的回答必须不超过3句话，你的回答不可以带星号。请注意！每次回答结束后你都必须接上“回答完毕”作为结束语，以下是一对问题和答案的样例：
    请问：秦始皇原名是什么
    秦始皇的原名是嬴政，回答完毕
    以下是语料：
    <context>
    {context}
    </context>
    Question:{input}""")

    doc_chain = create_stuff_documents_chain(llm=llm, prompt=prompt)
    re_chain = create_retrieval_chain(retriever=retriever, combine_docs_chain=doc_chain)
    response = re_chain.invoke({"input": input})
    return response["answer"]

# Build a conversational retrieval chain with buffer memory
def create_retriever_chat(llm, retriever=None):
    """Create a ConversationalRetrievalChain that keeps chat history.

    Args:
        llm: chat model used for question condensing and answering.
        retriever: vector-store retriever supplying context documents.

    Returns:
        ConversationalRetrievalChain: invoke with {"question": ...};
        the answer is under the "answer" key.
    """
    # ConversationBufferMemory stores raw messages and declares no `llm`
    # field; the kwarg passed by the original code was silently ignored
    # (only ConversationSummaryMemory needs an LLM), so it is dropped here.
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True,
    )
    return ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)

def main():
    """End-to-end demo: load a web page, index it into Chroma, then run a
    conversational retrieval chat over it and print two answers."""
    llm = init_model()
    file_name = "https://js.langchain.com/v0.2/docs/introduction/"
    doc = load_doc(filename=file_name)
    # Guard clause + `is None` identity check (was `doc != None`).
    if doc is None:
        print('无法加载文档：' + file_name)
        return
    doc_splited = doc_splitter(doc)
    embeddings = embedding_doc()
    save_db(embeddings=embeddings, doc=doc_splited, db_name='test')
    vector_db = load_db(embeddings=embeddings, db_name='test')
    retriever = vector_db.as_retriever()
    # result = create_ask_answer_chain(llm=llm, vector_db=vector_db, retriever=retriever, input="用中文回答什么是langchain")
    qa = create_retriever_chat(llm=llm, retriever=retriever)
    for question in ("what is langchain",
                     "what is it consists of open-source libraries"):
        res = qa.invoke({"question": question})
        print(res["answer"])

# Run only when executed as a script; importing this module stays side-effect free.
if __name__ == "__main__":
    main()


 

# 文档词向量化
# q1 = "猫"
# q2 = "狗"
# q3 = "雨"
# e1 = embeddings.embed_query(q1)
# e2 = embeddings.embed_query(q2)
# e3 = embeddings.embed_query(q3)

# #转成numpy格式方便后面计算 import numpy as np
# e1= np.array(e1)
# e2= np.array(e2)
# e3= np.array(e3)

# print(np.dot(e1,e2))
# print(np.dot(e1,e3))
# print(np.dot(e2,e3))



