from langchain_community.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder
from langchain.memory import ConversationBufferMemory
from langchain.agents import create_structured_chat_agent, AgentExecutor, tool
from langchain.document_loaders import PyPDFLoader, TextLoader
from langchain.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains.question_answering import load_qa_chain
import os, json
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain_core.messages import SystemMessage
# one-api gateway configuration.
# NOTE(security): the API keys below are hardcoded fallbacks checked into
# source control — rotate them and set the environment variables instead.
ONEAPI_API_BASE = os.getenv("ONEAPI_API_BASE", "http://localhost:3000/v1")
ONEAPI_CHAT_API_KEY = os.getenv(
    "ONEAPI_CHAT_API_KEY",
    "sk-oeDSXZfGcYIfESrY11F10080Fe404f178e597bDfBa9aFa58")
ONEAPI_CHAT_MODEL = "qwen-plus"
ONEAPI_EMBEDDING_API_KEY = os.getenv(
    "ONEAPI_EMBEDDING_API_KEY",
    "sk-oeDSXZfGcYIfESrY11F10080Fe404f178e597bDfBa9aFa58")
ONEAPI_EMBEDDING_MODEL = "text-embedding-v1"

# Directory containing this module; all data paths are relative to it.
_BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Directory backing the persisted Chroma vector store.
chromadb_path = os.path.join(_BASE_DIR, 'chromadb')

# File where conversation turns are appended, one JSON object per line.
memory_path = os.path.join(_BASE_DIR, 'memory', 'memory_history.json')


class Master:
    """RAG assistant backed by a one-api gateway.

    Loads local documents into a persisted Chroma vector store, retrieves
    context for a query, and answers either via a "stuff" QA chain or via a
    conversational chain with buffered chat history.
    """

    def __init__(self):
        # Embedding client, used both to build and to query the vector store.
        self.embedding_model = OpenAIEmbeddings(
            model=ONEAPI_EMBEDDING_MODEL,
            base_url=ONEAPI_API_BASE,
            api_key=ONEAPI_EMBEDDING_API_KEY,
            deployment=ONEAPI_EMBEDDING_MODEL)
        self.llm = ChatOpenAI(
            base_url=ONEAPI_API_BASE,
            api_key=ONEAPI_CHAT_API_KEY,
            model=ONEAPI_CHAT_MODEL,  # chat model served by one-api
            temperature=0,  # deterministic answers for document QA
            timeout=None,  # no client-side request timeout
            max_retries=2,  # retry failed requests at most twice
        )
        # Key under which ConversationBufferMemory exposes the history.
        self.memory_key = "chat_history_test"
        # In-process log of {"input", "output"} turns, mirrored to disk
        # by chain_memory().
        self.memory_store = []
        # System prompt: answer strictly from the retrieved {context};
        # the human turn carries {question_info}.
        self.messages = [
            SystemMessagePromptTemplate.from_template("""
            你是一位得力的ai助手，可以协助用户处理问题，请依据下面的内容来回答相关问题:
            context:{context}
            """),
            # MessagesPlaceholder(variable_name=self.memory_key),
            HumanMessagePromptTemplate.from_template('{question_info}')
        ]
        self.prompt = ChatPromptTemplate.from_messages(messages=self.messages)
        self.memory = ConversationBufferMemory(memory_key=self.memory_key,
                                               return_messages=True)
        self.chroma_path_info = chromadb_path

    def load_file_name(self, file_type, file_name):
        """
        Load a document from the local ``file_info`` directory and split it.

        Args:
            file_type: file format, e.g. "pdf", "txt" or "docx"
                (case-insensitive)
            file_name: name of the file inside ``file_info``

        Returns:
            list: the document split into ~200-char chunks with 20-char overlap

        Raises:
            ValueError: if ``file_type`` is not a supported format
        """
        file_info = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 'file_info', file_name)
        file_type = file_type.lower()
        if file_type == "pdf":
            loader = PyPDFLoader(file_info)
        elif file_type in ("docx", "txt"):
            # NOTE(review): TextLoader only reads plain text; a real .docx
            # would need Docx2txtLoader — kept for backward compatibility.
            loader = TextLoader(file_info)
        else:
            # Bug fix: an unsupported type used to crash later with an
            # UnboundLocalError on ``loader``; fail fast with a clear error.
            raise ValueError(f"unsupported file type: {file_type}")
        load_file = loader.load()
        return CharacterTextSplitter(
            separator='\n', chunk_size=200,
            chunk_overlap=20).split_documents(load_file)

    def retriever_file_info(self, text_splitter_info, query_info):
        """
        Build/persist the Chroma vector store and retrieve matching chunks.

        Args:
            text_splitter_info: split documents from ``load_file_name``
            query_info: query text to search for

        Returns:
            list: up to 5 documents most similar to ``query_info``
        """
        # Embed the chunks and persist them so later runs can reuse the store.
        vectordb_persistence = Chroma.from_documents(
            documents=text_splitter_info,
            embedding=self.embedding_model,
            persist_directory=self.chroma_path_info)
        vectordb_persistence.persist()
        # NOTE(review): module-level global kept for backward compatibility
        # with any external readers; prefer storing the retriever on ``self``.
        global retriever
        # Re-open the persisted store for querying.
        vectordb = Chroma(persist_directory=self.chroma_path_info,
                          embedding_function=self.embedding_model)
        retriever = vectordb.as_retriever(search_kwargs={"k": 5},
                                          search_type="similarity")
        retriever_docs_content = retriever.get_relevant_documents(query_info)
        print("--->", retriever_docs_content)
        return retriever_docs_content

    def embed_chain(self, retriever_docs_content_info, query_info):
        """
        Answer ``query_info`` from retrieved documents via a "stuff" QA chain.

        Args:
            retriever_docs_content_info: documents from ``retriever_file_info``
            query_info: the user's question

        Returns:
            dict: the chain's response payload
        """
        qa_chain = load_qa_chain(self.llm,
                                 chain_type="stuff",
                                 verbose=True,
                                 prompt=self.prompt)
        response = qa_chain.invoke({
            # QA chains require ``input_documents`` as their context source;
            # ``context``/``question_info`` feed the custom prompt template.
            "input_documents": retriever_docs_content_info,
            "context": retriever_docs_content_info,
            "question_info": query_info
        })
        return response

    def chain_memory(self, input):
        """
        Run one conversational turn with buffered chat history.

        The turn is appended to ``self.memory_store`` and to the on-disk
        history file as one JSON object per line (JSONL).

        Args:
            input: the user's message

        Returns:
            dict: the memory variables including this turn (the old code
            returned the stale pre-invoke history)
        """
        prompt = ChatPromptTemplate.from_messages([
            SystemMessage(content="你是一个与人类对话的机器人。"),
            # Use the same key the ConversationBufferMemory was built with.
            MessagesPlaceholder(variable_name=self.memory_key),
            HumanMessagePromptTemplate.from_template("{question}"),
        ])
        llm_oneapi = ChatOpenAI(base_url=ONEAPI_API_BASE,
                                api_key=ONEAPI_CHAT_API_KEY,
                                model=ONEAPI_CHAT_MODEL,
                                temperature=0.1)
        llm_chain = LLMChain(llm=llm_oneapi, memory=self.memory, prompt=prompt)
        print('invoke之前：', self.memory.load_memory_variables({}))
        response = llm_chain.invoke({"question": input})
        record = {"input": input, "output": response.get("text")}
        self.memory_store.append(record)
        # Bug fix: the old code re-dumped the whole ``memory_store`` on every
        # call in append mode, duplicating all earlier turns; write only the
        # new record. Also make sure the target directory exists.
        os.makedirs(os.path.dirname(memory_path), exist_ok=True)
        with open(memory_path, 'a', encoding='utf-8') as f:
            json.dump(record, f, ensure_ascii=False)
            f.write('\n')
        return self.memory.load_memory_variables({})
