'''
Aivo LLM — a retrieval-augmented customer-service chatbot built on
LangChain: documents (inline text, local files, web pages) are split,
embedded with OpenAI embeddings into a Chroma vector store, and served
through a RetrievalQA chain with per-conversation buffer memory.
'''
from langchain_community.document_loaders import WebBaseLoader
from langchain.schema.document import Document
from langchain.docstore.document import Document as DocumentLocal
from langchain.text_splitter import RecursiveCharacterTextSplitter

from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain_community.document_loaders import TextLoader
import urllib.parse
import sys
import psutil
import os

'''
Expected shape of the ``config`` dict passed to AivoLLM:
    text:  optional str — seed/system text indexed as a local document
           (defaults to a generic customer-service persona when absent)
    files: list[str] — paths of local text files to index
    webs:  list[str] — URLs of web pages to index
    template: str — URL-encoded prompt template (currently unused; see
              the commented-out line in setupllm)
'''

class AivoLLM:
    """Retrieval-augmented QA chatbot built on LangChain + OpenAI + Chroma.

    ``config`` is a dict; recognised keys:
        text  (optional str): seed/system text indexed as a local document.
        files (list[str]):    local text files to index (default: none).
        webs  (list[str]):    web pages to index (default: none).
        openai_api_key / langchain_api_key (optional str): credentials;
            when absent, the pre-existing environment values are used.
    """

    def __init__(self, config):
        # SECURITY NOTE(review): API keys used to be hardcoded here and were
        # committed to source control — those keys are compromised and must be
        # rotated. Credentials are now taken from the config dict or from the
        # environment the process was started with.
        if 'openai_api_key' in config:
            os.environ["OPENAI_API_KEY"] = config['openai_api_key']
        if 'langchain_api_key' in config:
            os.environ["LANGCHAIN_API_KEY"] = config['langchain_api_key']
        # Non-secret LangSmith tracing settings; do not clobber an existing env.
        os.environ.setdefault("LANGCHAIN_TRACING_V2", "true")
        os.environ.setdefault("LANGCHAIN_ENDPOINT", "https://api.smith.langchain.com")
        os.environ.setdefault("LANGCHAIN_PROJECT", "aivo")
        self.config = config
        self.setupllm()

    # Currently one shared LLM per persona.
    def setupllm(self):
        """Index the configured documents into Chroma and create the chat model.

        Side effects: sets ``self.vectorstore`` and ``self.llm``.
        """
        # Seed document: the configured text, or a default persona prompt.
        documents = []
        if 'text' in self.config:
            documents.append(Document(page_content=self.config['text'], metadata={"source": "local"}))
        else:
            documents.append(Document(page_content='你是一个客服机器人', metadata={"source": "local"}))

        # Missing 'files'/'webs' keys are treated as "nothing to load".
        for file in self.config.get('files', []):
            documents += TextLoader(file).load()
        for web in self.config.get('webs', []):
            documents += WebBaseLoader(web).load()

        # A seed document is always present, so no emptiness guard is needed.
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
        all_splits = text_splitter.split_documents(documents)
        print('文档分割结果')
        print(all_splits)
        # Build the vector store over the split chunks.
        self.vectorstore = Chroma.from_documents(documents=all_splits, embedding=OpenAIEmbeddings())

        # Create the chat model.
        # https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html
        self.llm = ChatOpenAI(model_name="gpt-4o", temperature=0.5)

    # One chain per conversation; prior turns are replayed into memory.
    def qa_chain(self, history_message=None, template=""):
        """Build a RetrievalQA chain seeded with the conversation history.

        history_message: list of {'q': user_text, 'a': ai_text} dicts for
            earlier turns (default: no history). A None default replaces the
            original mutable-default-argument bug.
        template: free text injected into the prompt as the user's profile.
        Returns the constructed RetrievalQA chain.
        """
        memory = ConversationBufferMemory(memory_key="history", input_key="question")
        for msg in (history_message or []):
            memory.chat_memory.add_user_message(msg['q'])
            memory.chat_memory.add_ai_message(msg['a'])

        template_info = "用户的个人信息：" + template + "------你是一个问答机器人------<ctx>{context}</ctx>------<hs>{history}</hs>------{question}Answer:"
        prompt = PromptTemplate(
            input_variables=["history", "context", "question"],
            template=template_info,
        )

        chain_kwargs = {"chain_type_kwargs": {"prompt": prompt, "memory": memory}}
        # Attach a retriever only when setupllm built a vector store.
        if hasattr(self, "vectorstore"):
            chain_kwargs["retriever"] = self.vectorstore.as_retriever()
        return RetrievalQA.from_chain_type(self.llm, **chain_kwargs)