from threading import Event

from langchain.chains.llm import LLMChain
from langchain.memory import ConversationBufferWindowMemory
from langchain_community.chat_models import ChatAnthropic
from langchain_core.documents import Document
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

from aimodel import tongyi_model, embedding_model
from src.tools.dms_operator import DMSOperator
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import FAISS
from langchain.text_splitter import MarkdownTextSplitter
import tiktoken

from websocket_schemas import ChatResponse


# Example: suppose you have several Markdown strings, e.g.
# markdown_files = [
#     "# 文件1\n内容1",
#     "# 文件2\n内容2",
#     "# 文件3\n内容3",
# ]

async def file_qa(websocket, question, history_list, file_list, stop_event: Event):
    """Answer a question about one or more converted documents, streaming over a websocket.

    Args:
        websocket: Websocket-like object exposing an async ``send_json`` method.
        question: The user's question text.
        history_list: Prior turns as dicts with keys ``"role"`` (``"user"``/``"bot"``)
            and ``"content"``.
        file_list: File ids whose Markdown conversions are fetched via ``DMSOperator``.
        stop_event: Cooperative cancellation flag; when set mid-stream, streaming
            stops, the event is cleared, and an ``end`` frame is sent.
    """
    dms_op = DMSOperator()
    # Fetch each file's Markdown conversion (conversion type 4 — see DMSOperator).
    markdown_files = []
    for file_id in file_list:
        file_text = dms_op.get_fileconversion(file_id, 4)
        markdown_files.append(file_text)

    # Split the combined Markdown into small chunks; currently only used to
    # estimate the total token count below.
    text_splitter = MarkdownTextSplitter(chunk_size=200, chunk_overlap=50)
    documents = [
        Document(page_content=chunk)
        for md in markdown_files
        for chunk in text_splitter.split_text(md)
    ]

    # Estimate the total token count, using the gpt-3.5-turbo tokenizer as a
    # rough proxy for the actual model's tokenizer.
    encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
    total_tokens = sum(len(encoding.encode(doc.page_content)) for doc in documents)
    print(f"token:{total_tokens}")

    # Windowed conversation memory (last 2 exchanges). NOTE(review): this is
    # populated but not consumed by the active direct-prompt path below; it is
    # kept for a RetrievalQA fallback that was previously sketched here.
    memory = ConversationBufferWindowMemory(k=2)
    memory.output_key = "result"
    for turn in history_list:
        if turn["role"] == "user":
            memory.chat_memory.add_user_message(turn["content"])
        elif turn["role"] == "bot":
            # Fix: bot turns were previously recorded as *user* messages,
            # corrupting the role structure of the memory.
            memory.chat_memory.add_ai_message(turn["content"])

    # Refuse over-long inputs (limit corresponds to roughly 1M characters).
    if total_tokens >= 702800:
        msg = ChatResponse(sender="bot", message="超出字数限制，请将字数缩减到100w字以内再进行提问", type="stream")
        await websocket.send_json(msg.dict())
        return

    prompt = ChatPromptTemplate.from_messages(
        [
            SystemMessage(
                content="你是“新都科伦聊天助手”，能够友好的回答问题。"
            ),
            MessagesPlaceholder(variable_name="messages"),
        ]
    )
    chain = prompt | tongyi_model

    # Rebuild the chat history as LangChain message objects for the prompt.
    messages = []
    for turn in history_list:
        if turn["role"] == "user":
            messages.append(HumanMessage(turn["content"]))
        elif turn["role"] == "bot":
            messages.append(AIMessage(turn["content"]))

    # Concatenate all files (separated by "===") and append the question as
    # the final human message.
    full_context = "\n\n===\n".join(markdown_files)
    q = f"已知有以下有“===”用于分割不同的文件的文件内容：\\n\n {full_context}\n\n请根据以上文件内容和用户的问题回答，在回答的时候尽量用markdown表格形式简要回答。: {question}"
    messages.append(HumanMessage(q))

    response = chain.astream(
        {
            "messages": messages,
        }
    )

    # Stream model chunks back to the client, honouring stop requests between
    # chunks; both early stop and normal completion emit an "end" frame.
    async for chunk in response:
        if stop_event.is_set():
            stop_event.clear()
            end_resp = ChatResponse(sender="bot", message="", type="end")
            await websocket.send_json(end_resp.dict())
            return
        msg = ChatResponse(sender="bot", message=str(chunk.content), type="stream")
        await websocket.send_json(msg.dict())

    end_resp = ChatResponse(sender="bot", message="", type="end")
    await websocket.send_json(end_resp.dict())

if __name__ == "__main__":
    # Ad-hoc manual test entry point. file_qa is a coroutine: calling it
    # without an event loop only creates a never-awaited coroutine object,
    # so it must be driven with asyncio.run. It also requires a stop_event
    # argument (no default), which the previous call omitted.
    import asyncio

    websocket = ""  # stub; a real run needs an object with an async send_json
    question = "请总结文本的内容，回答尽量专业。"
    question2 = "请分析以上文档的差异点，给出详细的差异对比分析信息"
    history_list = []
    # asyncio.run(file_qa(websocket, question, history_list,
    #                     ["3dd72e17-f9b1-4126-9a92-002d56099815"], Event()))
    asyncio.run(
        file_qa(
            websocket,
            question2,
            history_list,
            ["3aeadf80-6be3-45cb-bb61-081c94aa9166", "83f6f901-78a4-4476-bc85-26416fe98773"],
            Event(),
        )
    )