import os
from operator import itemgetter
from typing import Any, Dict

from langchain_core.memory import BaseMemory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough, RunnableLambda, RunnableConfig
from langchain.memory import ConversationBufferWindowMemory
from langchain_community.chat_message_histories import FileChatMessageHistory
from langchain_core.runnables import RunnableWithMessageHistory
from langchain_core.runnables import ConfigurableField
from langsmith import RunTree
from openai.types.beta.threads import Run
from langchain_community.vectorstores import FAISS

from src.module.Ark import ArkModel

from langchain_community.chat_models.tongyi import ChatTongyi


# 1. Define the prompt template.
# '{my_name}' and '{query}' are filled in at invoke time; 'history' is a
# list of prior messages injected through the MessagesPlaceholder slot.
prompt = ChatPromptTemplate.from_messages([
    ('system', '你是一个聊天助手，我的名字叫做 {my_name}'),
    MessagesPlaceholder('history'),
    ('human', '{query}')]
)

# 2. Define the model (Tongyi qwen-max; API key read from the environment).
ChatModel = ChatTongyi(model='qwen-max', api_key=os.getenv('DASHSCOPE_API_KEY'))

# 3. Output parser: extract the model message content as a plain string.
output = StrOutputParser()

def runnable_replace():
    """Demonstrate swapping the chat model at run time via configuration.

    Builds a chain whose model slot can be replaced through
    ``with_config(configurable={'model': ...})``, with a fallback model,
    retries, and start/end run listeners attached.
    """

    def _on_start(run: RunTree):
        # Dump the run tree when execution begins (debugging aid).
        print(run)

    def _on_end(run: RunTree):
        # Dump the run tree when execution completes (debugging aid).
        print(run)

    # Register run-time alternatives for the model slot. The value passed
    # under the 'model' key in with_config's `configurable` dict selects
    # which alternative runs: 'qwen-max' is the default, 'ark' swaps in
    # the Ark model.
    configurable_model = ChatModel.configurable_alternatives(
        ConfigurableField(id="model"),
        default_key='qwen-max',
        ark=ArkModel().model,
    )

    # Resilience: if this model errors, fall back to the Ark model; retry
    # the whole runnable up to 3 attempts before giving up.
    resilient_model = configurable_model.with_fallbacks(
        [ArkModel().model]
    ).with_retry(stop_after_attempt=3)

    listened_model = resilient_model.with_listeners(on_start=_on_start, on_end=_on_end)

    # Assemble the chain and invoke it with the 'ark' alternative selected.
    chain = (
        RunnablePassthrough.assign(history=lambda x: [], my_name=lambda x: '小新')
        | prompt
        | listened_model
        | output
    )
    response = chain.with_config(configurable={'model': 'ark'}).invoke({'query': '你是什么模型？'})
    print(response)

def memory_handler():
    """Demonstrate conversation memory wired into an LCEL chain.

    Uses the low-level (now-deprecated in recent LangChain versions)
    approach: a ``ConversationBufferWindowMemory`` travels through the
    run config, is loaded into the prompt's ``history`` slot before each
    call, and is persisted again by an ``on_end`` listener.  See the
    MemoryHistory file for the modern wrapped equivalent
    (``RunnableWithMessageHistory``).
    """

    def _load_memory(ipt: Dict[str, Any], config: RunnableConfig) -> Dict[str, Any]:
        # Pull the memory instance out of the run configuration; fall back
        # to an empty history when none (or something unexpected) is given.
        memory_obj = config.get('configurable', {}).get('memory')
        if isinstance(memory_obj, BaseMemory):  # isinstance also rejects None
            return memory_obj.load_memory_variables(ipt)
        return {'history': []}

    def _save_memory(run: RunTree) -> None:
        # Persist the finished run's input/output pair into the window
        # memory (and, through it, into the backing history file).
        memory.save_context(run.inputs, run.outputs)

    # 4. Window memory keeping the last 3 exchanges, persisted to disk.
    #    A summarizing memory component could be substituted, but that
    #    needs an LLM to produce the summaries.
    memory = ConversationBufferWindowMemory(
        k=3,
        input_key='query',
        output_key='output',
        return_messages=True,
        chat_memory=FileChatMessageHistory('./history.txt')
    )

    # 5. Build the chain: inject my_name and the loaded history, then run
    #    prompt -> model -> string output parser.
    chain = RunnablePassthrough.assign(
        my_name=lambda x: '小新',
        history=RunnableLambda(_load_memory) | itemgetter('history'),
    ) | prompt | ChatModel | output

    # 6. The memory instance travels via config; the on_end listener saves
    #    each exchange automatically, so no manual save_context call is
    #    needed after invoke().
    with_chain = chain.with_listeners(on_end=_save_memory).with_config(configurable={'memory': memory})
    content = with_chain.invoke({'query': '我的上一个问题是什么?'})
    print(content)

memory_handler()