from camel.memories import (
    LongtermAgentMemory,
    MemoryRecord,
    ScoreBasedContextCreator,
    ChatHistoryBlock,
    VectorDBBlock,
)
from camel.messages import BaseMessage
from camel.types import ModelType, OpenAIBackendRole, ModelPlatformType
from camel.utils import OpenAITokenCounter
from camel.embeddings import SentenceTransformerEncoder
from camel.agents import ChatAgent
from camel.models import ModelFactory
import os
from dotenv import load_dotenv


def setup_memory():
    """Build a long-term agent memory, seed it with sample records, and print its context.

    The memory combines a chat-history block with a vector-DB block backed by a
    sentence-transformer embedding model; context assembly is score-based and
    capped at 1024 tokens.

    Returns:
        LongtermAgentMemory: the populated memory instance, ready to attach to an agent.
    """
    print("-----------------------------------------以下是 记忆文字 embedding------------------------------------------------------------------------")

    # 1. Assemble the memory system from its three parts.
    encoder = SentenceTransformerEncoder(
        model_name="sentence-transformers/all-MiniLM-L6-v2",
        model_kwargs={"trust_remote_code": True},
    )
    context_creator = ScoreBasedContextCreator(
        token_counter=OpenAITokenCounter(ModelType.GPT_4O_MINI),
        token_limit=1024,
    )
    memory = LongtermAgentMemory(
        context_creator=context_creator,
        chat_history_block=ChatHistoryBlock(),
        vector_db_block=VectorDBBlock(embedding=encoder),
    )

    # 2. Sample conversation turns to persist: one user question, two assistant answers.
    question = BaseMessage.make_user_message(
        role_name="User",
        content="什么是CAMEL AI? 有哪些基于它开发的产品"
    )
    answer_one = BaseMessage.make_assistant_message(
        role_name="Agent",
        content="CAMEL-AI是第一个LLM多智能体框架,并且是一个致力于寻找智能体 scaling law 的开源社区。"
    )
    answer_two = BaseMessage.make_assistant_message(
        role_name="Agent2",
        content="CAMEL-AI 是一个基础 AI 开发框架，后续 camel-ai 团队还基于 CAMEL-AI 做了很多探索项目，如 owl 和 oasis 等，最近又退出了一款 2C 产品 eigent。"
    )
    records = [
        MemoryRecord(message=question, role_at_backend=OpenAIBackendRole.USER),
        MemoryRecord(message=answer_one, role_at_backend=OpenAIBackendRole.ASSISTANT),
        MemoryRecord(message=answer_two, role_at_backend=OpenAIBackendRole.ASSISTANT),
    ]

    # 3. Write the records, then show what the context creator assembles from them.
    memory.write_records(records)
    context, token_count = memory.get_context()

    print(context)
    print(f'token消耗: {token_count}')

    return memory


def setup_agent_without_memory(
    model_type="Qwen/Qwen2.5-72B-Instruct",
    url='https://api-inference.modelscope.cn/v1/',
):
    """Create a memory-less ChatAgent and ask it a question it cannot know the answer to.

    Generalized: the backend model and endpoint are now parameters with the
    original values as defaults, so existing callers are unaffected while other
    OpenAI-compatible backends can be plugged in.

    Args:
        model_type (str): Model identifier understood by the endpoint.
            Defaults to "Qwen/Qwen2.5-72B-Instruct".
        url (str): Base URL of the OpenAI-compatible inference endpoint.
            Defaults to the ModelScope inference API.

    Returns:
        ChatAgent: the freshly created agent (without any attached memory).
    """
    print("-----------------------------------------以下是 agent 没有使用记忆------------------------------------------------------------------------")

    # System prompt defining the agent persona.
    sys_msg = "你是一个好奇的智能体，正在探索宇宙的奥秘。"

    # Load credentials from .env; fail fast with a clear message instead of
    # passing api_key=None and getting an opaque auth error from the backend.
    load_dotenv()
    api_key = os.getenv('MODELSCOPE_SDK_TOKEN')
    if not api_key:
        raise RuntimeError(
            "MODELSCOPE_SDK_TOKEN is not set; add it to your environment or .env file."
        )

    # Create the agent against an OpenAI-compatible online backend.
    model = ModelFactory.create(
        model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
        model_type=model_type,
        url=url,
        api_key=api_key
    )
    agent = ChatAgent(system_message=sys_msg, model=model)

    # Ask a question that refers to "our discussion" — without memory the
    # agent has no such discussion to draw on.
    usr_msg = "告诉我基于我们讨论的内容，哪个是第一个LLM多智能体框架？"
    response = agent.step(usr_msg)

    # Show the (uninformed) reply.
    print(response.msgs[0].content)

    return agent


def setup_agent_with_memory(agent, memory):
    """Attach *memory* to *agent* and re-ask the same question to demonstrate recall.

    Args:
        agent: A ChatAgent instance (e.g. from setup_agent_without_memory).
        memory: A populated LongtermAgentMemory (e.g. from setup_memory).
    """
    print("\n")
    print("-----------------------------------------以下是给 agent 添加了记忆------------------------------------------------------------------------")

    # Give the agent access to the pre-populated memory.
    agent.memory = memory

    # Same question as the memory-less run; this time the answer should be
    # grounded in the stored records.
    query = "告诉我基于我们讨论的内容，哪个是第一个LLM多智能体框架？"
    reply = agent.step(query)

    print(reply.msgs[0].content + "\n")


def main():
    """Run the full demo: build memory, query an agent without it, then with it."""
    # Build and seed the long-term memory, then contrast the agent's answers
    # before and after the memory is attached.
    memory = setup_memory()
    agent = setup_agent_without_memory()
    setup_agent_with_memory(agent, memory)


# Run the demo only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()