# 导入所需库
from langchain_core.prompts import PromptTemplate
from langchain_deepseek import ChatDeepSeek
import os
from dotenv import load_dotenv, find_dotenv
from langchain_openai import ChatOpenAI
from langchain.agents import create_openai_tools_agent, AgentExecutor, tool
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_community.chat_message_histories import RedisChatMessageHistory
from langchain.memory import ConversationTokenBufferMemory
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, BackgroundTasks
from interview_tools import *
import uuid
import tiktoken  # 添加 tiktoken 用于 token 计数

# 1. Load API keys / config from a .env file.
load_dotenv()

# SECURITY(review): the API keys below are hard-coded and committed to source
# control.  They should be rotated and loaded via load_dotenv()/os.getenv()
# instead of being assigned literally here.
os.environ["LANGCHAIN_TRACING_V2"]="false"
os.environ["LANGCHAIN_API_KEY"]="ls__a339aed7ea934ec999736421cdb43629"
os.environ["LANGCHAIN_PROJECT"]="samibot"

os.environ["OPENAI_API_KEY"] = "sk-83S6JB0eSdH536lC9aD1F16482C04dC69c02Ba6571B5A0Fb"
os.environ["OPENAI_API_BASE"] = "https://ai-yyds.com/v1"

os.environ["SERPAPI_API_KEY"] = "e7b15718df583476214651b32a4e835523f3bac273d511341a1de30430b2164e"
# Redis connection string: no password, non-default port 16379 on localhost.
REDIS_URL = "redis://:@127.0.0.1:16379" #os.getenv("REDIS_URL")

app = FastAPI()


# Custom DeepSeek-backed chat model that adds an OpenAI-style token counter.
class CustomChatDeepSeek(ChatOpenAI):
    """ChatOpenAI subclass pointed at DeepSeek with tiktoken-based counting.

    ``ConversationTokenBufferMemory`` calls ``get_num_tokens_from_messages``
    to trim chat history, so we approximate DeepSeek token usage with the
    ``cl100k_base`` encoding (DeepSeek publishes no tiktoken encoding).

    NOTE(review): the previous implementation assigned ``self._encoding`` in
    ``__init__``; ChatOpenAI is a pydantic model, where setting an undeclared
    underscore instance attribute is rejected (pydantic requires declared
    private attributes).  ``tiktoken.get_encoding`` already caches Encoding
    objects in its registry, so no attribute storage is needed at all.
    """

    @property
    def encoding(self):
        """Return the cl100k_base encoder (cached internally by tiktoken)."""
        return tiktoken.get_encoding("cl100k_base")

    def get_num_tokens_from_messages(self, messages):
        """Approximate the token count of *messages* (OpenAI chat format).

        Constants follow the OpenAI cookbook recipe: 4 tokens of overhead per
        message, -1 when a ``name`` field is present (the name replaces the
        role token), and +2 priming the assistant reply.

        Args:
            messages: sequence of LangChain messages; ``content`` may be a
                plain string or a multimodal list of part dicts.

        Returns:
            int: estimated token count.
        """
        num_tokens = 0
        for message in messages:
            num_tokens += 4  # per-message overhead

            content = message.content
            if isinstance(content, list):
                # Multimodal content: only text parts contribute tokens here;
                # image/audio parts are skipped (no reliable local estimate).
                for item in content:
                    if item.get("type") == "text":
                        num_tokens += len(self.encoding.encode(item["text"]))
            else:
                num_tokens += len(self.encoding.encode(str(content)))

            if getattr(message, "name", None):
                num_tokens += -1  # name field replaces the role token
        num_tokens += 2  # priming for the assistant's reply
        return num_tokens

class InterviewServer:
    """LangChain agent that plays the role of a job interviewer.

    Chat history is persisted in Redis per session id; when the stored
    history grows past 10 messages it is summarised back into a single
    message.  A token-buffer memory caps the prompt at ~4000 tokens.
    """

    def __init__(self, session_id: str = "sessionid_123"):
        """Build the chat model, prompt, memory and agent executor.

        Args:
            session_id: Redis session key for this conversation's history.
                Defaults to the previous hard-coded value so existing callers
                keep working; pass the client's context id to isolate users.
        """
        # SECURITY(review): hard-coded DeepSeek API key committed to source —
        # rotate it and read from the environment instead.
        self.chatmodel = CustomChatDeepSeek(
            api_key="sk-c40237f01d7a491d9a7e792772e9e3ca",
            base_url="https://api.deepseek.com/v1",  # note the /v1 path
            model="deepseek-chat",
            temperature=0,
            streaming=True,
        )

        self.QingXu = "default"  # current mood key into MOODS
        self.MEMORY_KEY = "chat_history"
        self.SYSTEMPL = """你是一个面试官,你精通如何考察面试人员对企业需求的匹配度,并会在面试中通过不断的询问问题来达到考察的目的。"""

        self.MOODS = {
            "default": {
                "roleSet": "",
                "voiceStyle": "chat"
            },
        }

        # NOTE(review): SYSTEMPL contains no {who_you_are} placeholder, so the
        # .format(...) below is currently a no-op; kept for future role sets.
        self.prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    self.SYSTEMPL.format(who_you_are=self.MOODS[self.QingXu]["roleSet"]),
                ),
                MessagesPlaceholder(variable_name=self.MEMORY_KEY),
                ("user", "{input}"),
                MessagesPlaceholder(variable_name="agent_scratchpad"),
            ],
        )

        tools = [test]
        agent = create_openai_tools_agent(
            llm=self.chatmodel,
            tools=tools,
            prompt=self.prompt,
        )

        # Raw Redis-backed history (summarised inside get_memory when long).
        self.sessionId = session_id
        self.memory = self.get_memory(self.sessionId)
        # Token-bounded view over the Redis history; distinct local name so it
        # is not confused with self.memory (the raw RedisChatMessageHistory).
        token_buffer = ConversationTokenBufferMemory(
            llm=self.chatmodel,
            human_prefix="用户",
            ai_prefix="ergongda",
            memory_key=self.MEMORY_KEY,
            output_key="output",
            return_messages=True,
            max_token_limit=4000,
            chat_memory=self.memory,
        )
        self.agent_executor = AgentExecutor(
            agent=agent,
            tools=tools,
            memory=token_buffer,
            verbose=True,
            handle_parsing_errors=True,  # recover from malformed tool calls
        )

    def get_memory(self, sessid: str):
        """Return the Redis chat history for *sessid*.

        If the stored history exceeds 10 messages, ask the model to compress
        it into one summary message (first person, with extracted user facts)
        and replace the history with that single message.
        """
        chat_message_history = RedisChatMessageHistory(
            url=REDIS_URL, session_id=sessid
        )
        # chat_message_history.clear()  # uncomment to wipe the stored history
        # Read .messages once — each access round-trips to Redis.
        store_message = chat_message_history.messages
        print("chat_message_history:", store_message)
        if len(store_message) > 10:
            prompt = ChatPromptTemplate.from_messages(
                [
                    (
                        "system",
                        self.SYSTEMPL + "\n这是一段你和用户的对话记忆，对其进行总结摘要，摘要使用第一人称‘我’，并且提取其中的用户关键信息，如姓名、年龄、性别、出生日期等。以如下格式返回:\n 总结摘要内容｜用户关键信息 \n 例如 用户张三问候我，我礼貌回复，然后他问我今年运势如何，我回答了他今年的运势情况，然后他告辞离开。｜张三,生日1999年1月1日"
                    ),
                    ("user", "{input}"),
                ]
            )
            chain = prompt | self.chatmodel
            summary = chain.invoke({"input": store_message, "who_you_are": self.MOODS[self.QingXu]["roleSet"]})
            print("summary:", summary)
            # Replace the long history with the single summary message.
            chat_message_history.clear()
            chat_message_history.add_message(summary)
            print("总结后：", chat_message_history.messages)
        return chat_message_history

    def run(self, query):
        """Run *query* through the agent and return the executor result dict."""
        result = self.agent_executor.invoke({"input": query, "chat_history": self.memory.messages})
        return result

@app.post("/chat")
def chat(query: str, context: str, background_tasks: BackgroundTasks):
    # Chat endpoint: runs `query` through a freshly-built interview agent.
    # BUG(review): InterviewServer() builds its Redis memory from the default
    # session id inside __init__; assigning master.sessionId afterwards does
    # NOT rebuild that memory, so `context` appears to be effectively ignored
    # — confirm, and pass the session id into the constructor instead.
    master = InterviewServer()
    master.sessionId = context
    msg = master.run(query)
    unique_id = str(uuid.uuid4())  # unique identifier for this response
    return {"msg": msg, "id": unique_id}

'''
测试fastapi启动后是否能正常工作
'''
@app.get("/")
def test_demo():
    # Liveness probe: a fixed payload proves the FastAPI app is serving.
    payload = {"Hello": "World11"}
    return payload

if __name__ == "__main__":
    import uvicorn
    # Dev entry point: serve the app on all interfaces at port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)






