from langchain_openai import ChatOpenAI
import os
import pandas as pd
import json
import uuid
from datetime import datetime
from .extract_graph import ExtractGraph
from langchain_classic.memory import ConversationBufferWindowMemory
from prompt.system_prompt import SYSTEM_PROMPT
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableSequence

class ChatFriend:
    """Conversational agent backed by a local Ollama model (OpenAI-compatible
    endpoint), with windowed chat memory and parquet-persisted chat/graph state.
    """

    ENTITIES_PATH = "output/entities.parquet"
    RELATIONSHIPS_PATH = "output/relationships.parquet"
    CHAT_PATH = "output/chat.parquet"

    def __init__(self):
        # Local Ollama server exposing the OpenAI-compatible API.
        self.model = ChatOpenAI(
            model="Qwen3-8B:Q5_K_M",
            streaming=True,
            base_url="http://localhost:11434/v1",
            api_key="ollama",
            temperature=0.7,
            timeout=30,  # avoid hanging indefinitely on an unresponsive server
            max_completion_tokens=20 * 1024,
        )

        self.extract_graph = ExtractGraph(self.model)

        # Keep the most recent 15 exchanges (per LangChain, k counts
        # user+AI interaction pairs, i.e. 30 messages at k=15).
        self.window_memory = ConversationBufferWindowMemory(
            k=15,
            return_messages=True,  # return Message objects so they slot into the prompt
            input_key="user_input",
            output_key="ai_output",
        )

        self.prompt = ChatPromptTemplate.from_messages([
            ("system", SYSTEM_PROMPT),
            MessagesPlaceholder(variable_name="history"),  # windowed memory injected here
            ("human", "你需要根据历史信息与用户进行交流，不要编造和脑补，不要过多提问。用户最新输入：{user_input}"),
        ])

        # memory loader -> prompt -> model, executed in order per call.
        self.chain = RunnableSequence(
            self.load_combined_memory,
            self.prompt,
            self.model,
        )

        self.entities_df = self._load_parquet(self.ENTITIES_PATH)
        self.relationships_df = self._load_parquet(self.RELATIONSHIPS_PATH)
        self.chat_df = self._load_parquet(self.CHAT_PATH)

    def _load_parquet(self, path):
        """Load *path* into a DataFrame, or return an empty one if the file is missing."""
        if os.path.isfile(path):
            print(f"✅ 文件「{path}」存在！")
            return pd.read_parquet(path, engine="pyarrow")
        print(f"❌ 文件「{path}」不存在！")
        return pd.DataFrame()

    def load_combined_memory(self, user_input):
        """First chain step: bundle the windowed dialog history with the new input.

        Returns the dict of variables the prompt template expects
        (``history`` and ``user_input``).
        """
        recent_dialog = self.window_memory.load_memory_variables({})["history"]
        return {
            "history": recent_dialog,
            "user_input": user_input,
        }

    def save_graph(self):
        """Persist the chat, entity and relationship DataFrames to parquet."""
        frames = [
            (self.chat_df, self.CHAT_PATH),
            (self.entities_df, self.ENTITIES_PATH),
            (self.relationships_df, self.RELATIONSHIPS_PATH),
        ]
        for df, path in frames:
            df.to_parquet(
                path,
                index=False,            # don't persist the redundant index column
                engine="pyarrow",
                compression="snappy",   # snappy is fast; gzip would pack tighter
            )

    def generate_uuid(self):
        """Return a random UUID4 in its canonical hyphenated string form."""
        return str(uuid.uuid4())

    def get_time(self):
        """Return the current local time formatted as ``YYYY-MM-DD HH:MM:SS``."""
        return datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    def invoke(self, prompt: str):
        """Stream one assistant reply for *prompt*.

        Logs both the user turn and the AI turn to ``chat_df`` (uuid + timestamp
        per row), prints tokens as they stream, and syncs the exchange into the
        window memory so the next call sees it.

        Returns the assistant's full reply text.
        """
        time_str = self.get_time()
        uuid_str = self.generate_uuid()
        user_row = [
            {"uuid": uuid_str, "time": time_str, "text": f"用户：{prompt}"}
        ]
        self.chat_df = pd.concat([self.chat_df, pd.DataFrame(user_row)], ignore_index=True)

        # ans_json = self.extract_graph.extract_graph(prompt)
        # self.entities_df

        full_reply = []
        for chunk in self.chain.stream(f"用户：{prompt}"):
            token = chunk.content or ""
            if token:
                print(token, end="", flush=True)
                full_reply.append(token)

        print("\n")

        assistant_reply = "".join(full_reply).strip()
        time_answer = self.get_time()
        uuid_answer = self.generate_uuid()

        answer_row = [
            {"uuid": uuid_answer, "time": time_answer, "text": f"AI: {assistant_reply}"}
        ]
        self.chat_df = pd.concat([self.chat_df, pd.DataFrame(answer_row)], ignore_index=True)

        # Sync this exchange into the window memory so the next turn includes it.
        self.window_memory.save_context(
            {"user_input": f"{time_str} 用户：{prompt}"},
            {"ai_output": f"{time_answer} AI: {assistant_reply}"},
        )
        return assistant_reply
