import logging
import re
import threading

import requests
from langgraph.graph import END, START, Graph, StateGraph
from pydantic.config import JsonDict

from config.common_config import DATABASE_CONFIG
from database.embedding_model import EmbeddingModel
from database.milvus_client import MilvusClient
from models.memory_record import ContentType, MemoryRecord, Role
from models.workflow.inputModel import InputModel
from models.workflow.stateModel2 import StateModel2
from server.memory_record_server import MemoryRecordServer

# Module-level logger (stdlib convention: one logger named after the module).
logger = logging.getLogger(__name__)

class MemoryFlow:
    """Thread-safe singleton that runs a two-step LangGraph workflow:
    optimize a user's text with an Ollama-hosted LLM, then persist the result
    (plus its embedding) as a MemoryRecord.
    """

    _instance = None
    _lock = threading.Lock()

    def __new__(cls, *args, **kwargs):
        # Double-checked locking so exactly one instance is ever created.
        if not cls._instance:
            with cls._lock:
                if not cls._instance:
                    cls._instance = super().__new__(cls)
                    cls._instance._initialized = False
        return cls._instance

    def __init__(self, ollama_api_url: str = "http://192.168.3.111:11434/api/chat", model_name: str = "qwen2.5:32b"):
        """Initialize the LLM endpoint, storage client and compiled workflow.

        Bug fix: since __new__ always returns the same instance, __init__ runs
        on every ``MemoryFlow()`` call; without the guard below each call would
        rebuild the DB client and recompile the graph.
        """
        if self._initialized:
            return
        # Ollama configuration
        self.OLLAMA_API_URL = ollama_api_url
        self.MODEL_NAME = model_name
        self.milvus_client = MemoryRecordServer(
            host=DATABASE_CONFIG["HOST"],
            port=DATABASE_CONFIG["PORT"],
            user=DATABASE_CONFIG["USER"],
            pwd=DATABASE_CONFIG["PASSWORD"])
        self.embedding_model = EmbeddingModel()
        self.app = self.createWorkFolow()
        self._initialized = True

    # --- Workflow node functions ---

    ## 1. Let the LLM optimize the paragraph
    def optimize_step(self, state: InputModel):
        """Node 1: polish the raw content via the LLM and build the full state."""
        user_id: str = state.user_id
        content: str = state.content
        # `category` instead of `type` — avoids shadowing the builtin.
        optimized_content, happen_time, category = self.optimize_text(content)
        return StateModel2(type=category, user_id=user_id, content=content,
                           optimized_content=optimized_content, happen_time=happen_time)

    ## 2. Persist the record
    def save_step(self, state: StateModel2):
        """Node 2: embed the original content and store the record."""
        record = MemoryRecord(
            user_id=state.user_id,
            content=state.content,
            type=state.type,
            optimized_content=state.optimized_content,
            happen_time=state.happen_time,
            relation=state.relation,
            title=state.title,
            detail=state.detail,
        )
        embedding = self.embedding_model.get_embedding(state.content)
        self.milvus_client.insert_record(record, embedding)
        return state

    def createWorkFolow(self):
        """Build and compile the linear two-node workflow: optimize -> save.

        Bug fix: the original passed ``self.OptimizeState``, an attribute that
        is defined nowhere in this class and raised AttributeError on first
        construction. StateModel2 is the state the nodes actually exchange, so
        it is used as the schema here — NOTE(review): confirm this matches how
        StateGraph is configured elsewhere in the project.
        """
        workflow = StateGraph(state_schema=StateModel2)

        workflow.add_node("optimize_step", self.optimize_step)
        workflow.add_node("save_step", self.save_step)

        workflow.set_entry_point("optimize_step")
        workflow.add_edge("optimize_step", "save_step")
        return workflow.compile()

    def optimize_text(self, input_text: str) -> tuple[str, str, str]:
        """Ask the LLM to polish *input_text*, extract the event time and pick
        a category label.

        Returns ``(optimized_text, event_time, category)``; all three are
        empty strings when the request fails or the reply cannot be parsed.
        """
        prompt = (
            "请将我提供的一段文字进行优化，使表达更通顺、简洁自然。同时，提取出描述的事件发生的时间（如果有明确时间的话；如果没有时间，请返回空字符串）。"
            "另外，请根据文本的内容给出一个分类标签（从这些标签中选择一个：童年与家乡, 家庭背景, 教育经历, 职业生涯, 婚姻与家庭, 子女教育, 人生挑战, 朋友与社交, 兴趣与爱好, 人生哲学, 社会变迁, 人生感悟）。"
            "最后请按照下面的格式返回：\n"
            "优化后的文本：在这里输出优化后的文字\n"
            "事件发生时间：在这里输出时间，如“2023年5月”或为空\n"
            "分类标签：在这里输出分类标签\n"
            f"[文字]\n{input_text}"
        )

        try:
            response = requests.post(
                # Bug fix: was self.LLM_API_URL, an attribute never set
                # anywhere (__init__ stores OLLAMA_API_URL) -> AttributeError.
                self.OLLAMA_API_URL,
                json={
                    "model": self.MODEL_NAME,  # was hard-coded, ignoring the configured model
                    "messages": [{"role": "user", "content": prompt}],
                    "options": {"temperature": 0},  # Ollama expects "options", not "option"
                    "stream": False,
                },
                timeout=120,  # don't hang forever on an unresponsive server
            )
            response.raise_for_status()
            content = response.json().get("message", {}).get("content", "未能生成答案")
            # Strip any <think>...</think> reasoning block some models emit.
            # Bug fix: the original called response.sub / response.DOTALL —
            # these belong to the `re` module, not the Response object.
            content = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL)

            print("模型返回的内容：", content)
            return self._parse_optimize_response(content)
        except requests.exceptions.RequestException as e:
            print(f"请求错误: {e}")
            return "", "", ""
        except ValueError as ve:
            print(f"解析错误: {ve}")
            return "", "", ""

    @staticmethod
    def _parse_optimize_response(content: str) -> tuple[str, str, str]:
        """Parse the labelled LLM reply into (optimized_text, event_time, category).

        Raises ValueError when the mandatory "优化后的文本：" label is absent.
        Bug fix: the original indexed ``event_time_section[1]`` even after
        establishing the "事件发生时间：" label was missing, raising an
        uncaught IndexError; missing sections now simply yield "".
        """
        parts = content.split("优化后的文本：")
        if len(parts) < 2:
            raise ValueError(f"返回的内容格式不正确，无法从内容中提取出'优化后的文本'部分。返回内容：{content}")

        rest = parts[1]
        # The optimized text runs up to whichever label appears next.
        optimized_text = rest.split("事件发生时间：")[0].split("分类标签：")[0].strip()

        event_time = ""
        time_section = rest.split("事件发生时间：")
        if len(time_section) >= 2:
            event_time = time_section[1].split("分类标签：")[0].strip()

        category = ""
        category_section = rest.split("分类标签：")
        if len(category_section) >= 2:
            category = category_section[1].strip()

        return optimized_text, event_time, category

    def memory(self, userId: str, req: str):
        """Run *req* through the optimize+save workflow for user *userId*."""
        # NOTE(review): InputModel is built with userId/contentType while
        # optimize_step reads state.user_id — this only works if InputModel
        # declares aliases for those fields; confirm against the model.
        payload = InputModel(userId=userId, contentType="text", content=req)
        result = self.app.invoke(payload)

        print(type(result))

        # NOTE(review): no "res" field is visible on StateModel2 in this file;
        # verify the compiled graph's output actually carries this key.
        return result["res"]

