# websocket_module/manager.py
import aiohttp
import asyncio
import json


from fastapi import WebSocket
from typing import Dict, List
from concurrent.futures import Future
from threading import Lock
from sqlalchemy.orm import Session

from conversation_log.service import get_conversation_logs_keyword, get_conversation_log

from config.logger import logging

logger = logging.getLogger(__name__)


class ConnectionManager:
    """Manages WebSocket clients and routes their chat / model / cancel messages.

    Keeps one WebSocket per client id, plus the in-flight model-inference
    task per client so a later "cancel" message can abort the stream.
    """

    def __init__(self):
        # client_id -> live WebSocket connection
        self.active_connections: dict[str, WebSocket] = {}
        # client_id -> in-flight model inference task (an asyncio.Task)
        self.model_tasks: Dict[str, Future] = {}
        # Guards model_tasks. This is a blocking threading.Lock, so it must
        # never be held across an await (see handle_cancel).
        self.task_lock = Lock()
        self.keyword_rules = {}

    async def connect(self, client_id: str, websocket: WebSocket):
        """Register an (already accepted) WebSocket under client_id."""
        self.active_connections[client_id] = websocket

    def disconnect(self, client_id: str):
        """Forget client_id's connection; no-op when the id is unknown."""
        # pop() replaces the original check-then-delete pair in one step.
        self.active_connections.pop(client_id, None)

    async def send_to_client(self, client_id: str, message: str):
        """Send a text frame to a single client; unknown ids are ignored."""
        websocket = self.active_connections.get(client_id)
        if websocket is not None:
            await websocket.send_text(message)

    async def broadcast(self, message: str):
        """Send a text frame to every connected client.

        Iterates over a snapshot so that a connect/disconnect happening
        while a send is awaited cannot invalidate the iterator.
        """
        for connection in list(self.active_connections.values()):
            await connection.send_text(message)

    def get_active_connections(self) -> List[WebSocket]:
        """Return the currently connected WebSockets as a list."""
        return list(self.active_connections.values())

    async def handle_reply(self, client_id: str, data: dict, db: Session):
        """Dispatch one parsed JSON message coming from a client.

        :param db: database session for keyword-rule / log lookups
        :param client_id: id of the sending client
        :param data: the client's JSON payload, already parsed into a dict
        """
        conversation_type = data.get("conversationType")
        cancel_flag = data.get("cancel", False)
        request_id = data.get("requestId", "")

        if cancel_flag:
            await self.handle_cancel(request_id, client_id)
            return

        if conversation_type == "chat":
            content = data.get("content", "")
            await self.handle_chat(request_id, client_id, content)
        elif conversation_type == "model":
            question = data.get("question", "")
            # If the question matches a stored keyword rule, answer from
            # the database instead of calling the large model.
            keyword_rules = get_conversation_logs_keyword(db)
            logger.info("keyword_rules: %s", keyword_rules)
            matched = False
            for keyword_rule in keyword_rules:
                # keyword_rule row: [0] = conversation-log id, [1] = keyword string
                keyword_str = keyword_rule[1]
                if self.contains_all_keywords(question, keyword_str):
                    logger.info("命中规则：%s", keyword_str)
                    await self.handle_database_query(db, request_id, client_id, keyword_rule[0])
                    matched = True
                    break
            if not matched:
                logger.info("未命中规则，调用大模型")
                await self.handle_model_request(request_id, client_id, question)
        elif conversation_type == 'change':
            modeValue = data.get("modeValue", "model")
            self.handle_change(modeValue)

    def handle_change(self, modeValue):
        """Broadcast a mode-change notification to every client.

        BUG FIX: broadcast() is a coroutine and was previously called
        without being awaited, so the notification was never actually sent.
        The coroutine is now scheduled on the running event loop, keeping
        this method synchronous for existing callers.
        :param modeValue: the mode the clients should switch to
        """
        asyncio.create_task(self.broadcast(json.dumps({
            "conversationType": "change",
            "modeValue": modeValue
        })))

    async def handle_chat(self, request_id: str, client_id: str, content: str):
        """Broadcast an ordinary chat message to all connected clients."""
        message = {
            "requestId": request_id,
            "conversationType": "chat",
            "from": client_id,
            "content": content
        }
        await self.broadcast(json.dumps(message))

    async def handle_model_request(self, request_id: str, client_id: str, question: str):
        """Start a background task that streams the model answer to all clients."""

        def end_marker() -> str:
            # Terminal frame telling the front end the stream is over.
            return json.dumps({
                "client_id": client_id,
                "data": {
                    "requestId": request_id,
                    "conversationType": "model",
                    "thinking": "",
                    "response": "",
                    "finished": True
                }
            })

        async def model_task():
            try:
                # NOTE: Task.cancelled() is always False while a task is
                # still running, so the old per-chunk check was dead code.
                # Cancellation arrives as CancelledError at an await point
                # and is handled in the except clause below.
                async for chunk in self.generate_model_response(question):
                    logger.info("Model response chunk: %s", chunk)
                    message = {
                        "client_id": client_id,
                        "data": {
                            "requestId": request_id,
                            "conversationType": "model",
                            "thinking": chunk.get("reasoning_content", ""),
                            "response": chunk.get("content", "")
                        }
                    }
                    await self.broadcast(json.dumps(message))
                    await asyncio.sleep(0.05)  # pace the stream for the UI
                # Normal completion: tell the front end the stream finished.
                await self.broadcast(end_marker())
            except asyncio.CancelledError:
                logger.info("Model task cancelled explicitly.")
                # Notify the front end before re-raising so the UI unlocks.
                await self.broadcast(end_marker())
                raise
            except Exception as e:
                logger.error(f"Error in model task: {e}")
                # Send an empty (unfinished) frame on failure, as before.
                await self.broadcast(json.dumps({
                    "client_id": client_id,
                    "data": {
                        "requestId": request_id,
                        "conversationType": "model",
                        "thinking": "",
                        "response": ""
                    }
                }))

        future = asyncio.create_task(model_task())
        with self.task_lock:
            # BUG FIX: a second question from the same client used to
            # silently overwrite (and orphan) the previous task, making it
            # uncancellable; cancel the stale task instead.
            stale = self.model_tasks.pop(client_id, None)
            self.model_tasks[client_id] = future
        if stale is not None and not stale.done():
            stale.cancel()

    async def handle_cancel(self, request_id: str, client_id: str):
        """Cancel the in-flight model task of client_id, if there is one."""
        # Only the dict manipulation happens under the blocking lock:
        # awaiting while holding a threading.Lock would stall the event loop.
        with self.task_lock:
            future = self.model_tasks.pop(client_id, None)
        if future and not future.done():
            future.cancel()
            await self.broadcast(json.dumps({
                "client_id": client_id,
                "data": {
                    "requestId": request_id,
                    "conversationType": "model",
                    "thinking": "",
                    "response": ""
                }
            }))

    async def generate_model_response(self, question):
        """Call the remote model service and yield its streamed chunks.

        Yields dicts with "content" / "reasoning_content" keys, or a dict
        with an "error" key when the API responds with a non-200 status.
        :param question: the user's question or instruction
        """
        # SECURITY: the endpoint URL and API key are hard-coded in source;
        # move them to configuration / environment variables.
        url = "http://81.71.4.52:30886/v1/chat/completions"
        headers = {
            "Authorization": "Bearer fastgpt-nlsGUaVd3UPRoJ9GPJjMwYssXebud6eWUh37IjHN986aP0BH926tHb",
            "Content-Type": "application/json"
        }
        payload = {
            "chatId": "my_chatId",
            "stream": True,
            "detail": False,
            "messages": [{"role": "user", "content": question}]
        }

        async with aiohttp.ClientSession() as session:
            async with session.post(url, headers=headers, json=payload) as response:
                if response.status != 200:
                    logger.error(f"Model API error: {response.status}")
                    yield {"error": f"API Error: {response.status}"}
                    return

                # Read the SSE body line by line until the server sends
                # [DONE]. (The dead Task.cancelled() check was removed:
                # cancellation propagates as CancelledError from the awaits.)
                async for line in response.content:
                    line = line.decode('utf-8').strip()
                    if not line or not line.startswith("data: "):
                        continue
                    data_str = line[6:]
                    if data_str == "[DONE]":
                        break
                    try:
                        logger.info("datastr: %s", data_str)
                        data = json.loads(data_str)
                        delta = data["choices"][0]["delta"]
                        yield {
                            "content": delta.get("content", ""),
                            "reasoning_content": delta.get("reasoning_content", "")
                        }
                    except Exception as e:
                        # Best effort: skip malformed stream lines.
                        logger.warning(f"Error parsing model stream: {e}")

    async def handle_database_query(self, db: Session, request_id: str, client_id: str, question_id: int):
        """Answer from the database: look up the stored conversation log by id
        and stream its thinking + response to all clients character by character.
        """
        conversation_detail = get_conversation_log(db, question_id)
        logger.info("conversation_detail: %s", conversation_detail)
        if conversation_detail:
            await self.stream_database_response(
                client_id=client_id,
                request_id=request_id,
                thinking=conversation_detail.thinking or "",
                response=conversation_detail.response or ""
            )

    async def stream_database_response(self, client_id: str, request_id: str, thinking: str, response: str):
        """Broadcast a stored answer character by character: first the
        thinking text, then the response, then a terminal finished frame.
        """
        message = {
            "client_id": client_id,
            "data": {
                "requestId": request_id,
                "conversationType": "model",
                "thinking": "",
                "response": "",
                "finished": False
            }
        }

        # Stream the thinking text, one character per frame.
        for char in thinking:
            message["data"]["thinking"] = char
            await self.broadcast(json.dumps(message))
            await asyncio.sleep(0.05)  # pacing

        # Stream the response; thinking is cleared once before the loop
        # (the original reset it to "" on every iteration — same frames).
        message["data"]["thinking"] = ""
        for char in response:
            message["data"]["response"] = char
            await self.broadcast(json.dumps(message))
            await asyncio.sleep(0.05)

        # Terminal frame with the finished flag set.
        message["data"]["response"] = ""
        message["data"]["finished"] = True
        await self.broadcast(json.dumps(message))

    @staticmethod
    def contains_all_keywords(question: str, keyword_str: str) -> bool:
        """Return True iff every comma-separated keyword occurs in question.

        An empty keyword string matches everything (all([]) is True).
        NOTE(review): splits on ASCII ',' only — confirm rules never use the
        full-width '，' separator before relying on this.
        """
        keywords = [k.strip() for k in keyword_str.split(',') if k.strip()]
        return all(keyword in question for keyword in keywords)
