import json
import asyncio
import time
from redis.asyncio import Redis
from config.env import RedisConfig
from ocr_core.schema import LlmTask, TaskStatus, LlmTaskResult
from utils.ocr_llm_util import OcrLlmChat, OcrLlmConfig
from utils.log_util import logger
from config.mongodb import MongoDB, LlmResultMDB, LlmChatMDB
import re


class LlmConsumer:
    """Concurrent LLM queue consumer.

    Spawns ``concurrency`` worker coroutines that each block-pop serialized
    ``LlmTask`` messages from a Redis list, run them through the configured
    LLM, and persist results to both Redis (with TTL) and MongoDB.
    """

    def __init__(self, redis_client: Redis, concurrency: int = 8):
        self.redis: Redis = redis_client
        # Cooperative shutdown flag; every worker loop checks it each iteration.
        self.running: bool = True
        self.concurrency: int = concurrency
        # asyncio.Task handles, populated by start() and awaited by stop().
        self.workers: list = []
        MongoDB.init()

    async def start(self) -> None:
        """Spawn ``concurrency`` worker tasks and run until they all exit."""
        logger.debug(f"LLM 消费者启动，concurrency={self.concurrency}")
        self.workers = [
            asyncio.create_task(self._consume_loop(i)) for i in range(self.concurrency)
        ]
        await asyncio.gather(*self.workers)

    async def stop(self) -> None:
        """Request shutdown and wait for every worker to finish.

        Workers blocked in ``blpop`` exit after at most
        ``RedisConfig.consumer_timeout_seconds`` once ``running`` is cleared.
        """
        self.running = False
        logger.debug("LLM 消费者正在停止...")
        # return_exceptions=True so one failed worker doesn't mask the rest.
        if self.workers:
            await asyncio.gather(*self.workers, return_exceptions=True)

    async def _consume_loop(self, worker_id: int) -> None:
        """Single worker loop: pop, deserialize, and handle tasks forever.

        All exceptions are contained here so a bad message or transient
        outage never kills the worker task.
        """
        logger.debug(f"LLM Worker-{worker_id} 已启动")
        while self.running:
            try:
                # Blocking pop with timeout so the loop can observe
                # self.running periodically even when the queue is idle.
                llm_task_msg = await self.redis.blpop(
                    RedisConfig.redis_llm_queue,
                    timeout=RedisConfig.consumer_timeout_seconds,
                )
                if not llm_task_msg:
                    continue

                _, serialized_task = llm_task_msg
                try:
                    task = LlmTask(**json.loads(serialized_task))
                except Exception as e:
                    # Malformed payloads are logged and dropped; retrying
                    # them would fail the same way.
                    logger.error(f"任务反序列化失败: {serialized_task}, 错误: {e}")
                    continue

                await self._handle_task(task, worker_id)

            except (ConnectionError, TimeoutError) as e:
                # NOTE(review): these are the *builtin* exceptions; redis-py's
                # ConnectionError/TimeoutError live in redis.exceptions and are
                # not subclasses of the builtins — confirm which ones blpop
                # actually raises, otherwise redis outages hit the generic
                # handler below with a 1s (not 5s) backoff.
                logger.error(
                    f"LLM Worker-{worker_id} Redis 连接错误: {e}, 5s 后重试..."
                )
                await asyncio.sleep(5)
            except Exception as e:
                logger.exception(f"LLM Worker-{worker_id} 消费循环异常: {e}")
                await asyncio.sleep(1)

    async def _handle_task(self, task: LlmTask, worker_id: int) -> None:
        """Process one task, short-circuiting on an already-cached result.

        A result already present in Redis means this task was processed
        before (e.g. the message was redelivered); re-publish that result
        and skip the expensive LLM call instead of running the task twice.
        Any failure in the LLM path is converted into a FAILED result so
        callers polling the result key always see a terminal status.
        """
        task_id = task.task_id
        logger.debug(f"LLM Worker-{worker_id} 开始处理任务 [task_id={task_id}]")

        existing_result = await self.redis.get(
            f"{RedisConfig.redis_llm_result}{task_id}"
        )
        if existing_result:
            try:
                result_obj = LlmTaskResult(**json.loads(existing_result))
                await self._update_task_result(result_obj)
                # Fix: previously execution fell through and re-ran the LLM
                # even though a result existed, defeating the dedup check.
                return
            except Exception as e:
                # Cached result is unreadable — fall through and reprocess.
                logger.warning(
                    f"LLM Worker-{worker_id} 解析历史结果失败 [task_id={task_id}]: {e}"
                )

        try:
            result = await self._process_task(task)
            await self._update_task_result(result)
            logger.debug(f"LLM Worker-{worker_id} 任务 {task_id} 处理完成 ✅")
        except Exception as e:
            logger.exception(f"LLM Worker-{worker_id} 任务 {task_id} 执行失败 ❌: {e}")
            result = LlmTaskResult(
                task_id=task_id,
                scenario=task.scenario,
                data="",
                llm_ms=0,
                status=TaskStatus.FAILED,
                error=str(e),
            )
            await self._update_task_result(result)

    async def _update_task_result(self, result: LlmTaskResult) -> None:
        """Persist a task result to Redis (1h TTL) and MongoDB."""
        await self.redis.set(
            f"{RedisConfig.redis_llm_result}{result.task_id}",
            json.dumps(result.model_dump(), ensure_ascii=False),
            ex=3600,
        )
        # NOTE(review): this looks like a synchronous Mongo call inside an
        # async method — confirm it is cheap/non-blocking or move it to a
        # thread executor, otherwise it stalls the event loop.
        LlmResultMDB.update_one(result.model_dump())

    async def _process_task(self, task: LlmTask) -> LlmTaskResult:
        """Run the actual LLM call for one task and build its result.

        Raises:
            ValueError: when no LLM model config exists for the scenario.
        """
        start_time = time.time()
        llm_config = await OcrLlmConfig.get_llm_config(task.scenario, self.redis)
        if not llm_config:
            raise ValueError("未找到 LLM 模型配置")

        # The configured prompt template embeds the OCR text first; any
        # remaining {{key}} placeholders come from the caller's user_prompt.
        final_prompt = llm_config.get("userPrompt", "").replace(
            "{{ocr_text}}", task.ocr_text or ""
        )

        # If user_prompt is supplied, treat it as a JSON dict of template
        # variables; a bad payload is logged and the base prompt is used.
        if task.user_prompt:
            try:
                user_prompts = json.loads(task.user_prompt)
                final_prompt = self.render_template(final_prompt, user_prompts)
            except Exception as e:
                logger.warning(
                    f"user_prompt 解析错误, task_id={task.task_id}, user_prompt: {task.user_prompt}，e: {e}"
                )

        raw_data = await OcrLlmChat.chat_for_label(final_prompt, llm_config)
        # Audit trail of exactly what was sent to the model.
        LlmChatMDB.insert_one(
            {
                "task_id": task.task_id,
                # NOTE(review): key "user_promt" is misspelled but kept as-is —
                # it may already be a persisted Mongo field that readers query.
                "user_promt": final_prompt,
                "model_params": llm_config,
            }
        )
        elapsed = int((time.time() - start_time) * 1000)

        # Prefer structured output; fall back to the raw string when the
        # model did not return valid JSON.
        try:
            parsed_data = json.loads(raw_data) if raw_data else []
        except json.JSONDecodeError:
            logger.warning(
                f"LLM 输出非 JSON 格式, task_id={task.task_id}, raw: {raw_data}"
            )
            parsed_data = raw_data

        return LlmTaskResult(
            task_id=task.task_id,
            scenario=task.scenario,
            data=parsed_data,
            llm_ms=elapsed,
            status=TaskStatus.COMPLETED,
            error="",
        )

    def render_template(self, template: str, variables: dict) -> str:
        """Replace ``{{key}}`` placeholders with values from *variables*.

        Unknown keys are left intact (the original ``{{key}}`` text is kept)
        so missing variables are visible downstream rather than silently
        blanked out.
        """

        def replacer(match):
            key = match.group(1)
            return str(variables.get(key, match.group(0)))

        return re.sub(r"\{\{(\w+)\}\}", replacer, template)
