import os
import json
import logging
from typing import Dict, Any
from .base_processor import BaseProcessor
from prompts import (
    PROMPT_EVALUATION_SINGLE_DEPTH_SYSTEM, PROMPT_EVALUATION_SINGLE_DEPTH_USER,
    PROMPT_EVALUATION_COMPARISON_SYSTEM, PROMPT_EVALUATION_COMPARISON_USER,
    PROMPT_EVALUATION_RANKING_SYSTEM, PROMPT_EVALUATION_RANKING_USER,
    PROMPT_EVALUATION_OTHER_SYSTEM, PROMPT_EVALUATION_OTHER_USER
)

# Obtain the logger for the current module
from app.utils.logger import get_logger

# Pass the current module name to get a pre-configured logger
logger = get_logger(__name__)


class EvaluationProcessor(BaseProcessor):
    """Processor for evaluation ("测评") content tasks.

    Reads only the ``requirements`` input (the legacy ``request`` field has
    been removed everywhere) and dispatches to one of four direction-specific
    prompt pairs based on ``matched_direction``.
    """

    # Dispatch table: matched_direction -> prompt configuration.
    # ``needs_style`` marks the one direction whose user prompt also takes
    # style_judgment_result; ``log``/``label`` carry the per-direction log
    # strings.  This replaces a duplicated four-way if/elif chain where each
    # branch repeated the same ~15 lines with only the constants changed.
    _DIRECTION_PROMPTS = {
        "单品深度测评视频大纲": {
            "system": PROMPT_EVALUATION_SINGLE_DEPTH_SYSTEM,
            "user": PROMPT_EVALUATION_SINGLE_DEPTH_USER,
            "log": "📹 处理单品深度测评视频大纲",
            "label": "单品深度测评",
            "needs_style": False,
        },
        "横向对比测评视频大纲": {
            "system": PROMPT_EVALUATION_COMPARISON_SYSTEM,
            "user": PROMPT_EVALUATION_COMPARISON_USER,
            "log": "🔄 处理横向对比测评视频大纲",
            "label": "横向对比测评",
            "needs_style": False,
        },
        "榜单推荐视频大纲": {
            "system": PROMPT_EVALUATION_RANKING_SYSTEM,
            "user": PROMPT_EVALUATION_RANKING_USER,
            "log": "🏆 处理榜单推荐视频大纲",
            "label": "榜单推荐测评",
            "needs_style": False,
        },
        "evaluation_其他": {
            "system": PROMPT_EVALUATION_OTHER_SYSTEM,
            "user": PROMPT_EVALUATION_OTHER_USER,
            "log": "📝 处理evaluation_其他类型",
            "label": "evaluation_其他",
            "needs_style": True,
        },
    }

    def __init__(self, volcano_client, additional_info=""):
        """Store the model client and optional fallback info text.

        Args:
            volcano_client: Client handed through to ``BaseProcessor``;
                used by ``call_model`` (defined on the base class).
            additional_info: Fallback text used as ``notice`` when the
                caller supplies none; also echoed in the result payload.
        """
        super().__init__(volcano_client)
        self.additional_info = additional_info
        # Lazy %-args: the message is only built if INFO logging is enabled.
        logger.info("🎬 EvaluationProcessor初始化完成，附加信息: %s", additional_info)

    async def process(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Handle one evaluation task described by *inputs*.

        Args:
            inputs: Task payload.  Only ``requirements`` is read for the
                requirement text (the old ``request`` key is ignored);
                ``matched_direction`` defaults to ``"evaluation_其他"``.

        Returns:
            Dict with ``content_type`` (always ``"evaluation"``),
            ``matched_direction``, ``result`` (parsed model output or an
            ``error`` dict), and ``additional_info``.
        """
        logger.info("🚀 开始处理测评任务")

        # Extract inputs (only ``requirements`` is supported for the
        # requirement text).
        topic_result = inputs.get("topic_result", {})
        creator_style = inputs.get("creator_style", {})
        product_highlights = inputs.get("product_highlights", "")
        outline_advice = inputs.get("outline_advice", "")
        notice = inputs.get("notice", "")
        requirements = inputs.get("requirements", "")
        product_name = inputs.get("product_name", "")
        style_judgment_result = inputs.get("style_judgment_result", "")
        matched_direction = inputs.get("matched_direction", "evaluation_其他")

        # Record the key input parameters.
        logger.info("📋 输入参数解析完成 - 匹配方向: %s", matched_direction)
        logger.info("📝 需求文本长度: %d 字符", len(requirements))
        logger.info("🏷️ 产品名称: %s", product_name)
        logger.info("🎯 产品亮点长度: %d 字符", len(product_highlights))
        logger.debug("主题结果键值: %s", list(topic_result.keys()) if topic_result else "空")
        logger.debug("创作者风格键值: %s", list(creator_style.keys()) if creator_style else "空")

        # Input-validation warnings (non-fatal: processing continues).
        if not requirements:
            logger.warning("⚠️  requirements参数为空，可能会影响生成质量")
        if not product_name:
            logger.warning("⚠️  产品名称为空")

        logger.info("🔄 开始调用模型处理...")

        result = await self.select_and_call_model(
            matched_direction=matched_direction,
            topic_result=topic_result,
            creator_style=creator_style,
            product_highlights=product_highlights,
            outline_advice=outline_advice,
            # Fall back to the constructor-provided info when no notice given.
            notice=notice or self.additional_info,
            requirements=requirements,
            product_name=product_name,
            style_judgment_result=style_judgment_result,
        )

        # Log the outcome; an "error" key marks failure by convention.
        if "error" in result:
            logger.error("❌ 测评任务处理失败 - 方向: %s, 错误: %s",
                         matched_direction, result["error"])
        else:
            logger.info("✅ 测评任务处理成功 - 方向: %s", matched_direction)
            logger.debug("生成结果类型: %s", type(result))

        return {
            "content_type": "evaluation",
            "matched_direction": matched_direction,
            "result": result,
            "additional_info": self.additional_info,
        }

    async def select_and_call_model(
            self,
            matched_direction: str,
            topic_result: Dict[str, Any],
            creator_style: Dict[str, Any],
            product_highlights: str,
            outline_advice: str,
            notice: str,
            requirements: str,
            product_name: str,
            style_judgment_result: str
    ) -> Dict[str, Any]:
        """Build the direction-specific prompt, call the model, parse JSON.

        Args:
            matched_direction: One of the keys of ``_DIRECTION_PROMPTS``;
                anything else yields an ``error`` result.
            topic_result / creator_style: Serialized into the prompt as
                pretty-printed JSON.
            style_judgment_result: Only consumed by the
                ``"evaluation_其他"`` prompt.

        Returns:
            The parsed JSON dict from the model, or a dict with ``error``
            and a small ``raw_inputs`` snapshot on failure.
        """
        logger.info("🎯 选择模型处理方向: %s", matched_direction)

        topic_result_str = json.dumps(topic_result, ensure_ascii=False, indent=2)
        creator_style_str = json.dumps(creator_style, ensure_ascii=False, indent=2)

        logger.debug("构建提示词 - 主题结果长度: %d", len(topic_result_str))
        logger.debug("构建提示词 - 创作者风格长度: %d", len(creator_style_str))
        logger.debug("构建提示词 - 需求文本长度: %d", len(requirements))

        # Explicit snapshot of the key inputs for error payloads.  The
        # previous code returned ``locals()`` here, which leaked ``self``,
        # the full model response, and exception objects (non-serializable)
        # into the result dict.
        raw_inputs = {
            "matched_direction": matched_direction,
            "requirements": requirements,
            "product_name": product_name,
        }

        spec = self._DIRECTION_PROMPTS.get(matched_direction)
        if spec is None:
            logger.error("❌ 未知的测评方向: %s", matched_direction)
            return {"error": f"未知的测评方向: {matched_direction}", "raw_inputs": raw_inputs}

        logger.info(spec["log"])

        # All four prompts share the same base placeholders.
        format_kwargs = {
            "topic_result": topic_result_str,
            "creator_style": creator_style_str,
            "product_highlights": product_highlights,
            "outline_advice": outline_advice,
            "notice": notice,
            "requirements": requirements,
            "product_name": product_name,
        }
        if spec["needs_style"]:
            # Only the "evaluation_其他" user prompt takes this extra field.
            format_kwargs["style_judgment_result"] = style_judgment_result

        try:
            user_prompt = spec["user"].format(**format_kwargs)
            logger.debug("%s提示词长度: %d", spec["label"], len(user_prompt))
            model_response = await self.call_model(
                system_prompt=spec["system"],
                user_prompt=user_prompt,
            )

            if model_response:
                logger.info("🤖 模型调用成功，响应长度: %d", len(model_response))
                logger.debug("模型响应前500字符: %s...", model_response[:500])
            else:
                logger.error("❌ 模型响应为空")
                return {"error": "模型响应为空", "raw_inputs": raw_inputs}

        except Exception as e:
            logger.error("💥 模型调用过程中发生异常: %s", str(e), exc_info=True)
            return {"error": f"模型调用异常: {str(e)}", "raw_inputs": raw_inputs}

        # Parse the model's JSON response (helper from BaseProcessor).
        logger.info("🔄 开始解析模型响应JSON")
        parsed_result = self.parse_json_response(model_response)

        if "error" in parsed_result:
            logger.error("❌ JSON解析失败: %s", parsed_result["error"])
            logger.debug("原始响应内容: %s", model_response)
        else:
            logger.info("✅ JSON解析成功")
            logger.debug("解析结果键值: %s",
                         list(parsed_result.keys()) if isinstance(parsed_result, dict) else "非字典类型")

        return parsed_result