"""
智能提示词系统
提供基于需求复杂度和AI模型特性的智能提示词生成功能
"""

import logging
import re
from typing import Dict, Any, Tuple, Optional
from app.services.optimized_prompts import (
    get_ultra_fast_prompt, get_fast_prompt, get_balanced_prompt,
    get_optimized_system_prompt, get_optimized_user_prompt,
    PromptOptimizer
)

logger = logging.getLogger(__name__)


def analyze_requirement_complexity(requirement: str) -> str:
    """Classify a requirement's complexity level.

    Scores the text by its length plus weighted keyword hits for
    features, business processes, and technical concerns, then maps the
    score onto one of four buckets.

    Args:
        requirement: Requirement text to analyze.

    Returns:
        One of "ultra_simple", "simple", "medium", "complex".
    """
    # Trivially short (or empty) requirements skip scoring entirely.
    if not requirement or len(requirement.strip()) < 10:
        return "ultra_simple"

    def keyword_hits(keywords):
        # Each keyword counts at most once, regardless of repetitions.
        return sum(1 for kw in keywords if kw in requirement)

    # Feature count (identified via feature keywords).
    feature_hits = keyword_hits(
        ['功能', '模块', '系统', '管理', '查询', '添加', '删除', '修改', '登录', '注册'])
    # Business-process complexity (identified via workflow keywords).
    process_hits = keyword_hits(['流程', '审批', '工作流', '步骤', '阶段', '环节'])
    # Technical complexity (identified via technical keywords).
    tech_hits = keyword_hits(['数据库', '接口', 'API', '权限', '角色', '集成', '同步', '异步'])

    # Weighted total: length contributes lightly; keyword categories dominate.
    score = (len(requirement) * 0.01
             + feature_hits * 10
             + process_hits * 15
             + tech_hits * 12)

    # Return the first bucket whose upper threshold the score stays under.
    for threshold, level in ((20, "ultra_simple"), (50, "simple"), (100, "medium")):
        if score < threshold:
            return level
    return "complex"


def get_smart_prompt_and_config(requirement: str, speed_priority: bool = True,
                                model: Optional[str] = None) -> Tuple[str, str, Dict[str, Any]]:
    """Build a smart system/user prompt pair plus a generation config.

    The requirement is graded for complexity; reasoning models get a
    dedicated large-token config, while all other models get a prompt
    and budget matched to the complexity tier and the speed/quality
    trade-off.

    Args:
        requirement: Requirement text.
        speed_priority: Prefer speed (leaner prompts, smaller budgets)
            over quality when True.
        model: Model name, used for reasoning-model detection. May be
            None (was previously mis-annotated as plain ``str``).

    Returns:
        Tuple of (system_prompt, user_prompt, config). On any internal
        failure a safe default prompt/config is returned instead of
        raising.
    """
    try:
        complexity = analyze_requirement_complexity(requirement)
        logger.info(f"需求复杂度分析结果: {complexity}")

        # Reasoning models (deepseek-r1 family, o1-*, ...) need much
        # larger token budgets for their thinking phase; they bypass
        # the complexity-tiered selection below.
        reasoning_config = get_reasoning_model_config(requirement, model)
        if reasoning_config:
            logger.info(f"检测到推理模型 {model}，使用推理模型专用配置")
            system_prompt = get_optimized_system_prompt()
            user_prompt = get_optimized_user_prompt(requirement)

            logger.info(
                f"推理模型配置 - 复杂度: {complexity}, max_tokens: {reasoning_config['max_tokens']}")
            logger.info(
                f"系统提示词长度: {len(system_prompt)}, 用户提示词长度: {len(user_prompt)}")
            logger.info(f"推理模型配置: {reasoning_config}")

            return system_prompt, user_prompt, reasoning_config

        # Non-reasoning models: pick a (prompt builder, config) pair
        # per complexity tier and priority mode.
        if speed_priority:
            strategy = {
                "ultra_simple": (get_ultra_fast_prompt,
                                 {"max_tokens": 800, "temperature": 0.1, "expected_cases": 3}),
                "simple": (get_fast_prompt,
                           {"max_tokens": 1200, "temperature": 0.15, "expected_cases": 5}),
                "medium": (get_balanced_prompt,
                           {"max_tokens": 2000, "temperature": 0.2, "expected_cases": 8}),
                "complex": (get_optimized_system_prompt,
                            {"max_tokens": 2500, "temperature": 0.25, "expected_cases": 12}),
            }[complexity]
        elif complexity in ("ultra_simple", "simple"):
            # Quality-priority mode: even simple requirements get the
            # balanced prompt for richer output.
            strategy = (get_balanced_prompt,
                        {"max_tokens": 1500, "temperature": 0.2, "expected_cases": 6})
        else:
            strategy = (get_optimized_system_prompt,
                        {"max_tokens": 3000, "temperature": 0.3, "expected_cases": 15})

        prompt_builder, config = strategy
        system_prompt = prompt_builder()
        user_prompt = get_optimized_user_prompt(requirement)

        logger.info(f"智能提示词生成完成 - 复杂度: {complexity}, 速度优先: {speed_priority}")
        logger.info(
            f"系统提示词长度: {len(system_prompt)}, 用户提示词长度: {len(user_prompt)}")
        logger.info(f"配置: {config}")

        return system_prompt, user_prompt, config

    except Exception as e:
        logger.error(f"智能提示词生成失败: {str(e)}")
        # Fall back to a safe default rather than propagating the error.
        return get_fast_prompt(), f"需求：{requirement}\n\n生成测试用例JSON：", {
            "max_tokens": 2000,
            "temperature": 0.2,
            "expected_cases": 5
        }


def optimize_prompt_for_model(prompt: str, provider: str, model: str) -> str:
    """Adapt a prompt to the quirks of a specific AI provider/model.

    Dispatches to a provider-specific helper; unknown providers fall
    back to the generic optimizer. Any failure leaves the prompt
    untouched.

    Args:
        prompt: Original prompt text.
        provider: AI provider name (matched case-insensitively).
        model: Model name (used to pick Doubao Flash vs. standard).

    Returns:
        The optimized prompt, or the original prompt on error.
    """
    try:
        provider_key = provider.lower()

        if provider_key in ('doubao', '豆包', 'bytedance'):
            # Flash variants prefer terse instructions; other Doubao
            # models get the structured treatment.
            if 'flash' in model.lower():
                optimizer = _optimize_for_doubao_flash
            else:
                optimizer = _optimize_for_doubao_standard
        elif provider_key in ('openai', 'gpt'):
            optimizer = _optimize_for_openai
        elif provider_key == 'deepseek':
            optimizer = _optimize_for_deepseek
        elif provider_key in ('claude', 'anthropic'):
            optimizer = _optimize_for_claude
        else:
            optimizer = _optimize_for_generic

        optimized = optimizer(prompt)
        logger.info(f"为 {provider}/{model} 优化提示词完成，长度: {len(optimized)}")
        return optimized

    except Exception as e:
        logger.error(f"提示词优化失败: {str(e)}")
        return prompt


def get_reasoning_model_config(requirement: str, model: Optional[str] = None) -> Dict[str, Any]:
    """Return a dedicated generation config for reasoning models.

    Reasoning models (deepseek-r1 family, OpenAI o1 family, etc.) spend
    tokens on a visible thinking phase, so they need much larger
    max_tokens budgets than ordinary chat models.

    Args:
        requirement: Requirement text used to grade complexity.
        model: Model name; matched case-insensitively by substring.
            May be None (was previously mis-annotated as plain ``str``).

    Returns:
        A dict with "max_tokens", "temperature", "expected_cases" and
        "reasoning_model": True for reasoning models; an EMPTY dict for
        every other (or missing) model.
    """
    # Known reasoning-model identifiers, matched as substrings of the
    # lowercased model name.
    reasoning_models = {
        'deepseek-r1', 'deepseek-r1-distill', 'deepseek-r1-zero', 'deepseek-reasoner',
        'o1-preview', 'o1-mini', 'o1-pro',
        'qwen-qvq', 'qwen-with-search'
    }

    if not model or not any(rm in model.lower() for rm in reasoning_models):
        return {}

    complexity = analyze_requirement_complexity(requirement)

    # (max_tokens, temperature, expected_cases) per complexity tier;
    # budgets are deliberately large to leave room for the thinking
    # process before the actual answer.
    budgets = {
        "ultra_simple": (4000, 0.1, 3),
        "simple": (6000, 0.15, 5),
        "medium": (8000, 0.2, 8),
        "complex": (12000, 0.25, 12),
    }
    max_tokens, temperature, expected_cases = budgets[complexity]
    return {
        "max_tokens": max_tokens,
        "temperature": temperature,
        "expected_cases": expected_cases,
        "reasoning_model": True,
    }


def _optimize_for_doubao_flash(prompt: str) -> str:
    """为豆包Flash模型优化提示词"""
    # Flash模型偏好简洁、直接的指令
    optimized = prompt.replace("请", "").replace("您", "")
    optimized = re.sub(r'详细的?', '', optimized)
    optimized = re.sub(r'请注意[^。]*。', '', optimized)

    # 添加Flash模型特定的指令
    if "JSON" in optimized and "只返回JSON" not in optimized:
        optimized += "\n\n只返回JSON格式，无其他内容。"

    return optimized


def _optimize_for_doubao_standard(prompt: str) -> str:
    """为豆包标准模型优化提示词"""
    # 豆包模型偏好结构化指令
    if "要求：" not in prompt:
        # 添加结构化要求
        lines = prompt.split('\n')
        if len(lines) > 3:
            lines.insert(2, "\n要求：")

    return prompt


def _optimize_for_openai(prompt: str) -> str:
    """为OpenAI模型优化提示词"""
    # OpenAI模型偏好明确的角色定义
    if not prompt.startswith("You are") and not prompt.startswith("你是"):
        prompt = "你是专业的测试工程师。" + prompt

    return prompt


def _optimize_for_deepseek(prompt: str) -> str:
    """为DeepSeek模型优化提示词"""
    # DeepSeek模型偏好逻辑清晰的指令
    if "步骤：" not in prompt:
        # 添加步骤化指导
        prompt += "\n\n请按以下步骤执行：\n1. 分析需求\n2. 设计测试场景\n3. 生成JSON格式测试用例"

    return prompt


def _optimize_for_claude(prompt: str) -> str:
    """为Claude模型优化提示词"""
    # Claude偏好详细的上下文
    if "背景：" not in prompt:
        prompt = "背景：作为测试工程师，需要根据需求生成测试用例。\n\n" + prompt

    return prompt


def _optimize_for_generic(prompt: str) -> str:
    """通用模型优化"""
    # 确保包含基本的JSON格式要求
    if "JSON" in prompt and "格式" not in prompt:
        prompt += "\n\n请严格按照JSON格式返回结果。"

    return prompt


# Public API of this module.
__all__ = [
    'get_smart_prompt_and_config',
    'optimize_prompt_for_model',
    'analyze_requirement_complexity',
    'get_reasoning_model_config'
]
