"""
增强版提示词工程工作流
支持个性化定制、上下文感知和智能优化
"""

import time
import logging
from typing import Dict, Any, List, Optional
from langgraph.graph import StateGraph, END

from src.research_core.prompt_eng_state import PromptEngineeringState
from src.research_core.prompt_eng_agents import (
    analyze_prompt_requirement,
    design_initial_prompt,
    evaluate_prompt,
    optimize_prompt,
    process_human_feedback,
    finalize_prompt,
    should_continue_analysis,
    should_process_feedback,
    run_ab_test,
    monitor_production_prompt,
    ab_test_prompts,
    collect_user_feedback,
    auto_optimize_prompt,
    test_prompt_in_production
)
from src.research_core.ab_testing import compare_prompt_versions

# 创建 logger 实例
logger = logging.getLogger(__name__)


def create_enhanced_prompt_engineering_workflow() -> StateGraph:
    """Build the enhanced prompt-engineering workflow graph.

    Pipeline stages:
        1. analyze_requirement  - requirement analysis
        2. design_prompt        - initial prompt design
        3. evaluate_prompt      - prompt evaluation
        4. optimize_prompt      - prompt optimization
        5. ab_test_prompts      - A/B testing
        6. compare_versions     - version comparison
        7. monitor_production   - production monitoring
        8. finalize_prompt      - finalization

    Feedback loops:
        - process_human_feedback handles human-in-the-loop input
        - should_process_feedback / should_continue_analysis route the loops

    Returns:
        StateGraph: the configured (uncompiled) workflow graph.
    """
    workflow = StateGraph(PromptEngineeringState)

    # Register every stage node up front.
    for node_name, node_fn in (
        ("analyze_requirement", analyze_prompt_requirement),
        ("design_prompt", design_initial_prompt),
        ("evaluate_prompt", evaluate_prompt),
        ("optimize_prompt", optimize_prompt),
        ("process_human_feedback", process_human_feedback),
        ("ab_test_prompts", run_ab_test),
        ("compare_versions", compare_prompt_versions),
        ("monitor_production", monitor_production_prompt),
        ("finalize_prompt", finalize_prompt),
    ):
        workflow.add_node(node_name, node_fn)

    workflow.set_entry_point("analyze_requirement")

    # Straight-line edges, including the terminal edge to END.
    for src, dst in (
        ("analyze_requirement", "design_prompt"),
        ("design_prompt", "evaluate_prompt"),
        ("process_human_feedback", "optimize_prompt"),
        ("ab_test_prompts", "compare_versions"),
        ("compare_versions", "monitor_production"),
        ("monitor_production", "finalize_prompt"),
        ("finalize_prompt", END),
    ):
        workflow.add_edge(src, dst)

    # After evaluation: detour through human feedback when requested,
    # otherwise go straight to optimization.
    workflow.add_conditional_edges(
        "evaluate_prompt",
        should_process_feedback,
        {
            "process_feedback": "process_human_feedback",
            "evaluate": "optimize_prompt",
        },
    )

    # After optimization: loop back to evaluation or proceed to A/B testing.
    workflow.add_conditional_edges(
        "optimize_prompt",
        should_continue_analysis,
        {
            "optimize": "evaluate_prompt",
            "finalize": "ab_test_prompts",
        },
    )

    return workflow


def execute_enhanced_prompt_engineering_workflow(
    initial_state: PromptEngineeringState
) -> Dict[str, Any]:
    """Compile and run the enhanced prompt-engineering workflow.

    Args:
        initial_state: initial workflow state fed into the graph.

    Returns:
        Dict with:
            - ``success``: whether the run completed without raising.
            - ``final_state``: the resulting state on success, or the
              unchanged ``initial_state`` on failure.
            - ``error``: the error message string, or ``None`` on success.
    """
    try:
        workflow = create_enhanced_prompt_engineering_workflow()
        app = workflow.compile()

        result = app.invoke(initial_state)

        return {
            "success": True,
            "final_state": result,
            "error": None
        }
    except Exception as e:
        # logger.exception keeps the full traceback (logger.error with an
        # f-string dropped it), which matters when diagnosing graph failures.
        logger.exception("执行增强版提示词工程工作流时出错: %s", e)
        return {
            "success": False,
            "final_state": initial_state,
            "error": str(e)
        }


def create_personalized_prompt_engineering_workflow() -> StateGraph:
    """Build the personalized prompt-engineering workflow graph.

    Pipeline stages:
        1. analyze_user_preferences - user preference analysis
        2. analyze_requirement      - personalized requirement analysis
        3. design_prompt            - personalized prompt design
        4. evaluate_prompt          - prompt evaluation
        5. optimize_prompt          - prompt optimization
        6. ab_test_prompts          - A/B testing
        7. finalize_prompt          - finalization

    Returns:
        StateGraph: the configured (uncompiled) workflow graph.
    """
    workflow = StateGraph(PromptEngineeringState)

    # Register every stage node up front.
    for node_name, node_fn in (
        ("analyze_user_preferences", _analyze_user_preferences),
        ("analyze_requirement", _personalized_analyze_requirement),
        ("design_prompt", _personalized_design_prompt),
        ("evaluate_prompt", evaluate_prompt),
        ("optimize_prompt", optimize_prompt),
        ("process_human_feedback", process_human_feedback),
        ("ab_test_prompts", run_ab_test),
        ("finalize_prompt", finalize_prompt),
    ):
        workflow.add_node(node_name, node_fn)

    workflow.set_entry_point("analyze_user_preferences")

    # Straight-line edges, including the terminal edge to END.
    for src, dst in (
        ("analyze_user_preferences", "analyze_requirement"),
        ("analyze_requirement", "design_prompt"),
        ("design_prompt", "evaluate_prompt"),
        ("process_human_feedback", "optimize_prompt"),
        ("ab_test_prompts", "finalize_prompt"),
        ("finalize_prompt", END),
    ):
        workflow.add_edge(src, dst)

    # Evaluation may detour through human feedback before optimization.
    workflow.add_conditional_edges(
        "evaluate_prompt",
        should_process_feedback,
        {
            "process_feedback": "process_human_feedback",
            "evaluate": "optimize_prompt",
        },
    )

    # Optimization either loops back to evaluation or moves on to A/B testing.
    workflow.add_conditional_edges(
        "optimize_prompt",
        should_continue_analysis,
        {
            "optimize": "evaluate_prompt",
            "finalize": "ab_test_prompts",
        },
    )

    return workflow


def create_advanced_prompt_engineering_workflow() -> StateGraph:
    """Build the advanced prompt-engineering workflow graph.

    Combines the basic workflow with A/B testing, automatic optimization,
    and production testing; intended for scenarios with strict
    prompt-quality requirements.

    Pipeline stages:
        1. analyze_requirement - requirement analysis
        2. design_prompt       - prompt design
        3. check_feedback      - feedback check
        4. evaluate_prompt     - prompt evaluation
        5. ab_test             - A/B testing
        6. collect_feedback    - user feedback collection
        7. auto_optimize       - automatic optimization
        8. production_test     - production testing
        9. finalize_prompt     - finalization
    """
    workflow = StateGraph(PromptEngineeringState)

    # Register every stage node up front.
    for node_name, node_fn in (
        ("analyze_requirement", analyze_prompt_requirement),
        ("design_prompt", design_initial_prompt),
        ("check_feedback", process_human_feedback),
        ("evaluate_prompt", evaluate_prompt),
        ("ab_test", ab_test_prompts),
        ("collect_feedback", collect_user_feedback),
        ("auto_optimize", auto_optimize_prompt),
        ("production_test", test_prompt_in_production),
        ("finalize_prompt", finalize_prompt),
    ):
        workflow.add_node(node_name, node_fn)

    workflow.set_entry_point("analyze_requirement")

    # Straight-line edges, including the terminal edge to END.
    for src, dst in (
        ("analyze_requirement", "design_prompt"),
        ("design_prompt", "check_feedback"),
        ("evaluate_prompt", "ab_test"),
        ("ab_test", "collect_feedback"),
        ("collect_feedback", "auto_optimize"),
        ("auto_optimize", "production_test"),
        ("finalize_prompt", END),
    ):
        workflow.add_edge(src, dst)

    # Pending feedback goes straight to auto-optimization; otherwise evaluate.
    workflow.add_conditional_edges(
        "check_feedback",
        should_process_feedback,
        {
            "process_feedback": "auto_optimize",
            "evaluate": "evaluate_prompt",
        },
    )

    # Production testing decides between another optimization pass and finishing.
    workflow.add_conditional_edges(
        "production_test",
        should_continue_analysis,
        {
            "optimize": "auto_optimize",
            "finalize": "finalize_prompt",
        },
    )

    return workflow


def create_multi_version_prompt_engineering_workflow() -> StateGraph:
    """Build the multi-version parallel-optimization workflow graph.

    Generates several optimized prompt versions at once, A/B tests them,
    and keeps the best-scoring one.
    """
    workflow = StateGraph(PromptEngineeringState)

    # Register every stage node up front.
    for node_name, node_fn in (
        ("analyze_requirement", analyze_prompt_requirement),
        ("design_prompt", design_initial_prompt),
        ("check_feedback", process_human_feedback),
        ("evaluate_prompt", evaluate_prompt),
        ("multi_version_optimize", _multi_version_optimize),
        ("ab_test_versions", _ab_test_prompt_versions),
        ("select_best_version", _select_best_prompt_version),
        ("finalize_prompt", finalize_prompt),
    ):
        workflow.add_node(node_name, node_fn)

    workflow.set_entry_point("analyze_requirement")

    # Straight-line edges, including the terminal edge to END.
    for src, dst in (
        ("analyze_requirement", "design_prompt"),
        ("design_prompt", "check_feedback"),
        ("evaluate_prompt", "multi_version_optimize"),
        ("multi_version_optimize", "ab_test_versions"),
        ("ab_test_versions", "select_best_version"),
        ("finalize_prompt", END),
    ):
        workflow.add_edge(src, dst)

    # Pending feedback jumps straight to multi-version optimization;
    # otherwise evaluate first.
    workflow.add_conditional_edges(
        "check_feedback",
        should_process_feedback,
        {
            "process_feedback": "multi_version_optimize",
            "evaluate": "evaluate_prompt",
        },
    )

    # After selecting the best version: iterate again or finalize.
    workflow.add_conditional_edges(
        "select_best_version",
        should_continue_analysis,
        {
            "optimize": "multi_version_optimize",
            "finalize": "finalize_prompt",
        },
    )

    return workflow


def _multi_version_optimize(state: PromptEngineeringState) -> Dict[str, Any]:
    """Generate several candidate optimizations of the current prompt.

    Placeholder implementation: each "version" should eventually come from a
    real LLM call applying a distinct optimization strategy; for now the
    strategy label is simply appended to the current prompt.

    Args:
        state: current workflow state; reads ``current_prompt``.

    Returns:
        Partial state update with ``prompt_versions`` (version name ->
        candidate prompt text) and ``current_stage``.
    """
    # Lazy import — presumably avoids a circular import with
    # prompt_eng_agents; confirm before moving to module level.
    from src.research_core.prompt_eng_agents import _initialize_components
    _initialize_components()

    current_prompt = state.get('current_prompt', '')

    # Loop-invariant data hoisted out of the build loop; the unused
    # `import random` and `requirement` read were removed.
    version_names = ["version_a", "version_b", "version_c"]
    optimization_strategies = [
        "优化策略A：增强清晰度和结构化",
        "优化策略B：提高具体性和完整性",
        "优化策略C：加强相关性和适应性",
    ]

    prompt_versions = {
        name: f"{current_prompt}\n\n优化说明: {strategy}"
        for name, strategy in zip(version_names, optimization_strategies)
    }

    return {
        "prompt_versions": prompt_versions,
        "current_stage": "multi_version_optimize"
    }


def _ab_test_prompt_versions(state: PromptEngineeringState) -> Dict[str, Any]:
    """Run a simulated A/B test over every candidate prompt version.

    Each prompt in ``state['prompt_versions']`` is scored with
    ``advanced_evaluate_prompt_quality`` against the requirement; the score
    and the full evaluation details are recorded per version.

    Args:
        state: current workflow state; reads ``prompt_versions`` and
            ``requirement``.

    Returns:
        Partial state update with ``ab_test_results`` and ``current_stage``.
    """
    # Hoisted out of the loop: both the import and the requirement lookup
    # are loop-invariant (the import stays lazy, matching the module's
    # deferred-import pattern).
    from src.research_core.prompt_eng_agents import advanced_evaluate_prompt_quality

    prompt_versions = state.get('prompt_versions', {})
    requirement = state.get('requirement', '')

    ab_test_results = {}
    for version, prompt in prompt_versions.items():
        evaluation_result = advanced_evaluate_prompt_quality(prompt, requirement)
        ab_test_results[version] = {
            "prompt": prompt,
            "score": evaluation_result.get('overall_score', 0.0),
            "details": evaluation_result,
            "feedback": "测试完成"
        }

    return {
        "ab_test_results": ab_test_results,
        "current_stage": "ab_test_versions"
    }


def _select_best_prompt_version(state: PromptEngineeringState) -> Dict[str, Any]:
    """
    选择最佳提示词版本
    """
    ab_test_results = state.get('ab_test_results', {})
    
    if not ab_test_results:
        return {
            "current_stage": "select_best_version"
        }
    
    # 选择得分最高的版本
    best_version = max(ab_test_results.items(), key=lambda x: x[1]['score'])
    
    # 记录选择原因
    selection_reasoning = f"选择版本 {best_version[0]}，得分 {best_version[1]['score']:.2f}，优于其他版本"
    
    return {
        "current_prompt": best_version[1]['prompt'],
        "quality_score": best_version[1]['score'],
        "quality_evaluation_details": best_version[1]['details'],
        "selection_reasoning": selection_reasoning,
        "current_stage": "select_best_version"
    }


def _analyze_user_preferences(state: PromptEngineeringState) -> Dict[str, Any]:
    """
    分析用户偏好
    
    Args:
        state: 当前状态
        
    Returns:
        更新的状态
    """
    interaction_history = state.get("interaction_history", [])
    
    # 这里应该实现实际的用户偏好分析逻辑
    # 目前使用简化的实现
    user_preferences = {
        "preferred_style": "technical",
        "preferred_length": "long",
        "preferred_structure": "structured",
        "feedback_tendency": "neutral"
    }
    
    return {
        "user_preferences": user_preferences,
        "current_stage": "user_analysis"
    }


def _personalized_analyze_requirement(state: PromptEngineeringState) -> Dict[str, Any]:
    """Perform a personalized requirement analysis.

    Derives a coarse domain context from the requirement text, builds a
    personalization context from the user's preferences, and produces the
    analysis result. The LLM call is still a stub: the composed analysis
    prompt is built but not yet sent to a model.

    Args:
        state: current workflow state; reads ``requirement`` and
            ``user_preferences``.

    Returns:
        Partial state update with ``requirement_analysis``,
        ``domain_context``, ``user_preferences``, and ``current_stage``.
    """
    requirement = state.get("requirement", "")
    user_preferences = state.get("user_preferences", {})
    # Note: the previous unused read of "interaction_history" was removed.

    # Keyword-based domain classification (e.g. "医疗领域", "技术领域").
    domain_context = _extract_domain_context(requirement)

    # Personalization block combining preferences with the detected domain.
    personalized_context = _generate_personalized_context(user_preferences, domain_context)

    # Prompt intended for the eventual LLM analysis call. Currently unused by
    # the placeholder below; kept so the integration point stays visible.
    personalized_analysis_prompt = f"""作为提示词工程专家，请根据以下信息进行需求分析：

用户需求：{requirement}

{personalized_context}

请结合用户的个性化偏好和领域特点，提供针对性的需求分析。"""

    # TODO: replace with a real LLM call using personalized_analysis_prompt.
    analysis_result = f"基于用户偏好和领域特点的个性化分析结果：{requirement}"

    return {
        "requirement_analysis": analysis_result,
        "domain_context": domain_context,
        "user_preferences": user_preferences,
        "current_stage": "analysis"
    }


def _personalized_design_prompt(state: PromptEngineeringState) -> Dict[str, Any]:
    """
    个性化提示词设计
    
    Args:
        state: 当前状态
        
    Returns:
        更新的状态
    """
    requirement_analysis = state.get("requirement_analysis", "")
    user_preferences = state.get("user_preferences", {})
    domain_context = state.get("domain_context", "")
    
    # 生成个性化设计提示词
    style_map = {
        "formal": "正式、专业的",
        "casual": "轻松、友好的",
        "technical": "技术性、精确的",
        "creative": "创意性、富有想象力的"
    }
    
    length_map = {
        "short": "简短精炼的",
        "medium": "中等长度的",
        "long": "详细全面的"
    }
    
    structure_map = {
        "structured": "结构化、条理清晰的",
        "freeform": "自由形式、灵活的"
    }
    
    style_desc = style_map.get(user_preferences.get("preferred_style", "technical"), "技术性、精确的")
    length_desc = length_map.get(user_preferences.get("preferred_length", "long"), "详细全面的")
    structure_desc = structure_map.get(user_preferences.get("preferred_structure", "structured"), "结构化、条理清晰的")
    
    personalized_prompt = f"""根据以下分析设计{style_desc}{length_desc}{structure_desc}提示词：

分析结果：{requirement_analysis}

领域上下文：{domain_context}

请设计符合用户偏好和领域特点的提示词。"""

    return {
        "current_prompt": personalized_prompt,
        "current_stage": "design"
    }


def _extract_domain_context(requirement: str) -> str:
    """
    根据需求提取领域上下文信息
    
    Args:
        requirement: 用户需求描述
        
    Returns:
        领域上下文信息
    """
    requirement_lower = requirement.lower()
    
    # 医疗领域关键词
    medical_keywords = ["医疗", "医学", "疾病", "治疗", "药物", "诊断", "health", "medical", "disease", "treatment"]
    if any(keyword in requirement_lower for keyword in medical_keywords):
        return "医疗领域"
    
    # 法律领域关键词
    legal_keywords = ["法律", "法规", "合同", "诉讼", "律师", "法院", "law", "legal", "contract", "court"]
    if any(keyword in requirement_lower for keyword in legal_keywords):
        return "法律领域"
    
    # 教育领域关键词
    education_keywords = ["教育", "教学", "学习", "课程", "学生", "教师", "education", "teaching", "learning", "course"]
    if any(keyword in requirement_lower for keyword in education_keywords):
        return "教育领域"
    
    # 技术领域关键词
    tech_keywords = ["技术", "开发", "编程", "软件", "系统", "tech", "development", "programming", "software"]
    if any(keyword in requirement_lower for keyword in tech_keywords):
        return "技术领域"
    
    # 默认返回通用领域
    return "通用领域"


def _generate_personalized_context(user_preferences: Dict[str, Any], domain_context: str) -> str:
    """
    生成个性化上下文信息
    
    Args:
        user_preferences: 用户偏好
        domain_context: 领域上下文
        
    Returns:
        个性化上下文信息
    """
    style_desc = {
        "formal": "正式、专业的语言风格",
        "casual": "轻松、随意的语言风格",
        "technical": "技术性、精确的语言风格",
        "creative": "创意性、富有想象力的语言风格"
    }
    
    length_desc = {
        "short": "简短精炼的内容",
        "medium": "中等长度的内容",
        "long": "详细全面的内容"
    }
    
    structure_desc = {
        "structured": "结构化、条理清晰的组织方式",
        "freeform": "自由形式、灵活的组织方式"
    }
    
    personalized_context = f"""
个性化设置：
1. 语言风格：{style_desc.get(user_preferences.get('preferred_style', 'formal'), '正式、专业的语言风格')}
2. 内容长度：{length_desc.get(user_preferences.get('preferred_length', 'medium'), '中等长度的内容')}
3. 组织结构：{structure_desc.get(user_preferences.get('preferred_structure', 'structured'), '结构化、条理清晰的组织方式')}

领域上下文：
{domain_context}
"""
    
    return personalized_context


# Pre-built (uncompiled) workflow graphs exposed at module level —
# presumably referenced directly by a LangGraph runtime/deployment config;
# confirm against the project's langgraph configuration. NOTE(review):
# these run at import time, so importing this module builds all four graphs.
graph = create_enhanced_prompt_engineering_workflow()
personalized_graph = create_personalized_prompt_engineering_workflow()
advanced_graph = create_advanced_prompt_engineering_workflow()
multi_version_graph = create_multi_version_prompt_engineering_workflow()

# Public API of this module (the pre-built graph instances above are
# intentionally not listed here).
__all__ = [
    'create_enhanced_prompt_engineering_workflow',
    'execute_enhanced_prompt_engineering_workflow',
    'create_personalized_prompt_engineering_workflow',
    'create_advanced_prompt_engineering_workflow',
    'create_multi_version_prompt_engineering_workflow'
]