"""
提示词工程代理函数
实现支持人工反馈的提示词工程各阶段代理
"""

import time
from typing import Dict, Any, Union, List, Optional, Callable, TypedDict
from pydantic import BaseModel
import re

from src.research_core.prompt_eng_state import PromptEngineeringState
from src.research_core.project_prompts import (
    PROMPT_ANALYSIS_PROMPT, 
    PROMPT_OPTIMIZATION_PROMPT, 
    PROMPT_EVALUATION_PROMPT
)
from src.research_core.prompt_templates import get_prompt_template_library
from src.research_core.performance_optimizer import apply_performance_optimization
from src.research_core.prompt_eng_modules import (
    IRequirementAnalysisModule,
    IPromptDesignModule,
    IPromptEvaluationModule,
    IPromptOptimizationModule,
    IHummanFeedbackModule,
    BasePromptEngineeringModule
)
import datetime


# Module-level LLM handle; injected lazily to avoid circular imports.
llm = None

def _initialize_components():
    """Lazily initialize shared components (currently just the LLM client)."""
    global llm
    if llm is not None:
        return
    from src.research_core.model import get_llm
    llm = get_llm()

# Structured-output schema for the requirement-analysis LLM call.
class PromptAnalysis(BaseModel):
    # Free-form analysis text produced by the LLM; defaults to empty.
    analysis: str = ""

def evaluate_prompt_quality(prompt: str) -> float:
    """Return a rough quality score in [0, 1] for *prompt* (simplified heuristic)."""
    if not prompt:
        return 0.0

    # Length contributes up to 1.0 (saturating at 1000 characters); the
    # structure and clarity components are fixed placeholders for now.
    component_scores = [
        min(len(prompt) / 1000, 1.0),  # length score
        0.5,                           # structure score (placeholder)
        0.5,                           # clarity score (placeholder)
    ]
    return round(sum(component_scores) / 3, 2)

def _assess_clarity(prompt: str) -> float:
    """评估提示词的清晰度"""
    # 检查是否包含模糊词汇
    vague_words = ["可能", "大概", "也许", "差不多", "随意", "随便"]
    vague_count = sum(1 for word in vague_words if word in prompt)
    
    # 基于句子长度评估（过长的句子可能不够清晰）
    sentences = re.split(r'[.。！!？?]', prompt)
    avg_sentence_length = sum(len(s) for s in sentences) / len(sentences) if sentences else 0
    
    # 计算清晰度得分
    clarity_from_vagueness = max(0, 1 - (vague_count / max(1, len(prompt) / 50)))
    clarity_from_length = max(0, 1 - (max(0, avg_sentence_length - 25) / 50))
    
    return (clarity_from_vagueness + clarity_from_length) / 2

def _assess_completeness(prompt: str) -> float:
    """评估提示词的完整性"""
    # 检查是否包含关键元素
    key_elements = ["目标", "要求", "输出", "格式", "示例"]
    found_elements = sum(1 for element in key_elements if element in prompt)
    
    # 检查是否包含指导性语句
    guidance_indicators = ["请", "应该", "需要", "必须", "确保"]
    guidance_count = sum(1 for indicator in guidance_indicators if indicator in prompt)
    
    # 计算完整性得分
    element_score = found_elements / len(key_elements)
    guidance_score = min(1.0, guidance_count / 5)
    
    return (element_score + guidance_score) / 2

def _assess_specificity(prompt: str) -> float:
    """评估提示词的具体性"""
    # 检查是否包含具体示例或约束
    specific_indicators = ["例如", "比如", "具体来说", "限制条件", "约束", "边界情况"]
    specific_count = sum(1 for indicator in specific_indicators if indicator in prompt)
    
    # 检查是否包含量化指标
    import re
    quantifiers = r'\d+\s*(?:个|项|条|种|类|次|分钟|小时|天|月|年|%)'
    quantifier_matches = re.findall(quantifiers, prompt)
    
    # 计算具体性得分
    specificity_from_indicators = min(1.0, specific_count / len(specific_indicators))
    specificity_from_quantifiers = min(1.0, len(quantifier_matches) / 3)
    
    return (specificity_from_indicators + specificity_from_quantifiers) / 2

def _assess_relevance(prompt: str, requirement: str) -> float:
    """评估提示词与需求的相关性"""
    if not requirement:
        return 0.5
    
    # 简单的关键词匹配评估
    prompt_words = set(prompt.lower().split())
    requirement_words = set(requirement.lower().split())
    
    if not prompt_words or not requirement_words:
        return 0.5
    
    # 计算交集和并集
    intersection = prompt_words.intersection(requirement_words)
    union = prompt_words.union(requirement_words)
    
    # Jaccard相似度
    jaccard_similarity = len(intersection) / len(union) if union else 0
    
    # 包含需求关键词的比例
    requirement_coverage = len(intersection) / len(requirement_words) if requirement_words else 0
    
    return (jaccard_similarity + requirement_coverage) / 2

def _assess_structuredness(prompt: str) -> float:
    """评估提示词的结构化程度"""
    # 检查是否包含结构化元素
    structural_elements = [
        r'\d+\.\s',  # 数字列表
        r'-\s',      # 短横线列表
        r'\*\s',     # 星号列表
        r'###?\s',   # 标题
        r'\*\*.*?\*\*',  # 粗体
        r'\n\s*\n'   # 段落分隔
    ]
    
    structure_count = sum(1 for element in structural_elements if re.search(element, prompt))
    
    # 检查是否包含逻辑连接词
    logical_connectors = ["首先", "然后", "接着", "最后", "此外", "然而", "因此", "所以"]
    connector_count = sum(1 for connector in logical_connectors if connector in prompt)
    
    # 计算结构化得分
    structure_score = min(1.0, structure_count / len(structural_elements))
    logic_score = min(1.0, connector_count / len(logical_connectors))
    
    return (structure_score + logic_score) / 2

def _generate_quality_explanation(scores: Dict[str, float]) -> str:
    """
    生成质量评分解释
    
    Args:
        scores: 各项评分
        
    Returns:
        评分解释文本
    """
    overall_score = scores.get("overall_score", 0.0)
    
    if overall_score >= 0.8:
        quality_level = "优秀"
        recommendation = "提示词质量优秀，无需重大改进"
    elif overall_score >= 0.6:
        quality_level = "良好"
        recommendation = "提示词质量良好，可考虑进一步优化"
    elif overall_score >= 0.4:
        quality_level = "一般"
        recommendation = "提示词质量一般，建议进行优化"
    else:
        quality_level = "较差"
        recommendation = "提示词质量较差，需要大幅改进"
    
    # 找出最低分的维度
    lowest_dimension = min(scores.items(), key=lambda x: x[1]) if scores else ("unknown", 0)
    if lowest_dimension[1] < 0.5 and lowest_dimension[0] != "overall_score":
        dimension_names = {
            "clarity": "清晰度",
            "completeness": "完整性",
            "specificity": "具体性",
            "relevance": "相关性",
            "structuredness": "结构化",
            "ml_score": "机器学习评估",
            "contextual_adaptability": "上下文适应性",
            "diversity": "多样性",
            "safety": "安全性",
            "generalization": "泛化能力"
        }
        weak_dimension = dimension_names.get(lowest_dimension[0], lowest_dimension[0])
        recommendation += f"，特别注意改进{weak_dimension}方面"
    
    return f"综合评分{overall_score:.2f}，质量{quality_level}。{recommendation}"

def _assess_contextual_adaptability(prompt: str) -> float:
    """评估提示词的上下文适应性"""
    # 检查是否包含上下文相关的关键词
    context_keywords = ["根据上下文", "结合背景", "考虑情况", "适应场景", "灵活调整", "具体情况具体分析"]
    context_count = sum(1 for keyword in context_keywords if keyword in prompt)
    
    # 检查是否包含条件语句
    conditional_patterns = [r"如果.*?则", r"当.*?时", r"在.*?情况下", r"根据.*?条件"]
    conditional_count = sum(1 for pattern in conditional_patterns if re.search(pattern, prompt))
    
    # 计算适应性得分
    adaptability_from_keywords = min(1.0, context_count / len(context_keywords))
    adaptability_from_conditionals = min(1.0, conditional_count / len(conditional_patterns))
    
    return (adaptability_from_keywords + adaptability_from_conditionals) / 2

def _assess_diversity(prompt: str) -> float:
    """评估提示词的多样性"""
    # 计算词汇丰富度
    words = prompt.split()
    unique_words = set(words)
    
    if len(words) == 0:
        return 0.0
    
    # 词汇多样性比率
    diversity_ratio = len(unique_words) / len(words)
    
    # 检查是否包含多种类型的指导语句
    instruction_types = [
        r"请.*?分析", r"请.*?总结", r"请.*?解释", r"请.*?描述",
        r"请.*?列举", r"请.*?比较", r"请.*?评估", r"请.*?建议"
    ]
    
    instruction_count = sum(1 for pattern in instruction_types if re.search(pattern, prompt, re.IGNORECASE))
    instruction_diversity = min(1.0, instruction_count / len(instruction_types))
    
    return (diversity_ratio + instruction_diversity) / 2

def _assess_safety(prompt: str) -> float:
    """评估提示词的安全性"""
    # 检查是否包含安全相关的约束
    safety_keywords = [
        "避免", "不要", "禁止", "限制", "安全", "隐私", "合规",
        "道德", "法律", "有害", "敏感", "适当", "尊重", "保护"
    ]
    
    safety_count = sum(1 for keyword in safety_keywords if keyword in prompt)
    safety_ratio = min(1.0, safety_count / (len(safety_keywords) / 2))
    
    # 检查是否包含不当内容的关键词（应避免的）
    unsafe_keywords = [
        "暴力", "歧视", "偏见", "违法", "有害", "危险", "攻击", 
        "恶意", "欺骗", "操纵", "侵犯", "泄露", "滥用"
    ]
    
    unsafe_count = sum(1 for keyword in unsafe_keywords if keyword in prompt.lower())
    unsafe_penalty = unsafe_count / len(unsafe_keywords)
    
    # 安全性评分（越高越好，但如果有不当内容则降低评分）
    safety_score = max(0.0, safety_ratio - unsafe_penalty)
    
    return safety_score

def _assess_generalization(prompt: str) -> float:
    """评估提示词的泛化能力"""
    # 检查是否包含泛化相关的关键词
    generalization_keywords = [
        "通常", "一般情况下", "大多数情况", "普遍", "广泛适用",
        "通用", "标准", "常见", "典型", "代表性", "广泛"
    ]
    
    general_count = sum(1 for keyword in generalization_keywords if keyword in prompt)
    generalization_ratio = min(1.0, general_count / len(generalization_keywords))
    
    # 检查是否包含特定实例限制（降低泛化能力）
    specific_limiters = [
        "仅限", "只在", "只有", "特定", "唯一", "专门",
        "专门针对", "仅适用于", "局限于"
    ]
    
    specific_count = sum(1 for limiter in specific_limiters if limiter in prompt)
    specificity_penalty = min(0.5, specific_count / len(specific_limiters))
    
    # 泛化能力评分
    generalization_score = max(0.0, generalization_ratio - specificity_penalty)
    
    return generalization_score

def advanced_evaluate_prompt_quality(prompt: str, requirement: str = "") -> Dict[str, Any]:
    """Run the multi-dimension prompt quality evaluation.

    Args:
        prompt: The prompt text to evaluate.
        requirement: Optional original requirement, used for the relevance score.

    Returns:
        A dict with the per-dimension scores (rounded to 2 decimals), the
        weighted overall score, and a human-readable explanation.
    """
    dimension_keys = (
        "clarity", "completeness", "specificity", "relevance",
        "structuredness", "ml_score", "contextual_adaptability",
        "diversity", "safety", "generalization",
    )

    if not prompt:
        # All-zero report for an empty prompt.
        empty_report = {"overall_score": 0.0}
        for key in dimension_keys:
            empty_report[key] = 0.0
        empty_report["details"] = "提示词为空"
        empty_report["explanation"] = "无法评估空提示词"
        return empty_report

    # Heuristic dimension scores (relevance is neutral without a requirement).
    scores = {
        "clarity": _assess_clarity(prompt),
        "completeness": _assess_completeness(prompt),
        "specificity": _assess_specificity(prompt),
        "relevance": _assess_relevance(prompt, requirement) if requirement else 0.5,
        "structuredness": _assess_structuredness(prompt),
        "contextual_adaptability": _assess_contextual_adaptability(prompt),
        "diversity": _assess_diversity(prompt),
        "safety": _assess_safety(prompt),
        "generalization": _assess_generalization(prompt),
    }

    # ML-assisted score; falls back to 0.0 when the evaluator is unavailable.
    try:
        from src.research_core.ml_prompt_evaluator import create_ml_evaluator_with_examples
        ml_evaluator = create_ml_evaluator_with_examples()
        scores["ml_score"] = ml_evaluator.evaluate_prompt(prompt, requirement).get("ml_score", 0.0)
    except Exception:
        scores["ml_score"] = 0.0

    # Weighted aggregate; weights sum to 1.0 (ML contributes 10%).
    weights = {
        "clarity": 0.1,
        "completeness": 0.15,
        "specificity": 0.1,
        "relevance": 0.1,
        "structuredness": 0.1,
        "contextual_adaptability": 0.1,
        "diversity": 0.05,
        "safety": 0.1,
        "generalization": 0.1,
        "ml_score": 0.1,
    }
    overall_score = sum(scores[name] * weight for name, weight in weights.items())

    # Human-readable summary of the breakdown.
    explanation = _generate_quality_explanation({**scores, "overall_score": overall_score})

    report = {"overall_score": round(overall_score, 2)}
    for key in dimension_keys:
        report[key] = round(scores[key], 2)
    report["details"] = "评估完成"
    report["explanation"] = explanation
    return report

def _extract_domain_context(requirement: str) -> str:
    """
    根据需求提取领域上下文信息
    
    Args:
        requirement: 用户需求描述
        
    Returns:
        领域上下文信息
    """
    requirement_lower = requirement.lower()
    
    # 医疗领域关键词
    medical_keywords = ["医疗", "医学", "疾病", "治疗", "药物", "诊断", "health", "medical", "disease", "treatment"]
    if any(keyword in requirement_lower for keyword in medical_keywords):
        return "医疗领域：需要遵循医学规范，使用专业术语，确保信息准确性"
    
    # 法律领域关键词
    legal_keywords = ["法律", "法规", "合同", "诉讼", "律师", "法院", "law", "legal", "contract", "court"]
    if any(keyword in requirement_lower for keyword in legal_keywords):
        return "法律领域：需要严格遵循法律条文，使用正式语言，确保表述准确无误"
    
    # 教育领域关键词
    education_keywords = ["教育", "教学", "学习", "课程", "学生", "教师", "education", "teaching", "learning", "course"]
    if any(keyword in requirement_lower for keyword in education_keywords):
        return "教育领域：需要采用教育学专业术语，注重知识传递的准确性和易懂性"
    
    # 技术领域关键词
    tech_keywords = ["技术", "开发", "编程", "软件", "系统", "tech", "development", "programming", "software"]
    if any(keyword in requirement_lower for keyword in tech_keywords):
        return "技术领域：需要使用技术术语，注重逻辑性和实现细节"
    
    # 默认返回通用领域
    return "通用领域：适用于多种场景的通用提示词策略"


def _extract_user_preferences(interaction_history: List[Dict[str, Any]]) -> Dict[str, Any]:
    """
    从交互历史中提取用户偏好
    
    Args:
        interaction_history: 用户交互历史记录
        
    Returns:
        用户偏好字典
    """
    preferences = {
        "preferred_style": "formal",  # formal, casual, technical, creative
        "preferred_length": "medium",  # short, medium, long
        "preferred_structure": "structured",  # structured, freeform
        "feedback_tendency": "neutral"  # positive, negative, neutral
    }
    
    if not interaction_history:
        return preferences
    
    # 分析历史反馈
    positive_feedback_count = 0
    negative_feedback_count = 0
    formal_keywords = ["正式", "专业", "严谨", "formal", "professional"]
    casual_keywords = ["轻松", "随意", "简单", "casual", "simple"]
    technical_keywords = ["技术", "代码", "technical", "code"]
    creative_keywords = ["创意", "创新", "creative", "innovative"]
    
    for interaction in interaction_history:
        feedback = interaction.get("content", "").lower()
        if any(keyword in feedback for keyword in ["好", "棒", "优秀", "good", "great", "excellent"]):
            positive_feedback_count += 1
        elif any(keyword in feedback for keyword in ["差", "不好", "糟糕", "bad", "poor"]):
            negative_feedback_count += 1
            
        if any(keyword in feedback for keyword in formal_keywords):
            preferences["preferred_style"] = "formal"
        elif any(keyword in feedback for keyword in casual_keywords):
            preferences["preferred_style"] = "casual"
        elif any(keyword in feedback for keyword in technical_keywords):
            preferences["preferred_style"] = "technical"
        elif any(keyword in feedback for keyword in creative_keywords):
            preferences["preferred_style"] = "creative"
    
    # 判断反馈倾向
    if positive_feedback_count > negative_feedback_count:
        preferences["feedback_tendency"] = "positive"
    elif negative_feedback_count > positive_feedback_count:
        preferences["feedback_tendency"] = "negative"
    
    return preferences


def _generate_personalized_context(user_preferences: Dict[str, Any], domain_context: str) -> str:
    """
    生成个性化上下文信息
    
    Args:
        user_preferences: 用户偏好
        domain_context: 领域上下文
        
    Returns:
        个性化上下文信息
    """
    style_desc = {
        "formal": "正式、专业的语言风格",
        "casual": "轻松、随意的语言风格",
        "technical": "技术性、精确的语言风格",
        "creative": "创意性、富有想象力的语言风格"
    }
    
    length_desc = {
        "short": "简短精炼的内容",
        "medium": "中等长度的内容",
        "long": "详细全面的内容"
    }
    
    structure_desc = {
        "structured": "结构化、条理清晰的组织方式",
        "freeform": "自由形式、灵活的组织方式"
    }
    
    personalized_context = f"""
个性化设置：
1. 语言风格：{style_desc.get(user_preferences.get('preferred_style', 'formal'), '正式、专业的语言风格')}
2. 内容长度：{length_desc.get(user_preferences.get('preferred_length', 'medium'), '中等长度的内容')}
3. 组织结构：{structure_desc.get(user_preferences.get('preferred_structure', 'structured'), '结构化、条理清晰的组织方式')}

领域上下文：
{domain_context}
"""
    
    return personalized_context

class RequirementAnalysisModule(BasePromptEngineeringModule, IRequirementAnalysisModule):
    """
    Requirement analysis module.

    Analyzes the user's prompt requirement and produces an analysis report.
    Enriches the analysis with template recommendations and domain context
    so the prompt-generation strategy can be tuned per domain.
    """
    
    def __init__(self):
        # Module id + human-readable description (runtime string, kept in Chinese).
        super().__init__("requirement_analysis", "分析用户提示词需求，生成需求分析报告")
    
    def execute(self, state: PromptEngineeringState) -> Dict[str, Any]:
        """Entry point: analyze the requirement stored in *state*."""
        # Apply performance optimization to the incoming state.
        state = apply_performance_optimization(state)
        result = self.analyze_requirement(state.get('requirement', ''))
        return result

    def analyze_requirement(self, requirement: str) -> Dict[str, Any]:
        """Analyze the user requirement via the LLM.

        Args:
            requirement: Raw user requirement text.

        Returns:
            Dict with "requirement_analysis" (analysis text), "context_info"
            (recommended templates, domain context, timestamp) and
            "current_stage" set to "analysis".

        Raises:
            Exception: If the LLM could not be initialized.
        """
        _initialize_components()
        
        # Defensive double-check that the lazily injected LLM exists.
        if llm is None:
            _initialize_components()
            if llm is None:
                raise Exception("Failed to initialize LLM")
        
        # NOTE(review): a brand-new, empty state is constructed here rather
        # than using the caller's workflow state, so the user_preferences
        # read further below is always {} — confirm whether the real state
        # should be threaded through instead.
        state = PromptEngineeringState(
            requirement="",
            requirement_analysis="",
            current_prompt=None,
            human_feedback=None,
            feedback_history=[],
            optimization_goal=None,
            prompt_evaluation=None,
            final_prompt=None,
            design_reasoning=None,
            iteration_count=0,
            workflow_complete=False,
            human_intervened=False,
            current_stage="",
            quality_score=0.0,
            execution_time={},
            context_info={},
            metadata={},
            ab_test_results=None,
            performance_metrics=[],
            user_feedbacks=[],
            optimization_history=[],
            prompt_versions={},
            test_results=None,
            user_preferences={},
            domain_context=None,
            interaction_history=[],
            personalization_settings={},
            template_recommendations=[],
            quality_evaluation_details=None,
            ab_test_variants=[],
            quality_history=[],
            decision_log=[],
            workflow_metrics={}
        )
        
        # Fetch the template library and recommend suitable templates.
        template_library = get_prompt_template_library()
        recommended_templates = template_library.recommend_templates(requirement)
        
        # Derive a domain-specific generation strategy from the requirement.
        domain_context = _extract_domain_context(requirement)
        
        # User preferences (always empty here; see NOTE above).
        user_preferences = state.get("user_preferences", {})
        
        # Build an enriched analysis prompt with extra context.
        # NOTE(review): this enriched prompt is only used on the fallback
        # (non-structured-output) path below; the primary structured path
        # re-formats PROMPT_ANALYSIS_PROMPT without the extra context —
        # confirm whether that is intentional.
        enhanced_analysis_prompt = f"""{PROMPT_ANALYSIS_PROMPT.format_messages(
            prompt_requirement=requirement
        )[0].content}
        
额外上下文信息：
1. 推荐的提示词模板: {', '.join([t.get('name', 'N/A') if isinstance(t, dict) else str(t) for t in recommended_templates]) if recommended_templates else '无'}
2. 领域上下文: {domain_context}
3. 用户偏好: {str(user_preferences) if user_preferences else '未提供'}
        
请基于以上信息，提供更精准的需求分析，包括：
1. 核心功能需求分析
2. 预期输出格式和结构
3. 目标用户和使用场景
4. 关键约束和边界条件
5. 可能的挑战和解决方案建议"""

        # Call the LLM; handle backends without structured-output support.
        try:
            analyzer = llm.with_structured_output(PromptAnalysis)
            # PROMPT_ANALYSIS_PROMPT is a ChatPromptTemplate, so the
            # requirement is supplied via format_messages rather than by
            # formatting the template object directly.
            analysis_result = analyzer.invoke(PROMPT_ANALYSIS_PROMPT.format_messages(
                prompt_requirement=requirement
                ))
        except Exception as e:
            # When structured output is unsupported, fall back to a plain call.
            if "response_format" in str(e) or "structured" in str(e).lower():
                # Build a plain prompt that asks for JSON output.
                plain_prompt = f"""{enhanced_analysis_prompt}

请以如下JSON格式返回结果:
{{"analysis": "你的分析内容"}}

只需返回有效的JSON，不要包含其他内容。"""
                
                response = llm.invoke([("human", plain_prompt)])
                response_content = response.content if hasattr(response, 'content') else str(response)
                
                # Try to parse the JSON response.
                import json
                try:
                    # Coerce the response content to str before parsing.
                    response_content_str = response_content if isinstance(response_content, str) else str(response_content)
                    response_dict = json.loads(response_content_str)
                    analysis_result = PromptAnalysis(analysis=response_dict.get("analysis", response_content_str))
                except json.JSONDecodeError:
                    # On JSON failure, use the whole response as the analysis.
                    response_content_str = response_content if isinstance(response_content, str) else str(response_content)
                    analysis_result = PromptAnalysis(analysis=response_content_str)
            else:
                # Any other exception is re-raised unchanged.
                raise e
        
        # Normalize the analysis result to a plain string.
        if isinstance(analysis_result, PromptAnalysis):
            requirement_analysis = analysis_result.analysis
        elif isinstance(analysis_result, dict):
            requirement_analysis = analysis_result.get("analysis", str(analysis_result))
        else:
            requirement_analysis = str(analysis_result)
            
        return {
            "requirement_analysis": requirement_analysis,
            "context_info": {
                "recommended_templates": recommended_templates,
                "domain_context": domain_context,
                "analysis_timestamp": datetime.datetime.now().isoformat()
            },
            "current_stage": "analysis"
        }


class PromptDesignModule(BasePromptEngineeringModule, IPromptDesignModule):
    """
    Prompt design module.

    Designs the initial prompt from the requirement analysis, taking
    domain context, template recommendations and user preferences into
    account.
    """
    
    def __init__(self):
        # Module id + human-readable description (runtime string, kept in Chinese).
        super().__init__("prompt_design", "根据需求分析设计初始提示词")
    
    def execute(self, state: PromptEngineeringState) -> Dict[str, Any]:
        """Entry point: design a prompt from the state's requirement analysis."""
        # Apply performance optimization to the incoming state.
        state = apply_performance_optimization(state)
        # Repackage the pieces design_prompt needs into context_info.
        context_info = state.get('context_info', {})
        context_info['original_requirement'] = state.get('requirement', '')
        context_info['user_preferences'] = state.get('user_preferences', {})
        
        result = self.design_prompt(
            state.get('requirement_analysis', ''),
            context_info
        )
        return {
            "current_prompt": result,
            "current_stage": "design"
        }
    
    def design_prompt(self, requirement_analysis: str, context_info: Optional[Dict[str, Any]] = None) -> str:
        """Design a prompt via the LLM.

        Args:
            requirement_analysis: Analysis text from the previous stage.
            context_info: Optional extras: domain context, recommended
                templates, original requirement and user preferences.

        Returns:
            The generated prompt text, stripped of surrounding whitespace.

        Raises:
            Exception: If the LLM could not be initialized.
        """
        _initialize_components()
        
        # Pull domain context and template recommendations out of context_info.
        domain_context = ""
        recommended_templates = []
        template_recommendations = []  # NOTE(review): assigned below but never read afterwards.
        original_requirement = ""
        user_preferences = {}
        
        if context_info:
            domain_context = context_info.get("domain_context", "")
            recommended_templates = context_info.get("recommended_templates", [])
            template_recommendations = recommended_templates  # keep a reference to the recommendations
            original_requirement = context_info.get("original_requirement", "")
            user_preferences = context_info.get("user_preferences", {})
        
        # Adjust the design style according to the user's preferences.
        style_preference = user_preferences.get("preferred_style", "technical")
        length_preference = user_preferences.get("preferred_length", "long")
        structure_preference = user_preferences.get("preferred_structure", "structured")
        
        # Build the design prompt (runtime text kept in Chinese).
        # NOTE(review): unlike analyze_requirement, the template join below
        # assumes every recommended template is a dict — a non-dict entry
        # would raise AttributeError on .get. Confirm the element type.
        design_prompt = f"""基于以下需求分析设计一个高质量的提示词：

需求分析：
{requirement_analysis}

原始需求：
{original_requirement}

领域上下文：
{domain_context}

推荐的模板：
{', '.join([t.get('name', 'N/A') for t in recommended_templates]) if recommended_templates else '无'}

用户偏好设置：
- 语言风格: {style_preference}
- 内容长度: {length_preference}
- 组织结构: {structure_preference}

请按照以下结构设计提示词：
1. 系统角色定义：明确AI应该扮演的角色和专业领域
2. 任务说明：详细描述需要执行的任务和关键步骤
3. 输出格式：明确定义输出的结构和格式要求
4. 约束条件：列出必要的限制和边界条件
5. 示例（如果适用）：提供具体的示例以帮助理解

设计的提示词应该清晰、完整、具体，并且能够产生高质量的输出。

输出要求：
1. 使用{style_preference}语言风格
2. 提供{length_preference}长度的内容
3. 采用{structure_preference}组织方式
4. 只返回设计好的提示词，不要包含其他解释内容。"""

        # Invoke the LLM to generate the prompt; re-check lazy init first.
        if llm is None:
            _initialize_components()
            if llm is None:
                raise Exception("Failed to initialize LLM")
                
        response = llm.invoke([("human", design_prompt)])
        
        # Normalize the response to plain text.
        generated_prompt = ""
        if hasattr(response, 'content'):
            generated_prompt = str(response.content)
        else:
            generated_prompt = str(response)
            
        return generated_prompt.strip()


class PromptEvaluationModule(BasePromptEngineeringModule, IPromptEvaluationModule):
    """
    Prompt evaluation module.

    Scores the quality of the current prompt and maintains the running
    quality history in the workflow state.
    """

    def __init__(self):
        super().__init__("prompt_evaluation", "评估当前提示词质量")

    def execute(self, state: PromptEngineeringState) -> Dict[str, Any]:
        """Entry point: evaluate the state's current prompt and extend the history."""
        # Apply performance optimization to the incoming state.
        state = apply_performance_optimization(state)
        evaluation = self.evaluate_prompt(
            state.get('current_prompt', '') or '',
            state.get('requirement', '')
        )

        # Append this evaluation's details to a copy of the history so the
        # list held in the original state is left untouched.
        history = list(state.get('quality_history', []))
        if 'quality_evaluation_details' in evaluation:
            history.append(evaluation['quality_evaluation_details'])

        evaluation['quality_history'] = history
        return evaluation

    def evaluate_prompt(self, prompt: str, requirement: str) -> Dict[str, Any]:
        """Score *prompt* against *requirement* via the advanced evaluator."""
        _initialize_components()

        # Empty prompts get a fixed zero-score result.
        if not prompt:
            return {
                "prompt_evaluation": "无法评估空提示词",
                "quality_score": 0.0,
                "current_stage": "evaluation"
            }

        report = advanced_evaluate_prompt_quality(prompt, requirement)
        return {
            "prompt_evaluation": report.get("explanation", "评估完成"),
            "quality_score": report.get("overall_score", 0.0),
            "quality_evaluation_details": report,
            "current_stage": "evaluation"
        }


class PromptOptimizationModule(BasePromptEngineeringModule, IPromptOptimizationModule):
    """
    Prompt optimization module.

    Optimizes the current prompt by asking the LLM for several candidate
    versions in one call and picking the best-scoring one.
    """
    
    def __init__(self):
        super().__init__("prompt_optimization", "优化当前提示词")
        # Number of optimization candidates requested per round.
        self.max_parallel_versions = 3  # 最大并行优化版本数
    
    def execute(self, state: PromptEngineeringState) -> Dict[str, Any]:
        """Entry point: optimize the state's current prompt (multi-version)."""
        # Apply performance optimization to the incoming state.
        state = apply_performance_optimization(state)
        
        # Current prompt plus its latest evaluation and any human feedback.
        current_prompt = state.get('current_prompt', '') or ''
        evaluation_details = state.get('quality_evaluation_details', {}) or {}
        human_feedback = state.get('human_feedback')
        
        # Run the optimization to obtain the best candidate.
        optimization_result = self.optimize_prompt(
            current_prompt,
            evaluation_details,
            human_feedback
        )
        
        # NOTE(review): optimize_prompt currently always returns a str, so
        # the dict branch below is dead — confirm whether a multi-version
        # dict payload was ever meant to be returned.
        if isinstance(optimization_result, dict):
            best_version = optimization_result.get('best_version', current_prompt)
            prompt_versions = optimization_result.get('prompt_versions', {})
        else:
            # String result (backward-compatible path).
            best_version = optimization_result
            prompt_versions = {}
        
        # Return the optimization result.
        return {
            "current_prompt": best_version,
            "prompt_versions": prompt_versions,
            "current_stage": "optimization"
        }
    
    def optimize_prompt(self, prompt: str, evaluation_result: Dict[str, Any], 
                       feedback: Optional[str] = None) -> Union[str, Dict[str, Any]]:
        """Optimize *prompt*: generate multiple candidates, return the best.

        Args:
            prompt: The prompt to optimize; returned unchanged when empty
                or when the LLM call fails.
            evaluation_result: Latest quality evaluation; its "explanation"
                is used as the optimization goal.
            feedback: Optional human feedback to steer the optimization.

        Returns:
            The best candidate prompt as a string (the Union return type
            is kept for backward compatibility).
        """
        _initialize_components()
        
        if not prompt:
            return prompt
        
        # Defensive double-check that the lazily injected LLM exists.
        if llm is None:
            _initialize_components()
            if llm is None:
                raise Exception("Failed to initialize LLM")
        
        # Optimization goal: the evaluation explanation, or a generic default.
        optimization_goal = evaluation_result.get("explanation", 
                            "提高提示词的清晰度、完整性和相关性，确保能产生高质量的输出")
        
        # Build the optimization prompt (asks for multiple candidate versions).
        optimization_prompt = f"""基于以下提示词和评估结果，生成{self.max_parallel_versions}个不同的优化版本：

当前提示词：
{prompt}

评估结果：
{optimization_goal}

人工反馈：
{feedback or "无具体反馈"}

请按照以下格式返回结果：
### 优化版本A
[第一个优化版本的完整提示词]

### 优化版本B
[第二个优化版本的完整提示词]

### 优化版本C
[第三个优化版本的完整提示词]

每个版本应该在不同方面进行优化，例如：
1. 版本A：专注于改进结构和清晰度
2. 版本B：专注于增强内容完整性和具体性
3. 版本C：专注于提高相关性和适应性

只需返回优化后的提示词版本，不要包含其他解释内容。"""

        try:
            # Ask the LLM for the candidate versions.
            response = llm.invoke([("human", optimization_prompt)])
            
            if hasattr(response, 'content'):
                response_content = str(response.content)
            else:
                response_content = str(response)
            
            # Split the response into individual candidate versions.
            optimized_versions = self._parse_optimized_versions(response_content)
            
            # No parsable versions: fall back to the raw response text.
            if not optimized_versions:
                return response_content
            
            # Score each candidate.
            # NOTE(review): the original prompt is passed as the
            # "requirement" argument here — confirm that is intentional.
            evaluation_results = [
                advanced_evaluate_prompt_quality(version, prompt) 
                for version in optimized_versions
            ]
            
            # Keep every candidate keyed version_A/B/C for A/B testing.
            # NOTE(review): prompt_versions is built but never returned,
            # since only the best version string is returned below.
            prompt_versions = {}
            for i, version in enumerate(optimized_versions):
                version_key = f"version_{chr(65+i)}"  # version_A, version_B, version_C
                prompt_versions[version_key] = version
            
            # Pick the highest-scoring candidate.
            if evaluation_results:
                best_version_index = max(
                    range(len(evaluation_results)), 
                    key=lambda i: evaluation_results[i]["overall_score"]
                )
                best_version = optimized_versions[best_version_index]
                best_version_key = f"version_{chr(65+best_version_index)}"  # currently unused
            else:
                best_version = prompt
                best_version_key = "original"  # currently unused
            
            # Return the best version string to keep the interface consistent.
            return best_version
            
        except Exception as e:
            # On any failure, log and fall back to the original prompt.
            print(f"Error during prompt optimization: {str(e)}")
            return prompt
    
    def _parse_optimized_versions(self, response_content: str) -> List[str]:
        """Split the LLM response into individual optimized prompt versions.

        First tries the explicit "### 优化版本" delimiter; failing that,
        falls back to regex splitting on version-like headings.
        """
        # Simple parsing logic — adjust to the actual LLM output format.
        versions = []
        
        # Split on the explicit version delimiter.
        version_delimiter = "### 优化版本"
        content_parts = response_content.split(version_delimiter)
        
        if len(content_parts) > 1:
            # Skip the leading text before the first delimiter.
            for part in content_parts[1:]:
                # Trim anything after a recognized end marker.
                end_markers = ["###", "=====", "**接下来是版本**"]
                for marker in end_markers:
                    if marker in part:
                        content_end = part.find(marker)
                        if content_end > 0:
                            part = part[:content_end]
                            break
                
                # Clean up and collect the candidate text.
                cleaned_part = part.strip()
                if cleaned_part:
                    versions.append(cleaned_part)
        else:
            # No explicit delimiter: split on version-like markers instead.
            import re
            pattern = r'版本\s*\d+[:：]|\d+\.\s*优化结果|[Vv]ersion\s*[A-Z][:]?'
            parts = re.split(pattern, response_content, flags=re.IGNORECASE)
            
            for part in parts:
                cleaned_part = part.strip()
                if cleaned_part:
                    versions.append(cleaned_part)
        
        return versions

# 保持原有的函数接口以确保向后兼容性
def analyze_prompt_requirement(state: PromptEngineeringState) -> Dict[str, Any]:
    """Analyze the user's prompt requirements and produce an analysis report.

    Thin backward-compatible wrapper that delegates to
    RequirementAnalysisModule.
    """
    return RequirementAnalysisModule().execute(state)


def design_initial_prompt(state: PromptEngineeringState) -> Dict[str, Any]:
    """Design the initial prompt.

    Thin backward-compatible wrapper that delegates to PromptDesignModule.
    """
    return PromptDesignModule().execute(state)

def evaluate_prompt(state: PromptEngineeringState) -> Dict[str, Any]:
    """Evaluate the quality of the current prompt.

    Thin backward-compatible wrapper that delegates to PromptEvaluationModule.
    """
    return PromptEvaluationModule().execute(state)


def optimize_prompt(state: PromptEngineeringState) -> Dict[str, Any]:
    """Optimize the current prompt, supporting parallel multi-version tuning.

    Backward-compatible wrapper: pulls the current prompt and its evaluation
    details out of the state, delegates to PromptOptimizationModule, and
    returns the winning version as ``current_prompt``.
    """
    prompt = state.get('current_prompt', '') or ''
    details = state.get('quality_evaluation_details', {}) or {}

    winner = PromptOptimizationModule().optimize_prompt(prompt, details)

    return {
        "current_prompt": winner,
        "current_stage": "optimization",
    }


def process_human_feedback(state: PromptEngineeringState) -> Dict[str, Any]:
    """Process human feedback, classifying it by type and priority.

    Scans the feedback text for keywords to determine a feedback type
    (structure/content/format/error/performance), its priority, and the
    matching optimization strategy, then appends the classified entry to a
    copy of the feedback history.

    Returns:
        Updated state fields when feedback is present; otherwise only
        ``current_stage`` set to ``"no_feedback"``.
    """
    human_feedback = state.get('human_feedback', '')

    if not human_feedback:
        return {
            "current_stage": "no_feedback"
        }

    # Ordered classification rules: (type, priority, strategy, keywords).
    # First match wins, mirroring the original elif chain; also collapses the
    # previously duplicated type->strategy mapping into one table.
    rules = [
        ("structure", "high", "focus_on_structure", ["结构", "structure", "组织", "框架"]),
        ("content", "high", "focus_on_content", ["内容", "content", "信息", "要点"]),
        ("format", "medium", "focus_on_format", ["格式", "format", "样式", "外观"]),
        ("error", "high", "fix_errors", ["错误", "error", "问题", "bug"]),
        ("performance", "medium", "improve_performance", ["性能", "performance", "速度", "效率"]),
    ]

    feedback_type, feedback_priority, optimization_strategy = "general", "medium", ""
    for ftype, priority, strategy, keywords in rules:
        if any(keyword in human_feedback for keyword in keywords):
            feedback_type, feedback_priority, optimization_strategy = ftype, priority, strategy
            break

    feedback_entry = {
        "type": "human",
        "content": human_feedback,
        "feedback_type": feedback_type,
        "priority": feedback_priority,
        # Use the module-level datetime import instead of the __import__ hack.
        "timestamp": datetime.datetime.now().isoformat()
    }

    # Copy before appending so the list held by the incoming state is not
    # mutated in place.
    feedback_history = state.get('feedback_history', []).copy()
    feedback_history.append(feedback_entry)

    return {
        "human_intervened": True,
        "feedback_history": feedback_history,
        "optimization_strategy": optimization_strategy,
        "current_stage": "feedback_processed"
    }


def _assess_feedback_priority(feedback: str, feedback_type: str) -> str:
    """
    评估反馈优先级
    
    Args:
        feedback: 反馈内容
        feedback_type: 反馈类型
        
    Returns:
        优先级（high, medium, low）
    """
    # 关键词权重
    high_priority_keywords = ["错误", "失败", "无法", "error", "fail", "broken", "critical", "严重", "必须"]
    medium_priority_keywords = ["改进", "优化", "建议", "improve", "optimize", "suggest", "可以", "应该"]
    
    # 根据类型确定基础优先级
    type_priority = {
        "safety": "high",
        "structure": "medium",
        "content": "medium",
        "format": "low",
        "clarity": "medium"
    }
    
    base_priority = type_priority.get(feedback_type, "medium")
    
    # 检查关键词
    high_count = sum(1 for keyword in high_priority_keywords if keyword in feedback.lower())
    medium_count = sum(1 for keyword in medium_priority_keywords if keyword in feedback.lower())
    
    # 根据关键词调整优先级
    if high_count > 0:
        return "high"
    elif medium_count > 1:
        return "medium"
    else:
        return base_priority

def finalize_prompt(state: PromptEngineeringState) -> Dict[str, Any]:
    """Finalize the prompt and score its quality.

    Takes the current prompt from the state as the final result, scores it
    with ``evaluate_prompt_quality``, and marks the workflow as complete.
    """
    current_prompt = state.get('current_prompt', '')

    # An empty/missing prompt is normalized to the empty string.
    final_prompt = "" if not current_prompt else str(current_prompt)

    return {
        "final_prompt": final_prompt,
        "design_reasoning": "基于需求分析和多轮优化生成的提示词",
        "workflow_complete": True,
        "quality_score": evaluate_prompt_quality(final_prompt),
        "current_stage": "final"
    }

def should_continue_analysis(state: PromptEngineeringState) -> str:
    """Decide whether to run another optimization round.

    Returns:
        "optimize" to continue optimizing, "finalize" to stop.
    """
    iteration_count = state.get('iteration_count', 0)
    human_intervened = state.get('human_intervened', False)
    quality_score = state.get('quality_score', 0.0)
    quality_history = state.get('quality_history', [])

    # Named once so the decision logic and the logged factors cannot drift.
    quality_threshold = 0.85
    max_iterations = 5

    # Quality is "stable" when the last three scores differ pairwise by < 0.05.
    quality_stable = False
    if len(quality_history) >= 3:
        recent_scores = [
            item.get('overall_score', 0) if isinstance(item, dict) else item
            for item in quality_history[-3:]
        ]
        score_diffs = [abs(recent_scores[i] - recent_scores[i - 1]) for i in range(1, len(recent_scores))]
        quality_stable = all(diff < 0.05 for diff in score_diffs)

    # Human intervention always forces another optimization round.
    if human_intervened:
        decision = "optimize"
        reasoning = "检测到人工介入，需要根据反馈进行优化"
    # Stop when quality is stable and good enough, or iterations are exhausted.
    elif (quality_stable and quality_score >= quality_threshold) or iteration_count >= max_iterations:
        decision = "finalize"
        reasoning = "质量评分达标或已达最大迭代次数，结束优化流程"
    # Otherwise keep optimizing while below threshold and under the cap.
    elif quality_score < quality_threshold and iteration_count < max_iterations:
        decision = "optimize"
        reasoning = f"质量评分({quality_score})低于阈值(0.85)且迭代次数({iteration_count})未超限(5)"
    else:
        decision = "finalize"
        reasoning = "满足终止条件，结束优化流程"

    decision_entry = {
        "decision": decision,
        "reasoning": reasoning,
        "factors": {
            "human_intervened": human_intervened,
            "quality_score": quality_score,
            "iteration_count": iteration_count,
            "quality_stable": quality_stable,
            "quality_threshold": quality_threshold,
            "max_iterations": max_iterations
        },
        # Use the module-level datetime import instead of the __import__ hack.
        "timestamp": datetime.datetime.now().isoformat()
    }

    # NOTE(review): this append only persists when the state already holds a
    # decision_log list (in-place mutation); with the default [] the entry is
    # discarded since only the decision string is returned — confirm intent.
    decision_log = state.get('decision_log', [])
    decision_log.append(decision_entry)

    return decision


def should_process_feedback(state: PromptEngineeringState) -> str:
    """Decide whether human feedback needs to be processed.

    Returns:
        "process_feedback" when feedback text is present, "evaluate" otherwise.
    """
    human_feedback = state.get('human_feedback', '')

    if human_feedback:
        decision = "process_feedback"
        reasoning = f"检测到人工反馈内容，长度为{len(human_feedback)}字符"
    else:
        decision = "evaluate"
        reasoning = "未检测到人工反馈，直接进入评估阶段"

    # Built for logging/inspection only: routing functions here cannot write
    # back into the state, so this explanation is intentionally not returned.
    # In a real deployment it would go to a shared log or tracing system.
    decision_explanation = {
        "decision": decision,
        "reasoning": reasoning,
        "factors": {
            "human_feedback_present": bool(human_feedback),
            "feedback_length": len(human_feedback) if human_feedback else 0
        },
        # Use the module-level datetime import instead of the __import__ hack.
        "timestamp": datetime.datetime.now().isoformat()
    }

    return decision

def ab_test_prompts(state: PromptEngineeringState) -> Dict[str, Any]:
    """Run an A/B comparison between the current and a previous prompt version.

    Bug fixes versus the original implementation:
      * the averages used ``sum(dict.values())``, which included the "prompt"
        string entry and raised TypeError on every call — now only the three
        numeric metrics are averaged;
      * the winner's prompt is actually promoted to ``current_prompt``
        (falling back to the existing prompt when the stored one is empty).
    """
    # 延迟初始化组件 (lazy component initialization)
    _initialize_components()

    current_prompt = state.get('current_prompt', '')
    prompt_versions = state.get('prompt_versions', {})

    # Simplified A/B data — real deployments would measure these metrics.
    ab_test_results = {
        "version_a": {
            "prompt": current_prompt,
            "accuracy": 0.85,
            "efficiency": 0.78,
            "user_satisfaction": 0.82
        },
        "version_b": {
            "prompt": prompt_versions.get("previous", ""),
            "accuracy": 0.79,
            "efficiency": 0.81,
            "user_satisfaction": 0.76
        }
    }

    # Average only the numeric metrics; the "prompt" entry is a string.
    metric_keys = ("accuracy", "efficiency", "user_satisfaction")
    avg_a = sum(ab_test_results["version_a"][k] for k in metric_keys) / len(metric_keys)
    avg_b = sum(ab_test_results["version_b"][k] for k in metric_keys) / len(metric_keys)

    best_version = "version_a" if avg_a > avg_b else "version_b"

    return {
        "ab_test_results": ab_test_results,
        "best_version": best_version,
        # Promote the winner's prompt; keep the current one if the stored
        # winning prompt is empty (e.g. no "previous" version recorded).
        "current_prompt": ab_test_results[best_version]["prompt"] or current_prompt,
        "current_stage": "ab_test_complete"
    }

def collect_user_feedback(state: PromptEngineeringState) -> Dict[str, Any]:
    """Collect and record user feedback (simulated).

    Copies the feedback list before appending so the incoming state is not
    mutated in place, matching the copy-then-append convention used elsewhere
    in this module (e.g. feedback_history / execution_time handling).
    """
    user_feedbacks = state.get('user_feedbacks', []).copy()

    # Simulated feedback entry; a real implementation would read user input.
    new_feedback = {
        "timestamp": datetime.datetime.now().isoformat(),
        "feedback_type": "user",
        "content": "用户反馈内容示例",
        "rating": 4.5
    }

    user_feedbacks.append(new_feedback)

    return {
        "user_feedbacks": user_feedbacks,
        "current_stage": "feedback_collected"
    }

def auto_optimize_prompt(state: PromptEngineeringState) -> Dict[str, Any]:
    """Automatically optimize the prompt based on evaluation results.

    Records an optimization-history entry (on a copy of the state's list, so
    the caller's list is not mutated in place) and produces a placeholder
    optimized prompt. The unused read of ``ab_test_results`` was dropped.
    """
    # 延迟初始化组件 (lazy component initialization)
    _initialize_components()

    current_prompt = state.get('current_prompt', '')
    prompt_evaluation = state.get('prompt_evaluation', '')

    optimization_entry = {
        "timestamp": datetime.datetime.now().isoformat(),
        "previous_prompt": current_prompt,
        "optimization_reason": "基于A/B测试和评估结果的自动优化",
        "changes_made": "优化了提示词结构和内容"
    }

    # Copy before appending so the state's list is not mutated in place.
    optimization_history = state.get('optimization_history', []).copy()
    optimization_history.append(optimization_entry)

    # Simplified placeholder optimization — a real implementation would call
    # the LLM with the evaluation feedback.
    optimized_prompt = f"优化后的提示词，基于以下评估: {prompt_evaluation}"

    return {
        "current_prompt": optimized_prompt,
        "optimization_history": optimization_history,
        "current_stage": "auto_optimized"
    }

def test_prompt_in_production(state: PromptEngineeringState) -> Dict[str, Any]:
    """Simulate testing the current prompt in a production-like environment.

    Returns fixed simulated metrics; the previous unused read of
    ``current_prompt`` had no effect on the result and was removed.
    """
    # Simulated production metrics — the prompt itself is not yet exercised.
    test_results = {
        "test_environment": "production_simulation",
        "success_rate": 0.92,
        "average_response_time": 1.25,
        "user_satisfaction": 0.88,
        "error_rate": 0.03
    }

    return {
        "test_results": test_results,
        "current_stage": "production_test_complete"
    }

def run_ab_test(state: PromptEngineeringState) -> Dict[str, Any]:
    """Run an A/B test comparing prompt variants and promote the best one.

    Each variant is sent to the LLM once and scored with
    ``evaluate_prompt_quality``; the highest-scoring variant becomes the new
    ``current_prompt``. Failures are reported through ``ab_test_results``.
    Step duration is recorded under ``execution_time['ab_test']``.
    """
    start_time = time.time()
    _initialize_components()

    current_prompt = state.get('current_prompt', '')
    ab_test_variants = state.get('ab_test_variants', [])

    if not current_prompt or not ab_test_variants:
        return {
            "ab_test_results": "缺少必要的输入数据",
            "current_stage": "ab_test"
        }

    try:
        # Check once up front instead of re-initializing/re-checking inside the
        # loop; _initialize_components() above already had its chance.
        if llm is None:
            raise RuntimeError("LLM未初始化")

        # Generate one response per variant.
        test_results = []
        for variant in ab_test_variants:
            response = llm.invoke([("human", variant)])
            test_results.append({
                "variant": variant,
                "response": response.content if hasattr(response, 'content') else str(response),
                "quality_score": evaluate_prompt_quality(variant)
            })

        # Simple selection — scores the prompt text, not the response quality;
        # a real evaluation would assess the responses themselves.
        best_variant = max(test_results, key=lambda x: x['quality_score'])

        execution_time = state.get('execution_time', {}).copy()
        execution_time['ab_test'] = time.time() - start_time

        return {
            "ab_test_results": {
                "all_results": test_results,
                "best_variant": best_variant['variant'],
                "best_variant_score": best_variant['quality_score']
            },
            "current_prompt": best_variant['variant'],
            "quality_score": best_variant['quality_score'],
            "current_stage": "ab_test",
            "execution_time": execution_time
        }
    except Exception as e:
        execution_time = state.get('execution_time', {}).copy()
        execution_time['ab_test'] = time.time() - start_time

        return {
            "ab_test_results": f"A/B测试失败: {str(e)}",
            "current_stage": "ab_test",
            "execution_time": execution_time
        }


def monitor_production_prompt(state: PromptEngineeringState) -> Dict[str, Any]:
    """Monitor the performance of the prompt deployed in production.

    Produces simulated monitoring metrics (only the call count comes from the
    supplied production data) and records the duration of this step under
    ``execution_time['production_monitoring']``.
    """
    started_at = time.time()
    _initialize_components()

    current_prompt = state.get('current_prompt', '')
    production_data = state.get('production_data', [])

    if not current_prompt:
        return {
            "production_monitoring": "缺少必要的提示词数据",
            "current_stage": "production_monitoring"
        }

    try:
        # Simulated monitoring figures.
        monitoring_results = {
            "prompt_effectiveness": 0.85,  # prompt effectiveness score
            "usage_statistics": {
                "total_calls": len(production_data),
                "success_rate": 0.92,
                "average_response_time": 1.5
            },
            "user_feedback": {
                "positive": 85,
                "negative": 15,
                "neutral": 0
            }
        }

        timings = state.get('execution_time', {}).copy()
        timings['production_monitoring'] = time.time() - started_at

        return {
            "production_monitoring": monitoring_results,
            "current_stage": "production_monitoring",
            "execution_time": timings
        }
    except Exception as e:
        timings = state.get('execution_time', {}).copy()
        timings['production_monitoring'] = time.time() - started_at

        return {
            "production_monitoring": f"生产监控失败: {str(e)}",
            "current_stage": "production_monitoring",
            "execution_time": timings
        }