#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
可解释性功能演示
展示如何提供决策解释、评分依据和优化建议说明
"""

import sys
import os

# Add the project root directory to the Python path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))

from src.research_core import (
    ExplainablePromptEngineering,
    advanced_evaluate_prompt_quality,
    PromptEngineeringState
)


async def demo_decision_explanation():
    """Demonstrate decision explanations for three simulated workflow states.

    For each (name, state, decision) scenario, asks the explainability
    engine why the given decision was (or would be) taken and prints the
    reasoning, confidence, and contributing factors.
    """
    print("=== 决策解释功能演示 ===\n")

    # Engine that turns a workflow state + decision into a human-readable rationale.
    engine = ExplainablePromptEngineering()

    # (scenario name, simulated state, decision to explain)
    scenarios = [
        (
            "需要优化场景",
            {'human_intervened': False, 'quality_score': 0.65, 'iteration_count': 2},
            "optimize",
        ),
        (
            "人工介入场景",
            {'human_intervened': True, 'quality_score': 0.9, 'iteration_count': 4},
            "optimize",
        ),
        (
            "完成流程场景",
            {'human_intervened': False, 'quality_score': 0.85, 'iteration_count': 3},
            "finalize",
        ),
    ]

    for name, state, decision in scenarios:
        print(f"{name}:")
        explanation = engine.explain_decision(state, decision)
        print(f"  决策: {explanation['decision']}")
        print(f"  理由: {explanation['reasoning']}")
        print(f"  置信度: {explanation['confidence']:.2f}")
        print(f"  考虑因素:")
        for factor, value in explanation['factors'].items():
            print(f"    - {factor}: {value}")
        print()


async def demo_quality_score_explanation():
    """Demonstrate score explanations for prompts of increasing quality.

    Evaluates three sample prompts (low / medium / high quality) against a
    fixed requirement and prints the overall score, the textual explanation,
    and each scoring dimension.
    """
    print("=== 质量评分解释功能演示 ===\n")

    # Sample prompts, ordered from low to high quality.
    sample_prompts = [
        # Low-quality prompt
        "写代码",

        # Medium-quality prompt
        """请写一个Python函数来计算斐波那契数列。
        需要包含注释说明。""",

        # High-quality prompt
        """你是一位经验丰富的Python开发专家。请根据以下要求编写代码：
        1. 功能需求：实现一个函数，用于计算斐波那契数列的前n项
        2. 技术要求：
           - 使用递归算法实现
           - 添加输入验证，确保n为非负整数
           - 包含异常处理
           - 添加详细的中文注释说明算法逻辑
        3. 输出要求：
           - 提供完整的可执行代码
           - 代码遵循PEP8规范
           - 包含简单的测试用例
        4. 示例：
           输入：n=5
           输出：[0, 1, 1, 2, 3, 5]""",
    ]

    # Requirement every prompt is evaluated against.
    requirement = "我需要一个能够帮助用户写Python代码的AI助手提示词，要求提供清晰、可运行的代码示例，并包含适当的注释解释代码逻辑。"

    print("不同质量提示词的评分解释:")
    print("=" * 50)

    # Keys of the per-dimension scores worth printing from the evaluation result.
    dimension_keys = {"clarity", "completeness", "specificity", "relevance", "structuredness", "ml_score"}

    for index, sample in enumerate(sample_prompts, 1):
        print(f"\n{index}. 提示词版本 {index}:")
        print(f"   内容预览: {sample[:50]}...")

        # Run the advanced (multi-dimensional) quality evaluation.
        evaluation = advanced_evaluate_prompt_quality(sample, requirement)

        print(f"   综合得分: {evaluation['overall_score']}")
        print(f"   评分解释: {evaluation['explanation']}")

        # Print each scoring dimension in the evaluation's own key order.
        print("   各维度评分:")
        for key, value in evaluation.items():
            if key in dimension_keys:
                print(f"     - {key}: {value}")


async def demo_optimization_suggestion_explanation():
    """Demonstrate how optimization suggestions are derived and explained.

    Builds a fully-populated mock workflow state, then prints the
    suggestion-generation logic, its information sources, the processing
    steps, and the resulting suggestions.
    """
    print("\n=== 优化建议解释功能演示 ===\n")

    # Engine that explains where each optimization suggestion came from.
    engine = ExplainablePromptEngineering()

    # Fully-populated mock state; field values are illustrative placeholders.
    state_payload = {
        'requirement': '需要一个高质量的提示词',
        'requirement_analysis': '分析用户需求',
        'current_prompt': '当前提示词内容',
        'human_feedback': '人工反馈内容',
        'feedback_history': [
            {
                'type': 'human',
                'content': '提示词的结构不够清晰，建议使用编号列表组织内容',
                'timestamp': '2023-01-01T00:00:00'
            }
        ],
        'optimization_goal': '优化提示词质量',
        'prompt_evaluation': '提示词在完整性方面有待改进，建议添加更多细节要求。',
        'final_prompt': '最终提示词',
        'design_reasoning': '设计说明',
        'iteration_count': 1,
        'workflow_complete': False,
        'human_intervened': False,
        'current_stage': 'optimization',
        'quality_score': 0.65,
        'execution_time': {},
        'context_info': {},
        'metadata': {},
        'ab_test_results': None,
        'performance_metrics': [],
        'user_feedbacks': [],
        'optimization_history': [],
        'prompt_versions': {},
        'test_results': None,
        'user_preferences': {},
        'domain_context': None,
        'interaction_history': [],
        'personalization_settings': {},
        'template_recommendations': [],
        'quality_evaluation_details': {
            'overall_score': 0.65,
            'clarity': 0.7,
            'completeness': 0.4,
            'specificity': 0.6,
            'relevance': 0.8,
            'structuredness': 0.5,
            'ml_score': 0.7
        },
        'ab_test_variants': [],
        'quality_history': [],
        'decision_log': [],
        'workflow_metrics': {}
    }
    state = PromptEngineeringState(state_payload)

    print("优化建议生成逻辑:")
    explanation = engine.explain_optimization_suggestion(state)
    print(f"  逻辑: {explanation['logic']}")
    print("  信息来源:")
    for source, value in explanation['sources'].items():
        print(f"    - {source}: {value}")
    print("  处理过程:")
    for step in explanation['process']:
        print(f"    {step}")
    print("  生成的建议:")
    for number, suggestion in enumerate(explanation['suggestions'], 1):
        print(f"    {number}. {suggestion}")


async def main():
    """Run all explainability demos sequentially."""
    demos = (
        demo_decision_explanation,
        demo_quality_score_explanation,
        demo_optimization_suggestion_explanation,
    )
    for demo in demos:
        await demo()


if __name__ == "__main__":
    # asyncio is imported lazily so importing this module as a library
    # has no extra side effects; asyncio.run drives the async demos.
    import asyncio
    asyncio.run(main())