"""
A/B测试模块
实现提示词版本的自动化比较和评估
"""

import time
import asyncio
from typing import Dict, List, Any, Optional, Callable
from src.research_core.prompt_eng_state import PromptEngineeringState
from src.research_core.prompt_eng_agents import advanced_evaluate_prompt_quality


class ABTester:
    """
    A/B tester for prompt engineering.

    Runs several prompt variants against a shared set of test cases,
    scores each variant, and reports the best-performing one.
    """

    def __init__(self):
        # Per-run results cache (not written by run_ab_test itself; kept
        # for backward compatibility with external callers).
        self.test_results = {}
        # Best-effort wiring of the ML-based evaluator: any failure
        # (missing module, bad model files) degrades gracefully to the
        # rule-based evaluator only, so a broad except is intentional here.
        try:
            from src.research_core.ml_prompt_evaluator import create_ml_evaluator_with_examples
            self.ml_evaluator = create_ml_evaluator_with_examples()
        except Exception:
            self.ml_evaluator = None

    async def run_ab_test(self,
                          prompt_variants: List[str],
                          test_cases: List[Dict[str, Any]],
                          evaluation_metrics: Optional[List[str]] = None) -> Dict[str, Any]:
        """
        Run an A/B test comparing several prompt variants.

        Args:
            prompt_variants: Prompt texts to compare against each other.
            test_cases: Test cases, each containing an input and an
                expected output.
            evaluation_metrics: Metric names to report; when None, defaults
                to ["accuracy", "consistency", "efficiency"].

        Returns:
            A dict with per-variant results, the best variant name, a
            summary, and a timestamp — or {"error": ...} on missing input.
        """
        if not prompt_variants or not test_cases:
            return {"error": "缺少必要的测试数据"}

        if evaluation_metrics is None:
            evaluation_metrics = ["accuracy", "consistency", "efficiency"]

        # Results per variant, keyed "variant_1", "variant_2", ...
        results = {}

        for i, prompt in enumerate(prompt_variants):
            variant_name = f"variant_{i+1}"
            print(f"正在测试版本 {variant_name}...")

            variant_results = await self._test_prompt_variant(
                prompt, test_cases, evaluation_metrics
            )

            results[variant_name] = {
                "prompt": prompt,
                "results": variant_results,
                "overall_score": self._calculate_overall_score(variant_results)
            }

        best_variant = self._determine_best_variant(results)

        return {
            "variants": results,
            "best_variant": best_variant,
            "test_summary": self._generate_test_summary(results),
            "timestamp": time.time()
        }

    def run_ab_test_sync(self,
                         prompt_variants: List[str],
                         test_cases: List[Dict[str, Any]],
                         evaluation_metrics: Optional[List[str]] = None) -> Dict[str, Any]:
        """
        Run the A/B test synchronously.

        Args:
            prompt_variants: Prompt texts to compare.
            test_cases: Test cases to run each variant against.
            evaluation_metrics: Metric names to report (see run_ab_test).

        Returns:
            The dict produced by run_ab_test.

        Raises:
            RuntimeError: When called from inside a running event loop
                (callers there should ``await run_ab_test(...)`` instead).
        """
        # Fixed: asyncio.get_event_loop() is deprecated when no loop is
        # running, and run_until_complete() would fail inside a running
        # loop anyway. Use asyncio.run() on the no-loop path and raise the
        # same RuntimeError type the old code hit on the running-loop path.
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            return asyncio.run(
                self.run_ab_test(prompt_variants, test_cases, evaluation_metrics)
            )
        raise RuntimeError(
            "run_ab_test_sync() cannot be called from a running event loop; "
            "await run_ab_test() instead"
        )

    async def _test_prompt_variant(self,
                                   prompt: str,
                                   test_cases: List[Dict[str, Any]],
                                   metrics: List[str]) -> Dict[str, Any]:
        """
        Test a single prompt variant.

        Args:
            prompt: The prompt text under test.
            test_cases: Test cases (currently unused by the simulation).
            metrics: Metric names to include in the result.

        Returns:
            Per-metric scores plus quality-evaluation details and ml_score.
        """
        # A real implementation would call the LLM per test case; this demo
        # simulates results from the static quality evaluation instead.

        # Simulated test latency.
        await asyncio.sleep(0.1)

        # Rule-based quality evaluation of the prompt text.
        quality_eval = advanced_evaluate_prompt_quality(prompt)

        # Optionally add the ML evaluator's score; a failure here is
        # non-fatal and leaves ml_score at its 0.0 placeholder.
        ml_score = 0.0
        if self.ml_evaluator:
            try:
                ml_eval = self.ml_evaluator.evaluate_prompt(prompt)
                ml_score = ml_eval.get("ml_score", 0.0)
            except Exception:
                pass

        # Simulated metric values derived from the quality evaluation.
        simulated_results = {
            "accuracy": 0.7 + (quality_eval["overall_score"] * 0.3),
            "consistency": 0.65 + (quality_eval["structuredness"] * 0.35),
            "efficiency": 0.6 + (quality_eval["clarity"] * 0.4),
            "quality_evaluation": quality_eval,
            "ml_score": ml_score
        }

        # Keep only the requested metrics (unknown names score 0.0) ...
        filtered_results = {
            metric: simulated_results.get(metric, 0.0)
            for metric in metrics
        }

        # ... but always attach the evaluation details and ML score.
        filtered_results["quality_evaluation"] = quality_eval
        filtered_results["ml_score"] = ml_score

        return filtered_results

    def _calculate_overall_score(self, results: Dict[str, Any]) -> float:
        """
        Compute a variant's overall score.

        Args:
            results: Per-metric results from _test_prompt_variant.

        Returns:
            The mean of the numeric metric values, or a quality-based
            fallback when no numeric metrics are present.
        """
        # Average only numeric metrics. Fixed: a 0.0 ml_score is just the
        # "no ML evaluator" placeholder, so including it deflated every
        # variant's score; exclude it unless it carries a real value.
        numeric_scores = {
            k: v for k, v in results.items()
            if isinstance(v, (int, float))
            and k != "quality_evaluation"
            and not (k == "ml_score" and v <= 0)
        }

        if not numeric_scores:
            # No numeric metrics: fall back to the quality evaluation's
            # overall score, blended with the ML score when available.
            quality_eval = results.get("quality_evaluation", {})
            base_score = quality_eval.get("overall_score", 0.0)
            ml_score = results.get("ml_score", 0.0)
            return (base_score + ml_score) / 2 if ml_score > 0 else base_score

        return sum(numeric_scores.values()) / len(numeric_scores)

    def _determine_best_variant(self, results: Dict[str, Any]) -> str:
        """
        Pick the best variant by overall score.

        Args:
            results: All variants' results keyed by variant name.

        Returns:
            The winning variant's name, or "" when there are no results.
        """
        if not results:
            return ""

        best_variant = max(results.items(), key=lambda x: x[1]["overall_score"])
        return best_variant[0]

    def _generate_test_summary(self, results: Dict[str, Any]) -> Dict[str, Any]:
        """
        Summarize the test run.

        Args:
            results: All variants' results keyed by variant name.

        Returns:
            Count, mean, extremes, and variance of the overall scores.
        """
        if not results:
            return {"summary": "无测试结果"}

        scores = [variant["overall_score"] for variant in results.values()]

        return {
            "total_variants": len(results),
            "average_score": sum(scores) / len(scores),
            "highest_score": max(scores),
            "lowest_score": min(scores),
            "score_variance": self._calculate_variance(scores)
        }

    def _calculate_variance(self, values: List[float]) -> float:
        """
        Compute the population variance of a list of values.

        Args:
            values: Numeric values.

        Returns:
            The variance, or 0.0 for fewer than two values.
        """
        if len(values) <= 1:
            return 0.0

        mean = sum(values) / len(values)
        squared_diffs = [(x - mean) ** 2 for x in values]
        return sum(squared_diffs) / len(values)


def compare_prompt_versions(state: PromptEngineeringState) -> Dict[str, Any]:
    """
    Compare prompt versions (workflow node) via automated A/B testing.

    Collects the current prompt plus all distinct historical versions
    from the state, runs an A/B test over them, and promotes the winner
    to be the new current prompt.

    Args:
        state: Current workflow state (dict-like; read via .get).

    Returns:
        State updates: the A/B results, the winning prompt, its score,
        and the new stage — or an error payload when fewer than two
        versions exist or the test itself reports an error.
    """
    current_prompt = state.get('current_prompt', '')
    prompt_versions = state.get('prompt_versions', {})

    # Deduplicated list of candidate prompts, current prompt first.
    variants = [current_prompt] if current_prompt else []
    for prompt_content in prompt_versions.values():
        if prompt_content and prompt_content not in variants:
            variants.append(prompt_content)

    if len(variants) < 2:
        return {
            "ab_test_results": {"error": "至少需要两个版本才能进行比较"},
            "current_stage": "ab_test"
        }

    tester = ABTester()

    # Test cases and metrics come from the state, with demo defaults.
    test_cases = state.get('test_cases', [
        {"input": "测试输入1", "expected": "期望输出1"},
        {"input": "测试输入2", "expected": "期望输出2"}
    ])
    evaluation_metrics = state.get('evaluation_metrics', ["accuracy", "consistency", "efficiency"])

    # Drive the coroutine through the tester's synchronous entry point
    # instead of duplicating the event-loop management inline.
    ab_test_results = tester.run_ab_test_sync(variants, test_cases, evaluation_metrics)

    # Fixed: run_ab_test can return {"error": ...} (e.g. when the state
    # supplies an empty test_cases list); the old code then crashed with
    # a KeyError while indexing "variants"/"best_variant".
    best_variant = ab_test_results.get("best_variant")
    if "error" in ab_test_results or not best_variant:
        return {
            "ab_test_results": ab_test_results,
            "current_stage": "ab_test"
        }

    best_prompt = ab_test_results["variants"][best_variant]["prompt"]

    # Promote the winner into the state.
    return {
        "ab_test_results": ab_test_results,
        "current_prompt": best_prompt,
        "quality_score": ab_test_results["variants"][best_variant]["overall_score"],
        "current_stage": "ab_test_complete",
        "test_summary": ab_test_results["test_summary"]
    }


def create_ab_test_workflow():
    """
    Create a dedicated A/B-testing workflow.

    Placeholder only: no workflow is built yet, so this returns None.
    """
    return None


# Public API of this module: the tester class and workflow helpers.
__all__ = [
    'ABTester',
    'compare_prompt_versions',
    'create_ab_test_workflow'
]