"""
多变量测试模块
实现多维度提示词版本的自动化比较和评估
"""

import asyncio
import itertools
import logging
import random
import time
from collections import defaultdict
from datetime import datetime, timezone
from typing import Any, Callable, Dict, List, Optional

import numpy as np

from src.research_core.prompt_eng_agents import advanced_evaluate_prompt_quality
from src.research_core.prompt_eng_state import PromptEngineeringState

# 配置logger
logger = logging.getLogger(__name__)


class MultivariateTester:
    """
    Multivariate tester.

    Automates testing and comparison of prompt variants generated from every
    combination of the supplied factors (full factorial design), scores each
    variant against a set of test cases, and analyses per-factor impact.
    """

    def __init__(self):
        # Completed test results, available for external bookkeeping.
        self.test_results = {}
        # Optional ML-based evaluator; when the project module cannot be
        # imported or initialised, fall back to the heuristic scoring
        # implemented in _evaluate_result.
        try:
            from src.research_core.ml_prompt_evaluator import create_ml_evaluator_with_examples
            self.ml_evaluator = create_ml_evaluator_with_examples()
        except Exception:
            self.ml_evaluator = None

    async def run_mvt_test(self,
                          factors: Dict[str, List[Any]],
                          test_cases: List[Dict[str, Any]],
                          evaluation_metrics: Optional[List[str]] = None) -> Dict[str, Any]:
        """
        Run a multivariate test comparing all factor combinations.

        Args:
            factors: Mapping of factor name to its candidate values, e.g.
                     {'prompt_style': ['formal', 'casual', 'technical'],
                      'response_length': ['short', 'medium', 'long']}
            test_cases: Test cases, each containing an input and expected output.
            evaluation_metrics: Metric names to score; defaults to
                                ["accuracy", "consistency", "efficiency"].

        Returns:
            Dict with per-variant results, the best variant, a summary and a
            per-factor impact analysis, or an {"error": ...} dict on bad input.
        """
        if not factors or not test_cases:
            return {"error": "缺少必要的测试数据"}

        if evaluation_metrics is None:
            evaluation_metrics = ["accuracy", "consistency", "efficiency"]

        # Cartesian product of factor values: one variant per combination.
        combinations = list(itertools.product(*factors.values()))
        factor_names = list(factors.keys())

        # Test results keyed by variant name ("variant_1", "variant_2", ...).
        results = {}

        for i, combination in enumerate(combinations):
            variant_config = dict(zip(factor_names, combination))
            # Use the module logger (lazy %-formatting) instead of print so
            # progress output respects the host application's logging config.
            logger.info("正在测试组合 %d/%d: %s...", i + 1, len(combinations), variant_config)

            # Build the prompt variant for this configuration.
            variant_prompt = self._generate_prompt_with_config(variant_config)

            # Run every test case against the variant.
            variant_results = await self._test_prompt_variant(
                variant_prompt, test_cases, evaluation_metrics
            )

            results[f"variant_{i+1}"] = {
                "config": variant_config,
                "prompt": variant_prompt,
                "results": variant_results,
                "overall_score": self._calculate_overall_score(variant_results)
            }

        best_variant = self._determine_best_variant(results)

        return {
            "variants": results,
            "best_variant": best_variant,
            "test_summary": self._generate_test_summary(results),
            "factor_analysis": self._analyze_factor_impact(results, factor_names),
            "timestamp": time.time()
        }

    def _generate_prompt_with_config(self, config: Dict[str, Any]) -> str:
        """
        Generate a prompt from a factor configuration.

        Args:
            config: Factor configuration (may supply 'prompt_style' and
                    'response_length'; missing keys use defaults).

        Returns:
            The generated prompt, with a literal "{query}" placeholder left
            in place to be filled in at actual usage time.
        """
        # Example implementation: real deployments should replace this
        # template with application-specific prompt construction.
        prompt_template = "请以{style}的风格，提供{length}长度的回答：{query}"

        return prompt_template.format(
            style=config.get('prompt_style', 'technical'),
            length=config.get('response_length', 'medium'),
            query="{query}"  # keep the placeholder for later substitution
        )

    async def _test_prompt_variant(self,
                                  prompt: str,
                                  test_cases: List[Dict[str, Any]],
                                  evaluation_metrics: List[str]) -> Dict[str, Any]:
        """
        Test a single prompt variant against all test cases.

        Args:
            prompt: The prompt variant under test.
            test_cases: Test cases to execute.
            evaluation_metrics: Metric names to report.

        Returns:
            Dict with per-case results, aggregate metrics and execution time.
        """
        results = {
            "test_cases": [],
            "aggregate_metrics": {},
            "execution_time": 0
        }

        start_time = time.time()

        for test_case in test_cases:
            try:
                test_result = await self._execute_test_case(prompt, test_case)
                results["test_cases"].append(test_result)
            except Exception as e:
                # Best-effort: a failing case is recorded with zeroed metrics
                # instead of aborting the whole variant run.
                results["test_cases"].append({
                    "input": test_case.get("input", ""),
                    "expected_output": test_case.get("expected_output", ""),
                    "actual_output": "",
                    "error": str(e),
                    "metrics": {metric: 0 for metric in evaluation_metrics}
                })

        results["execution_time"] = time.time() - start_time

        results["aggregate_metrics"] = self._calculate_aggregate_metrics(
            results["test_cases"], evaluation_metrics
        )

        return results

    async def _execute_test_case(self, prompt: str, test_case: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute a single test case.

        Args:
            prompt: The prompt variant under test.
            test_case: Test case with 'input' and 'expected_output' keys.

        Returns:
            Dict with the case input, expected/actual output and metrics.
        """
        input_text = test_case.get("input", "")
        expected_output = test_case.get("expected_output", "")

        # Simulated execution — replace with a real model call in production.
        actual_output = f"基于提示词 '{prompt[:50]}...' 的响应"

        metrics = await self._evaluate_result(actual_output, expected_output)

        return {
            "input": input_text,
            "expected_output": expected_output,
            "actual_output": actual_output,
            "metrics": metrics
        }

    async def _evaluate_result(self, actual_output: str, expected_output: str) -> Dict[str, float]:
        """
        Evaluate a test result.

        Args:
            actual_output: Actual output produced for the case.
            expected_output: Expected output of the case.

        Returns:
            Scores for "accuracy", "consistency" and "efficiency".
        """
        # Prefer the ML evaluator when one was initialised.
        if self.ml_evaluator:
            try:
                evaluation = self.ml_evaluator.evaluate_prompt(
                    actual_output, expected_output
                )
                # Map evaluator outputs onto the metric names we report;
                # randomised fallbacks fill in any missing fields.
                return {
                    "accuracy": evaluation.get("ml_score", random.uniform(0.5, 1.0)),
                    "consistency": evaluation.get("semantic_coherence", random.uniform(0.6, 1.0)),
                    "efficiency": evaluation.get("similarity_to_examples", random.uniform(0.7, 1.0))
                }
            except Exception as e:
                # Lazy %-formatting: the message is only built if emitted.
                logger.warning("ML评估器执行失败: %s", e)

        # Default (placeholder) evaluation: random scores within fixed ranges.
        return {
            "accuracy": random.uniform(0.5, 1.0),
            "consistency": random.uniform(0.6, 1.0),
            "efficiency": random.uniform(0.7, 1.0)
        }

    def _calculate_aggregate_metrics(self,
                                   test_results: List[Dict[str, Any]],
                                   metrics: List[str]) -> Dict[str, Dict[str, float]]:
        """
        Aggregate per-case metric values into summary statistics.

        Args:
            test_results: Per-case results, each with a "metrics" dict.
            metrics: Metric names to aggregate.

        Returns:
            Mapping of metric name to its mean/std/min/max over the cases.
            (Annotation fixed: the original declared Dict[str, float] but
            the function returns nested per-metric stat dicts.)
        """
        aggregate = {}

        for metric in metrics:
            values = [result["metrics"].get(metric, 0) for result in test_results]
            if values:
                # Cast numpy scalars to plain floats so the result dict is
                # JSON-serializable, consistent with _calculate_overall_score.
                aggregate[metric] = {
                    "mean": float(np.mean(values)),
                    "std": float(np.std(values)),
                    "min": float(np.min(values)),
                    "max": float(np.max(values))
                }
            else:
                aggregate[metric] = {
                    "mean": 0,
                    "std": 0,
                    "min": 0,
                    "max": 0
                }

        return aggregate

    def _calculate_overall_score(self, variant_results: Dict[str, Any]) -> float:
        """
        Compute a variant's overall score.

        Args:
            variant_results: Variant results containing "aggregate_metrics".

        Returns:
            Mean of the per-metric means, or 0.0 when no metrics exist.
        """
        aggregate_metrics = variant_results.get("aggregate_metrics", {})
        scores = [metric_data["mean"] for metric_data in aggregate_metrics.values()]

        return float(np.mean(scores)) if scores else 0.0

    def _determine_best_variant(self, results: Dict[str, Any]) -> Optional[str]:
        """
        Determine the best-scoring variant.

        Args:
            results: Per-variant test results.

        Returns:
            Name of the variant with the highest overall score, or None when
            there are no results.
        """
        if not results:
            return None

        best_name, _ = max(results.items(), key=lambda item: item[1]["overall_score"])
        return best_name

    def _generate_test_summary(self, results: Dict[str, Any]) -> Dict[str, Any]:
        """
        Generate a summary over all variants.

        Args:
            results: Per-variant test results.

        Returns:
            Variant count plus best/average/std of overall scores.
        """
        scores = [variant["overall_score"] for variant in results.values()]

        return {
            "total_variants": len(results),
            "best_score": max(scores) if scores else 0,
            "average_score": float(np.mean(scores)) if scores else 0,
            "score_std": float(np.std(scores)) if scores else 0
        }

    def _analyze_factor_impact(self, results: Dict[str, Any], factor_names: List[str]) -> Dict[str, Any]:
        """
        Analyse the impact of each factor on overall scores.

        Args:
            results: Per-variant test results.
            factor_names: Names of the tested factors.

        Returns:
            For each factor, a mapping of factor value to its average overall
            score and the number of variants using that value.
        """
        factor_impact = {}

        for factor in factor_names:
            # Group overall scores by the value this factor took per variant.
            scores_by_value = defaultdict(list)
            for variant_data in results.values():
                scores_by_value[variant_data["config"][factor]].append(
                    variant_data["overall_score"]
                )

            factor_impact[factor] = {
                value: {
                    "average_score": float(np.mean(scores)),
                    "count": len(scores),
                }
                for value, scores in scores_by_value.items()
            }

        return factor_impact


class OnlineABTester:
    """Online A/B tester.

    Sticky-assigns users to variants of running tests and accumulates
    per-variant interaction statistics (conversions, revenue, engagement).
    """

    def __init__(self):
        # Sticky "test_id:user_id" -> variant mapping.
        self.variant_assignments = {}
        # test_id -> {'variants', 'start_time', 'stats'} for active tests.
        self.running_tests = {}
        # NOTE(review): unused by the methods below (each test keeps its own
        # stats inside running_tests); retained for interface compatibility.
        self.variant_stats = defaultdict(lambda: defaultdict(int))

    def create_test(self, test_id: str, variants: List[str]):
        """
        Create an online A/B test.

        Args:
            test_id: Test identifier.
            variants: Candidate variants users can be assigned to.
        """
        self.running_tests[test_id] = {
            'variants': variants,
            # datetime.utcnow() is deprecated (Python 3.12) and returns a
            # naive datetime; record an explicit timezone-aware UTC timestamp.
            'start_time': datetime.now(timezone.utc),
            'stats': defaultdict(lambda: defaultdict(int))
        }

    def assign_variant(self, user_id: str, test_id: str) -> str:
        """
        Assign a test variant to a user (sticky per user/test pair).

        Args:
            user_id: User identifier.
            test_id: Test identifier.

        Returns:
            The assigned variant.

        Raises:
            ValueError: If the test does not exist.
        """
        if test_id not in self.running_tests:
            raise ValueError(f"测试 {test_id} 不存在")

        assignment_key = f"{test_id}:{user_id}"
        if assignment_key not in self.variant_assignments:
            # Uniform random assignment; the result is cached so repeat calls
            # for the same user/test pair always return the same variant.
            variants = self.running_tests[test_id]['variants']
            self.variant_assignments[assignment_key] = random.choice(variants)

        return self.variant_assignments[assignment_key]

    def record_user_interaction(self, user_id: str, test_id: str,
                              interaction_data: Dict[str, Any]):
        """
        Record interaction data for an assigned user.

        Interactions from users without a variant assignment, or for unknown
        tests, are silently ignored (best-effort telemetry).

        Args:
            user_id: User identifier.
            test_id: Test identifier.
            interaction_data: May contain 'conversion', 'revenue' and
                'engagement' values; missing keys count as 0.
        """
        assignment_key = f"{test_id}:{user_id}"
        variant = self.variant_assignments.get(assignment_key)

        if variant and test_id in self.running_tests:
            stats = self.running_tests[test_id]['stats'][variant]

            stats['total_interactions'] += 1
            stats['conversions'] += interaction_data.get('conversion', 0)
            stats['revenue'] += interaction_data.get('revenue', 0)
            stats['engagement'] += interaction_data.get('engagement', 0)

    def get_test_results(self, test_id: str) -> Dict[str, Any]:
        """
        Summarise the accumulated results of a test.

        Args:
            test_id: Test identifier.

        Returns:
            Per-variant totals and rates plus the test start time, or an
            {"error": ...} dict when the test is unknown.
        """
        if test_id not in self.running_tests:
            return {"error": "测试不存在"}

        test_data = self.running_tests[test_id]
        results = {}

        for variant, stats in test_data['stats'].items():
            total = stats['total_interactions']

            def per_interaction(value):
                # Guard against division by zero for zero-traffic variants.
                return value / total if total > 0 else 0

            results[variant] = {
                'total_interactions': total,
                'conversions': stats['conversions'],
                'conversion_rate': per_interaction(stats['conversions']),
                'revenue': stats['revenue'],
                'average_revenue': per_interaction(stats['revenue']),
                'engagement': stats['engagement'],
                'average_engagement': per_interaction(stats['engagement'])
            }

        return {
            'test_id': test_id,
            'results': results,
            'start_time': test_data['start_time']
        }