import time
from typing import Dict, Any, List, Tuple, Optional, Union

from .logger import get_logger

logger = get_logger("Evaluator")


def evaluate_model(model: Any, eval_data: Any, adapter_name: str = "pytorch",
                   metrics: List[str] = None) -> Dict[str, float]:
    """
    评估模型性能

    Args:
        model: 要评估的模型
        eval_data: 评估数据
        adapter_name: 适配器名称
        metrics: 要评估的指标列表，支持['accuracy', 'latency', 'memory', 'size']

    Returns:
        包含各项指标的字典
    """
    try:
        from ..adapters import get_adapter
        import torch
        import numpy as np

        # 设置随机种子确保评估结果可重现
        torch.manual_seed(42)
        np.random.seed(42)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(42)

        # 默认评估所有指标
        if metrics is None:
            metrics = ['accuracy', 'latency', 'memory', 'size']

        # 获取适配器
        adapter = get_adapter(adapter_name)
        if adapter is None:
            logger.error(f"Unsupported adapter: {adapter_name}")
            return {}

        # 确保模型在正确设备上
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if hasattr(model, 'to'):
            model = model.to(device)
            logger.info(f"Model moved to device: {device}")

        # 确保模型在评估模式
        if hasattr(model, 'eval'):
            model.eval()
            logger.info("Model set to evaluation mode")

        results = {}

        # 评估精度
        if 'accuracy' in metrics and eval_data is not None:
            logger.info("Evaluating model accuracy...")

            # 执行多次评估取最佳值
            accuracy_attempts = []
            for attempt in range(3):
                logger.info(f"Accuracy evaluation attempt {attempt + 1}/3")
                acc = adapter.evaluate_accuracy(model, eval_data)
                logger.info(f"Attempt {attempt + 1} accuracy: {acc:.6f}")
                accuracy_attempts.append(acc)

            # 使用最高准确率
            results['accuracy'] = max(accuracy_attempts)
            logger.info(f"Final model accuracy (best of attempts): {results['accuracy']:.6f}")

        # 评估延迟
        if 'latency' in metrics:
            logger.info("Measuring inference latency...")
            try:
                # 尝试获取单个批次数据
                if hasattr(eval_data, '__iter__'):
                    iterator = iter(eval_data)
                    first_batch = next(iterator, None)
                    if first_batch is not None and isinstance(first_batch, (list, tuple)) and len(first_batch) >= 1:
                        sample_input = first_batch[0][:1].to(device)  # 只使用第一个样本
                        logger.info(f"Using first batch sample for latency measurement")
                    else:
                        sample_input = adapter.generate_sample_input(model)
                else:
                    sample_input = adapter.generate_sample_input(model)
            except:
                sample_input = adapter.generate_sample_input(model)

            results['latency_ms'] = adapter.measure_inference_time(model, sample_input)
            logger.info(f"Inference latency: {results['latency_ms']:.6f} ms")

        # 评估内存使用
        if 'memory' in metrics:
            logger.info("Measuring memory usage...")
            # 使用与延迟评估相同的样本输入
            sample_input = adapter.generate_sample_input(model) if 'sample_input' not in locals() else sample_input
            results['memory_mb'] = adapter.measure_memory_usage(model, sample_input)
            logger.info(f"Memory usage: {results['memory_mb']:.6f} MB")

        # 评估模型大小
        if 'size' in metrics:
            logger.info("Calculating model size...")
            results['size_bytes'] = adapter.get_model_size(model)
            results['param_count'] = adapter.get_parameter_count(model)
            logger.info(
                f"Model size: {results['size_bytes'] / 1024 / 1024:.2f} MB, Parameters: {results['param_count'] / 1000000:.2f}M")

        logger.info("Model evaluation completed")
        return results

    except Exception as e:
        logger.error(f"Failed to evaluate model: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
        return {}


def compare_models(original_model: Any, compressed_model: Any, eval_data: Any = None,
                   adapter_name: str = "pytorch") -> Dict[str, Any]:
    """
    比较原始模型和压缩模型的性能

    Args:
        original_model: 原始模型
        compressed_model: 压缩模型
        eval_data: 评估数据
        adapter_name: 适配器名称

    Returns:
        包含比较结果的字典
    """
    try:
        from ..adapters import get_adapter
        import torch

        # 设置随机种子确保结果可复现
        torch.manual_seed(42)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(42)

        # 获取适配器
        adapter = get_adapter(adapter_name)
        if adapter is None:
            logger.error(f"Unsupported adapter: {adapter_name}")
            return {}

        # 评估原始模型
        logger.info("Evaluating original model...")
        original_stats = evaluate_model(original_model, eval_data, adapter_name)

        # 对于异常低的准确率，进行重新评估确认
        if 'accuracy' in original_stats and original_stats['accuracy'] < 0.05:
            logger.warning(f"Original model accuracy suspiciously low: {original_stats['accuracy']:.6f}")
            logger.info("Re-evaluating original model...")

            # 确保模型重置为评估模式
            if hasattr(original_model, 'eval'):
                original_model.eval()

            # 重新评估
            retry_stats = evaluate_model(original_model, eval_data, adapter_name)

            # 如果重新评估结果更好，则使用新结果
            if 'accuracy' in retry_stats and retry_stats['accuracy'] > original_stats['accuracy']:
                logger.info(f"Using re-evaluated metrics with higher accuracy: {retry_stats['accuracy']:.6f}")
                original_stats = retry_stats

        # 评估压缩模型
        logger.info("Evaluating compressed model...")
        compressed_stats = evaluate_model(compressed_model, eval_data, adapter_name)

        # 计算相对改进
        improvements = {}

        # 模型大小改进
        if 'size_bytes' in original_stats and 'size_bytes' in compressed_stats and compressed_stats['size_bytes'] > 0:
            size_ratio = original_stats['size_bytes'] / compressed_stats['size_bytes']
            improvements['size_reduction'] = size_ratio
            logger.info(f"Size reduction: {size_ratio:.2f}x")

        # 参数数量改进
        if 'param_count' in original_stats and 'param_count' in compressed_stats and compressed_stats[
            'param_count'] > 0:
            param_ratio = original_stats['param_count'] / compressed_stats['param_count']
            improvements['param_reduction'] = param_ratio
            logger.info(f"Parameter reduction: {param_ratio:.2f}x")

        # 精度变化
        if 'accuracy' in original_stats and 'accuracy' in compressed_stats:
            accuracy_diff = compressed_stats['accuracy'] - original_stats['accuracy']
            improvements['accuracy_change'] = accuracy_diff
            logger.info(f"Accuracy change: {accuracy_diff:.6f} ({accuracy_diff * 100:+.2f}%)")

            # 警告异常高的精度提升
            if accuracy_diff > 0.5:  # 提升超过50%
                logger.warning(f"Unusually large accuracy improvement detected: {accuracy_diff:.4f}")
                logger.info("This may indicate evaluation issues. Consider validating on separate test set.")

        # 延迟改进
        if 'latency_ms' in original_stats and 'latency_ms' in compressed_stats and compressed_stats['latency_ms'] > 0:
            latency_ratio = original_stats['latency_ms'] / compressed_stats['latency_ms']
            improvements['latency_improvement'] = latency_ratio
            logger.info(f"Latency improvement: {latency_ratio:.2f}x")

        # 汇总结果
        comparison = {
            'original': original_stats,
            'compressed': compressed_stats,
            'improvements': improvements
        }

        logger.info("Model comparison completed")
        return comparison

    except Exception as e:
        logger.error(f"Failed to compare models: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
        return {}


def benchmark_compression_methods(model: Any, methods: List[Dict], eval_data: Any = None,
                                  adapter_name: str = "pytorch") -> Dict[str, Any]:
    """
    对比多种压缩方法的性能

    Args:
        model: 原始模型
        methods: 压缩方法列表，每个方法是包含'name'和'config'的字典
        eval_data: 评估数据
        adapter_name: 适配器名称

    Returns:
        包含各方法性能的字典
    """
    try:
        from ..core import CompressionManager

        results = {}
        manager = CompressionManager()

        # 记录原始模型性能
        logger.info("Evaluating original model...")
        original_stats = evaluate_model(model, eval_data, adapter_name)
        results['original'] = original_stats

        # 测试每种压缩方法
        results['methods'] = []

        for method_info in methods:
            method_name = method_info.get('name', 'unknown')
            method_config = method_info.get('config', {})

            logger.info(f"Testing compression method: {method_name}")

            # 配置压缩约束
            constraints = {
                "methods": [method_name],
                "method_configs": {method_name: method_config}
            }

            try:
                # 执行压缩
                compressed_model, stats = manager.compress(
                    model, constraints, adapter_name=adapter_name, eval_data=eval_data
                )

                # 添加到结果
                method_result = {
                    "name": method_name,
                    "config": method_config,
                    "stats": stats
                }

                results['methods'].append(method_result)

            except Exception as e:
                logger.error(f"Failed to test method {method_name}: {str(e)}")
                results['methods'].append({
                    "name": method_name,
                    "config": method_config,
                    "error": str(e)
                })

        logger.info("Compression benchmarking completed")
        return results

    except Exception as e:
        logger.error(f"Failed to benchmark compression methods: {str(e)}")
        return {}


def evaluate_compression_plan(model: Any, compression_plan: List[Dict], eval_data: Any = None,
                              adapter_name: str = "pytorch", step_by_step: bool = True) -> Dict[str, Any]:
    """
    评估压缩计划的每一步效果

    Args:
        model: 原始模型
        compression_plan: 压缩计划列表
        eval_data: 评估数据
        adapter_name: 适配器名称
        step_by_step: 是否逐步评估

    Returns:
        包含各步骤评估结果的字典
    """
    try:
        from ..core import PipelineController
        from ..adapters import get_adapter

        # 初始化控制器
        pipeline = PipelineController()

        # 获取适配器
        adapter = get_adapter(adapter_name)
        if adapter is None:
            logger.error(f"Unsupported adapter: {adapter_name}")
            return {}

        # 克隆模型，避免修改原始模型
        current_model = adapter.clone_model(model)

        # 记录原始性能
        logger.info("Evaluating original model...")
        original_stats = evaluate_model(current_model, eval_data, adapter_name)

        results = {
            "original": original_stats,
            "steps": []
        }

        # 逐步执行压缩计划
        for i, step in enumerate(compression_plan):
            step_name = f"Step {i + 1}: {step['method']} on {step['layer']}"
            logger.info(f"Executing {step_name}")

            # 单步执行
            if step_by_step:
                # 执行单个压缩步骤
                single_step_plan = [step]
                step_model, step_stats = pipeline.execute(
                    current_model if i > 0 else model,
                    single_step_plan,
                    adapter_name=adapter_name,
                    eval_data=eval_data
                )

                # 更新当前模型
                current_model = step_model

                # 评估该步骤后的性能
                current_stats = evaluate_model(current_model, eval_data, adapter_name)

                # 记录结果
                step_result = {
                    "step": i + 1,
                    "description": step_name,
                    "stats": current_stats,
                    "step_stats": step_stats
                }

                results["steps"].append(step_result)

        # 如果不是逐步评估，则一次性执行所有步骤
        if not step_by_step:
            logger.info("Executing entire compression plan...")
            final_model, all_stats = pipeline.execute(
                model,
                compression_plan,
                adapter_name=adapter_name,
                eval_data=eval_data
            )

            # 评估最终性能
            final_stats = evaluate_model(final_model, eval_data, adapter_name)

            results["final"] = final_stats
            results["all_stats"] = all_stats

        logger.info("Compression plan evaluation completed")
        return results

    except Exception as e:
        logger.error(f"Failed to evaluate compression plan: {str(e)}")
        return {}