import torch
import torch.nn as nn
import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report
import psutil
import os
import time
import json
from datetime import datetime


class ModelEvaluator:
    """Model evaluator: comprehensive metrics for original and compressed models.

    Collects classification quality (accuracy / precision / recall / F1),
    parameter and buffer counts, in-memory and on-disk size, inference
    latency, memory usage, per-layer statistics and weight sparsity, and can
    compare an original model against its compressed counterpart.
    """

    def __init__(self):
        # Run everything on GPU when one is available, otherwise CPU.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def get_model_size_mb(self, model):
        """Return ``(size_mb, param_count, buffer_count)`` for *model*.

        ``size_mb`` accounts for both parameters and buffers (e.g. BatchNorm
        running statistics) using each tensor's actual element size, so it is
        correct for half-precision / quantized weights as well.
        """
        param_size = 0
        param_sum = 0

        for param in model.parameters():
            param_size += param.nelement() * param.element_size()
            param_sum += param.nelement()

        buffer_size = 0
        buffer_sum = 0

        for buffer in model.buffers():
            buffer_size += buffer.nelement() * buffer.element_size()
            buffer_sum += buffer.nelement()

        size_mb = (param_size + buffer_size) / 1024 / 1024
        return size_mb, param_sum, buffer_sum

    def get_file_size_mb(self, filepath):
        """Return the size of *filepath* in MB, or 0 if the file does not exist."""
        if os.path.exists(filepath):
            return os.path.getsize(filepath) / 1024 / 1024
        return 0

    def measure_inference_time(self, model, test_loader, num_runs=10):
        """Measure inference wall-clock time.

        Performs 3 warm-up batches, then times *num_runs* passes over the
        first 10 batches of *test_loader*.

        Returns:
            ``(mean_seconds, std_seconds)`` as plain Python floats so the
            values stay JSON-serializable.
        """
        model.eval()
        times = []

        # Warm-up (3 batches) so lazy CUDA context creation / cudnn autotune
        # does not pollute the measured runs.
        with torch.no_grad():
            for i, (data, _) in enumerate(test_loader):
                if i >= 3:
                    break
                data = data.to(self.device)
                _ = model(data)

        # Timed runs.
        with torch.no_grad():
            for _ in range(num_runs):
                # Bug fix: CUDA kernels are launched asynchronously, so we must
                # synchronize before and after timing; otherwise time.time()
                # would only measure kernel-launch overhead, not execution.
                if torch.cuda.is_available():
                    torch.cuda.synchronize()
                start_time = time.time()
                for i, (data, _) in enumerate(test_loader):
                    if i >= 10:  # time the first 10 batches per run
                        break
                    data = data.to(self.device)
                    _ = model(data)
                if torch.cuda.is_available():
                    torch.cuda.synchronize()
                end_time = time.time()
                times.append(end_time - start_time)

        # Cast: np.float64 is not JSON-serializable by the stdlib json module.
        return float(np.mean(times)), float(np.std(times))

    def measure_memory_usage(self, model, test_loader):
        """Measure memory consumption during inference.

        Returns a dict with ``allocated_mb`` / ``reserved_mb`` /
        ``max_allocated_mb``; on GPU these are CUDA allocator statistics, on
        CPU they map to the process RSS / VMS (a best-effort approximation).
        """
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.reset_peak_memory_stats()

            # Run a few batches so the allocator statistics reflect inference.
            model.eval()
            with torch.no_grad():
                for i, (data, _) in enumerate(test_loader):
                    if i >= 5:  # 5 batches are enough to reach steady state
                        break
                    data = data.to(self.device)
                    _ = model(data)

            memory_allocated = torch.cuda.memory_allocated() / 1024 / 1024  # MB
            memory_reserved = torch.cuda.memory_reserved() / 1024 / 1024  # MB
            max_memory_allocated = torch.cuda.max_memory_allocated() / 1024 / 1024  # MB

            return {
                'allocated_mb': memory_allocated,
                'reserved_mb': memory_reserved,
                'max_allocated_mb': max_memory_allocated
            }
        else:
            # CPU fallback: whole-process memory, not model-specific usage.
            process = psutil.Process(os.getpid())
            memory_info = process.memory_info()
            return {
                'allocated_mb': memory_info.rss / 1024 / 1024,
                'reserved_mb': memory_info.vms / 1024 / 1024,
                'max_allocated_mb': memory_info.rss / 1024 / 1024
            }

    def evaluate_model_comprehensive(self, model, test_loader, model_path=None, model_name="Model"):
        """Run the full evaluation suite on *model*.

        Args:
            model: ``torch.nn.Module`` to evaluate; batches are moved to
                ``self.device``.
            test_loader: iterable yielding ``(data, targets)`` batches.
            model_path: optional checkpoint path used for on-disk size.
            model_name: label used in the printed report.

        Returns:
            Nested dict with performance metrics, model complexity, sizes,
            timing, memory usage and per-layer statistics — all values are
            JSON-serializable Python types.
        """
        print(f"\n{'=' * 60}")
        print(f"评估 {model_name}")
        print(f"{'=' * 60}")

        model.eval()
        all_predictions = []
        all_targets = []
        total_samples = 0
        # Samples contained in the first 10 batches — the same window that
        # measure_inference_time() times. Bug fix: throughput was previously
        # computed as total_samples / avg_inference_time although only 10
        # batches were timed, grossly inflating the figure.
        timed_samples = 0

        # 1. Accuracy / precision evaluation over the full test set.
        print("1. 进行准确率和精度评估...")
        with torch.no_grad():
            for batch_idx, (data, targets) in enumerate(test_loader):
                data, targets = data.to(self.device), targets.to(self.device)
                outputs = model(data)
                _, predicted = torch.max(outputs.data, 1)

                all_predictions.extend(predicted.cpu().numpy())
                all_targets.extend(targets.cpu().numpy())
                total_samples += targets.size(0)
                if batch_idx < 10:
                    timed_samples += targets.size(0)

        # sklearn returns np.float64; cast to float so json.dump() works.
        accuracy = float(accuracy_score(all_targets, all_predictions))
        precision_macro = float(precision_score(all_targets, all_predictions, average='macro', zero_division=0))
        precision_micro = float(precision_score(all_targets, all_predictions, average='micro', zero_division=0))
        recall_macro = float(recall_score(all_targets, all_predictions, average='macro', zero_division=0))
        recall_micro = float(recall_score(all_targets, all_predictions, average='micro', zero_division=0))
        f1_macro = float(f1_score(all_targets, all_predictions, average='macro', zero_division=0))
        f1_micro = float(f1_score(all_targets, all_predictions, average='micro', zero_division=0))

        # 2. Parameter statistics.
        print("2. 计算模型参数统计...")
        model_size_mb, total_params, total_buffers = self.get_model_size_mb(model)

        # Split parameter counts by trainability.
        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        non_trainable_params = sum(p.numel() for p in model.parameters() if not p.requires_grad)

        # Bug fix: use the actual per-element byte size of trainable
        # parameters instead of a hard-coded 4 bytes (float32 assumption).
        trainable_bytes = sum(p.numel() * p.element_size() for p in model.parameters() if p.requires_grad)

        # 3. Checkpoint file size (0 when no path is given or it is missing).
        print("3. 检查文件大小...")
        file_size_mb = 0
        if model_path and os.path.exists(model_path):
            file_size_mb = self.get_file_size_mb(model_path)

        # 4. Inference latency.
        print("4. 测量推理时间...")
        avg_inference_time, std_inference_time = self.measure_inference_time(model, test_loader)

        # 5. Memory usage.
        print("5. 测量内存使用...")
        memory_usage = self.measure_memory_usage(model, test_loader)

        # 6. Model complexity analysis.
        print("6. 分析模型复杂度...")

        # Per-layer parameter counts (leaf modules only, skip empty ones).
        layer_stats = {}
        for name, module in model.named_modules():
            if len(list(module.children())) == 0:  # leaf module
                params = sum(p.numel() for p in module.parameters())
                if params > 0:
                    layer_stats[name] = {
                        'params': params,
                        'type': type(module).__name__
                    }

        # Sparsity: delegate to the model when it exposes get_sparsity();
        # otherwise estimate by counting near-zero trainable weights.
        sparsity = 0
        if hasattr(model, 'get_sparsity'):
            sparsity = model.get_sparsity()
        else:
            zero_params = 0
            total_params_count = 0
            for param in model.parameters():
                if param.requires_grad:
                    total_params_count += param.numel()
                    # |w| < 1e-6 is treated as pruned-to-zero.
                    zero_params += (param.abs() < 1e-6).sum().item()
            if total_params_count > 0:
                sparsity = zero_params / total_params_count

        # 7. Assemble the result dict.
        results = {
            'model_name': model_name,
            'timestamp': datetime.now().isoformat(),
            'performance_metrics': {
                'accuracy': accuracy,
                'precision_macro': precision_macro,
                'precision_micro': precision_micro,
                'recall_macro': recall_macro,
                'recall_micro': recall_micro,
                'f1_macro': f1_macro,
                'f1_micro': f1_micro,
                'total_samples': total_samples
            },
            'model_complexity': {
                'total_parameters': total_params,
                'trainable_parameters': trainable_params,
                'non_trainable_parameters': non_trainable_params,
                'total_buffers': total_buffers,
                'sparsity_ratio': float(sparsity)
            },
            'model_size': {
                'memory_size_mb': model_size_mb,
                'file_size_mb': file_size_mb,
                'parameters_size_mb': trainable_bytes / 1024 / 1024
            },
            'performance': {
                'avg_inference_time_sec': avg_inference_time,
                'std_inference_time_sec': std_inference_time,
                # Throughput over the same 10-batch window that was timed.
                'throughput_samples_per_sec': timed_samples / avg_inference_time if avg_inference_time > 0 else 0
            },
            'memory_usage': memory_usage,
            'layer_statistics': layer_stats
        }

        # Print the detailed report.
        self.print_evaluation_log(results)

        return results

    def print_evaluation_log(self, results):
        """Pretty-print the evaluation report produced by evaluate_model_comprehensive()."""
        print(f"\n📊 {results['model_name']} 详细评估报告")
        print(f"评估时间: {results['timestamp']}")
        print(f"{'=' * 80}")

        # Performance metrics.
        perf = results['performance_metrics']
        print(f"\n🎯 性能指标:")
        print(f"  准确率 (Accuracy):      {perf['accuracy']:.4f} ({perf['accuracy'] * 100:.2f}%)")
        print(f"  精确率 (Precision):     宏平均 {perf['precision_macro']:.4f}, 微平均 {perf['precision_micro']:.4f}")
        print(f"  召回率 (Recall):        宏平均 {perf['recall_macro']:.4f}, 微平均 {perf['recall_micro']:.4f}")
        print(f"  F1分数 (F1-Score):      宏平均 {perf['f1_macro']:.4f}, 微平均 {perf['f1_micro']:.4f}")
        print(f"  测试样本数:             {perf['total_samples']}")

        # Model complexity.
        complexity = results['model_complexity']
        print(f"\n🏗️  模型复杂度:")
        print(f"  总参数量:               {complexity['total_parameters']:,}")
        print(f"  可训练参数:             {complexity['trainable_parameters']:,}")
        print(f"  不可训练参数:           {complexity['non_trainable_parameters']:,}")
        print(f"  缓冲区参数:             {complexity['total_buffers']:,}")
        print(f"  稀疏度:                 {complexity['sparsity_ratio']:.4f} ({complexity['sparsity_ratio'] * 100:.2f}%)")

        # Storage footprint.
        size = results['model_size']
        print(f"\n💾 存储占用:")
        print(f"  内存大小:               {size['memory_size_mb']:.2f} MB")
        print(f"  文件大小:               {size['file_size_mb']:.2f} MB")
        print(f"  参数存储大小:           {size['parameters_size_mb']:.2f} MB")

        # Runtime performance.
        performance = results['performance']
        print(f"\n⚡ 运行性能:")
        print(
            f"  平均推理时间:           {performance['avg_inference_time_sec']:.4f} ± {performance['std_inference_time_sec']:.4f} 秒")
        print(f"  吞吐量:                 {performance['throughput_samples_per_sec']:.2f} 样本/秒")

        # Memory usage.
        memory = results['memory_usage']
        print(f"\n🧠 内存使用:")
        print(f"  已分配内存:             {memory['allocated_mb']:.2f} MB")
        print(f"  保留内存:               {memory['reserved_mb']:.2f} MB")
        print(f"  峰值内存:               {memory['max_allocated_mb']:.2f} MB")

        # Layer statistics (top-10 layers by parameter count).
        if results['layer_statistics']:
            print(f"\n🔍 主要层统计 (参数量前10):")
            sorted_layers = sorted(results['layer_statistics'].items(),
                                   key=lambda x: x[1]['params'], reverse=True)
            for i, (layer_name, stats) in enumerate(sorted_layers[:10]):
                print(f"  {i + 1:2d}. {layer_name:<30} {stats['type']:<15} {stats['params']:>10,} 参数")

    def compare_models(self, original_results, compressed_results):
        """Compare the original and compressed models' result dicts.

        Prints a side-by-side report and returns the key ratios
        (accuracy loss %, size/speed/memory ratios).
        """
        print(f"\n{'=' * 80}")
        print(f"📊 模型对比分析")
        print(f"{'=' * 80}")

        # Accuracy comparison.
        orig_acc = original_results['performance_metrics']['accuracy']
        comp_acc = compressed_results['performance_metrics']['accuracy']
        acc_loss = orig_acc - comp_acc
        acc_loss_pct = (acc_loss / orig_acc) * 100 if orig_acc > 0 else 0

        print(f"\n🎯 性能对比:")
        print(f"  原始模型准确率:         {orig_acc:.4f} ({orig_acc * 100:.2f}%)")
        print(f"  压缩模型准确率:         {comp_acc:.4f} ({comp_acc * 100:.2f}%)")
        print(f"  准确率损失:             {acc_loss:.4f} ({acc_loss_pct:.2f}%)")

        # File-size comparison.
        orig_size = original_results['model_size']['file_size_mb']
        comp_size = compressed_results['model_size']['file_size_mb']
        size_ratio = orig_size / comp_size if comp_size > 0 else 0
        size_reduction = orig_size - comp_size
        size_reduction_pct = (size_reduction / orig_size) * 100 if orig_size > 0 else 0

        print(f"\n💾 大小对比:")
        print(f"  原始模型大小:           {orig_size:.2f} MB")
        print(f"  压缩模型大小:           {comp_size:.2f} MB")
        print(f"  压缩率:                 {size_ratio:.2f}x")
        print(f"  大小减少:               {size_reduction:.2f} MB ({size_reduction_pct:.2f}%)")

        # Parameter-count comparison.
        orig_params = original_results['model_complexity']['total_parameters']
        comp_params = compressed_results['model_complexity']['total_parameters']
        param_ratio = orig_params / comp_params if comp_params > 0 else 0

        print(f"\n🏗️  参数量对比:")
        print(f"  原始模型参数量:         {orig_params:,}")
        print(f"  压缩模型参数量:         {comp_params:,}")
        print(f"  参数压缩率:             {param_ratio:.2f}x")

        # Speed comparison.
        orig_time = original_results['performance']['avg_inference_time_sec']
        comp_time = compressed_results['performance']['avg_inference_time_sec']
        speed_ratio = orig_time / comp_time if comp_time > 0 else 0

        print(f"\n⚡ 速度对比:")
        print(f"  原始模型推理时间:       {orig_time:.4f} 秒")
        print(f"  压缩模型推理时间:       {comp_time:.4f} 秒")
        print(f"  加速比:                 {speed_ratio:.2f}x")

        # Memory-usage comparison.
        orig_memory = original_results['memory_usage']['max_allocated_mb']
        comp_memory = compressed_results['memory_usage']['max_allocated_mb']
        memory_ratio = orig_memory / comp_memory if comp_memory > 0 else 0

        print(f"\n🧠 内存使用对比:")
        print(f"  原始模型峰值内存:       {orig_memory:.2f} MB")
        print(f"  压缩模型峰值内存:       {comp_memory:.2f} MB")
        print(f"  内存节省:               {memory_ratio:.2f}x")

        # Overall compression-efficiency summary.
        print(f"\n📈 压缩效率总结:")
        print(f"  压缩权衡分数:           {self.calculate_compression_score(acc_loss_pct, size_ratio, speed_ratio):.2f}")
        print(f"  推荐等级:               {self.get_compression_recommendation(acc_loss_pct, size_ratio)}")

        return {
            'accuracy_loss_pct': acc_loss_pct,
            'size_compression_ratio': size_ratio,
            'speed_improvement_ratio': speed_ratio,
            'memory_reduction_ratio': memory_ratio
        }

    def calculate_compression_score(self, acc_loss_pct, size_ratio, speed_ratio):
        """Return a trade-off score clamped to [0, 100].

        Rewards compression ratio (weight 0.4) and speed-up (0.3), penalizes
        accuracy loss (0.03 per percentage point), scaled by 10.
        """
        score = (size_ratio * 0.4 + speed_ratio * 0.3 - acc_loss_pct * 0.03) * 10
        return max(0, min(100, score))

    def get_compression_recommendation(self, acc_loss_pct, size_ratio):
        """Map accuracy loss (%) and compression ratio to a star-rating label."""
        if acc_loss_pct < 5 and size_ratio > 3:
            return "⭐⭐⭐⭐⭐ 优秀"
        elif acc_loss_pct < 10 and size_ratio > 2:
            return "⭐⭐⭐⭐ 良好"
        elif acc_loss_pct < 15 and size_ratio > 2:
            return "⭐⭐⭐ 可接受"
        elif acc_loss_pct < 20:
            return "⭐⭐ 需要改进"
        else:
            return "⭐ 不推荐"


# 使用示例
def evaluate_compression_pipeline(original_model, compressed_model, test_loader,
                                  original_model_path=None, compressed_model_path=None):
    """Run the complete compression-evaluation pipeline.

    Evaluates both models with ModelEvaluator, prints a comparison report and
    saves all results to ./output/comprehensive_evaluation.json.

    Args:
        original_model / compressed_model: torch.nn.Module instances.
        test_loader: iterable yielding (data, targets) batches.
        original_model_path / compressed_model_path: optional checkpoint
            paths used for on-disk size reporting.

    Returns:
        Dict with both per-model result dicts, the comparison summary, and
        the evaluation timestamp.
    """

    evaluator = ModelEvaluator()

    print("开始全面评估模型压缩效果...")

    # Evaluate the original model.
    original_results = evaluator.evaluate_model_comprehensive(
        original_model, test_loader, original_model_path, "原始模型"
    )

    # Evaluate the compressed model.
    compressed_results = evaluator.evaluate_model_comprehensive(
        compressed_model, test_loader, compressed_model_path, "压缩模型"
    )

    # Side-by-side comparison.
    comparison_results = evaluator.compare_models(original_results, compressed_results)

    results = {
        'original_model': original_results,
        'compressed_model': compressed_results,
        'comparison': comparison_results,
        'evaluation_time': datetime.now().isoformat()
    }

    def _json_default(obj):
        # Bug fix: numpy scalars/arrays that may leak into the results are
        # not JSON-serializable by default; convert them explicitly instead
        # of letting json.dump raise TypeError.
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable")

    # Bug fix: the output directory may not exist yet.
    os.makedirs('./output', exist_ok=True)
    with open('./output/comprehensive_evaluation.json', 'w', encoding='utf-8') as f:
        json.dump(results, f, indent=2, ensure_ascii=False, default=_json_default)

    print(f"\n✅ 评估完成！详细结果已保存到: ./output/comprehensive_evaluation.json")

    return results


