import torch
import numpy as np
import time
import psutil
import GPUtil
from typing import Dict, List
import matplotlib.pyplot as plt

class ModelBenchmark:
    """Benchmark inference latency, throughput (FPS) and memory usage of models."""

    def __init__(self):
        # model_name -> dict of timing/memory statistics (see benchmark_model)
        self.results = {}

    def benchmark_model(self, model, model_name, test_loader, device,
                       warmup_runs=10, test_runs=100):
        """Benchmark one model's inference performance.

        Args:
            model: torch.nn.Module, already placed on `device` and callable
                on a single image batch.
            model_name: key under which the statistics are stored.
            test_loader: optional iterable yielding (lr_imgs, _) batches;
                when None a random 1x3x64x64 tensor is used instead.
            device: torch.device the model runs on.
            warmup_runs: untimed runs to warm caches / JIT / GPU clocks.
            test_runs: timed runs the statistics are computed from.

        Returns:
            Dict with 'average_time', 'std_time', 'min_time', 'max_time'
            (seconds), 'average_memory' (bytes, delta per run) and 'fps';
            the same dict is stored in self.results[model_name].
        """
        print(f"开始基准测试: {model_name}")

        model.eval()
        times = []
        memory_usages = []

        # Resolve the input ONCE, outside the timed region. The original
        # re-created a DataLoader iterator and did the host->device copy
        # inside every timed run, so it measured data loading + transfer
        # instead of pure inference.
        if test_loader is not None:
            lr_imgs, _ = next(iter(test_loader))
            test_input = lr_imgs.to(device)
        else:
            test_input = torch.randn(1, 3, 64, 64).to(device)

        # Warmup with the SAME input that will be timed (the original warmed
        # up with a random tensor even when real data was supplied).
        with torch.no_grad():
            for _ in range(warmup_runs):
                _ = model(test_input)

            if device.type == 'cuda':
                torch.cuda.synchronize()

        # Timed runs.
        with torch.no_grad():
            for _ in range(test_runs):
                # Snapshot memory BEFORE starting the clock so the probe's
                # cost is not counted as inference time.
                start_memory = self.get_memory_usage(device)
                # perf_counter is monotonic and high-resolution; time.time
                # can jump (NTP) and has coarse granularity on some platforms.
                start_time = time.perf_counter()

                _ = model(test_input)

                if device.type == 'cuda':
                    # CUDA kernels launch asynchronously; wait for completion
                    # before stopping the clock.
                    torch.cuda.synchronize()

                end_time = time.perf_counter()
                end_memory = self.get_memory_usage(device)

                times.append(end_time - start_time)
                memory_usages.append(end_memory - start_memory)

        # Aggregate statistics.
        avg_time = float(np.mean(times))
        avg_memory = float(np.mean(memory_usages))
        fps = 1.0 / avg_time if avg_time > 0 else 0

        self.results[model_name] = {
            'average_time': avg_time,
            'average_memory': avg_memory,
            'fps': fps,
            'std_time': float(np.std(times)),
            'min_time': float(np.min(times)),
            'max_time': float(np.max(times))
        }

        print(f"{model_name} - 平均时间: {avg_time*1000:.2f}ms, FPS: {fps:.2f}, "
              f"内存: {avg_memory/1024/1024:.2f}MB")

        return self.results[model_name]

    def get_memory_usage(self, device):
        """Return current memory usage in bytes.

        CUDA: tensors currently allocated by PyTorch on `device`.
        CPU: process resident set size via psutil; falls back to 0 when
        psutil is unavailable so the (best-effort) memory metric never
        breaks the timing benchmark.
        """
        if device.type == 'cuda':
            return torch.cuda.memory_allocated(device)
        try:
            import psutil  # local import: degrade gracefully if missing
            return psutil.Process().memory_info().rss
        except ImportError:
            return 0

    def compare_models(self, models_dict, test_loader, device):
        """Benchmark every model in `models_dict` (name -> model).

        Returns the accumulated self.results dict (name -> statistics).
        """
        for name, model in models_dict.items():
            self.benchmark_model(model, name, test_loader, device)

        return self.results

    def generate_comparison_report(self, output_path="benchmark_comparison.png"):
        """Render a 3-panel bar chart (latency / FPS / memory) to `output_path`.

        Returns the matplotlib Figure, or None when no benchmarks have run.
        """
        if not self.results:
            print("没有基准测试数据")
            return

        model_names = list(self.results.keys())
        # Convert to display units: seconds -> milliseconds, bytes -> MB.
        times = [self.results[name]['average_time'] * 1000 for name in model_names]
        fps_values = [self.results[name]['fps'] for name in model_names]
        memory_values = [self.results[name]['average_memory'] / 1024 / 1024 for name in model_names]

        fig, axes = plt.subplots(1, 3, figsize=(15, 5))

        # One (values, title/ylabel, color) spec per panel keeps the three
        # bar charts visibly consistent.
        panels = [
            (times, '平均推理时间 (ms)', '时间 (ms)', 'skyblue'),
            (fps_values, 'FPS', 'FPS', 'lightgreen'),
            (memory_values, '内存使用 (MB)', '内存 (MB)', 'lightcoral'),
        ]
        for ax, (values, title, ylabel, color) in zip(axes, panels):
            ax.bar(model_names, values, color=color)
            ax.set_title(title)
            ax.set_ylabel(ylabel)
            ax.tick_params(axis='x', rotation=45)

        plt.tight_layout()
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        print(f"性能对比图保存到: {output_path}")

        return fig

class AccuracyEvaluator:
    """Evaluate super-resolution accuracy (PSNR/SSIM) and plot trade-offs."""

    def __init__(self):
        # model_name -> {'psnr', 'ssim', 'psnr_std', 'ssim_std'}
        self.metrics_history = {}

    def evaluate_model_accuracy(self, model, test_loader, device, model_name="模型"):
        """Compute average PSNR/SSIM of `model` over `test_loader`.

        Args:
            model: torch.nn.Module mapping LR batches to HR-sized outputs
                (assumed to produce values in [0, 1] — clipped before scaling).
            test_loader: iterable of (lr_imgs, hr_imgs) NCHW float batches.
            device: torch.device to run inference on.
            model_name: key under which metrics are stored.

        Returns:
            Dict with 'psnr', 'ssim' means and their std deviations; also
            stored in self.metrics_history[model_name].
        """
        from skimage.metrics import peak_signal_noise_ratio as psnr
        from skimage.metrics import structural_similarity as ssim

        model.eval()
        psnr_values = []
        ssim_values = []

        with torch.no_grad():
            for lr_imgs, hr_imgs in test_loader:
                lr_imgs = lr_imgs.to(device)
                hr_imgs = hr_imgs.to(device)

                outputs = model(lr_imgs)

                # NCHW -> NHWC numpy for skimage metrics.
                outputs_np = outputs.cpu().numpy().transpose(0, 2, 3, 1)
                hr_imgs_np = hr_imgs.cpu().numpy().transpose(0, 2, 3, 1)

                for i in range(outputs_np.shape[0]):
                    # Scale [0,1] floats to uint8 pixel range.
                    pred = np.clip(outputs_np[i] * 255, 0, 255).astype(np.uint8)
                    target = np.clip(hr_imgs_np[i] * 255, 0, 255).astype(np.uint8)

                    psnr_val = psnr(target, pred, data_range=255)
                    # BUG FIX: the original passed BOTH `multichannel=True`
                    # and `channel_axis=2`; no scikit-image release accepts
                    # both (old versions lack `channel_axis`, new versions
                    # removed `multichannel`), so it always raised TypeError.
                    try:
                        ssim_val = ssim(target, pred, channel_axis=2,
                                        data_range=255)
                    except TypeError:
                        # scikit-image < 0.19: fall back to the old keyword.
                        ssim_val = ssim(target, pred, multichannel=True,
                                        data_range=255)

                    psnr_values.append(psnr_val)
                    ssim_values.append(ssim_val)

        avg_psnr = float(np.mean(psnr_values))
        avg_ssim = float(np.mean(ssim_values))

        self.metrics_history[model_name] = {
            'psnr': avg_psnr,
            'ssim': avg_ssim,
            'psnr_std': float(np.std(psnr_values)),
            'ssim_std': float(np.std(ssim_values))
        }

        print(f"{model_name} - PSNR: {avg_psnr:.2f}dB, SSIM: {avg_ssim:.4f}")

        return self.metrics_history[model_name]

    def plot_speed_accuracy_tradeoff(self, benchmark_results, accuracy_results,
                                   output_path="speed_accuracy_tradeoff.png"):
        """Scatter-plot FPS vs PSNR per model, with the Pareto front overlaid.

        Args:
            benchmark_results: name -> dict containing 'fps'.
            accuracy_results: name -> dict containing 'psnr'; its keys
                determine which models are plotted.
            output_path: destination PNG path.

        Returns:
            The current matplotlib Figure.
        """
        model_names = list(accuracy_results.keys())

        fps_values = [benchmark_results[name]['fps'] for name in model_names]
        psnr_values = [accuracy_results[name]['psnr'] for name in model_names]

        plt.figure(figsize=(10, 6))

        for i, name in enumerate(model_names):
            plt.scatter(fps_values[i], psnr_values[i], s=100, alpha=0.7)
            plt.annotate(name, (fps_values[i], psnr_values[i]),
                        xytext=(5, 5), textcoords='offset points')

        plt.xlabel('FPS (速度)')
        plt.ylabel('PSNR (dB) (精度)')
        plt.title('速度-精度权衡分析')
        plt.grid(True, alpha=0.3)

        # Overlay the Pareto front (models not dominated in both axes).
        if len(model_names) > 1:
            points = np.column_stack((fps_values, psnr_values))
            pareto_points = self.find_pareto_front(points)

            if len(pareto_points) > 1:
                # Sort by FPS so the dashed line is drawn left-to-right.
                pareto_points = pareto_points[pareto_points[:, 0].argsort()]
                plt.plot(pareto_points[:, 0], pareto_points[:, 1],
                        'r--', alpha=0.7, label='Pareto前沿')
                plt.legend()

        plt.tight_layout()
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        print(f"速度-精度权衡图保存到: {output_path}")

        return plt.gcf()

    def find_pareto_front(self, points):
        """Return the Pareto-optimal subset of 2-D points (maximize both axes).

        A point is kept unless some other point is >= in both coordinates
        and strictly greater in at least one. O(n^2), fine for a handful of
        models.
        """
        pareto = [
            p for p in points
            if not any(
                q[0] >= p[0] and q[1] >= p[1] and (q[0] > p[0] or q[1] > p[1])
                for q in points
            )
        ]
        return np.array(pareto)

def comprehensive_evaluation(models=None, test_loader=None, device=None):
    """Run the full speed + accuracy evaluation pipeline and emit reports.

    BUG FIX: the original built the models dict from undefined module
    globals (original_model, finetuned_model, student_model, pruned_model,
    quantized_model) and an undefined test_loader, so calling it always
    raised NameError. The inputs are now explicit keyword parameters
    (backward-compatible signature — all have defaults).

    Args:
        models: dict mapping display name -> torch.nn.Module. Required.
        test_loader: DataLoader yielding (lr_imgs, hr_imgs) batches, used
            for both benchmarking and accuracy evaluation.
        device: torch.device; auto-detected (CUDA if available) when None.

    Raises:
        ValueError: when `models` is not provided.
    """
    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    if models is None:
        raise ValueError(
            "comprehensive_evaluation requires a `models` dict of name -> model"
        )

    # Speed / memory benchmarks.
    benchmark = ModelBenchmark()
    benchmark_results = benchmark.compare_models(models, test_loader, device)

    # Accuracy evaluation per model.
    accuracy_evaluator = AccuracyEvaluator()
    accuracy_results = {
        name: accuracy_evaluator.evaluate_model_accuracy(
            model, test_loader, device, name
        )
        for name, model in models.items()
    }

    # Reports.
    benchmark.generate_comparison_report("performance_comparison.png")
    accuracy_evaluator.plot_speed_accuracy_tradeoff(
        benchmark_results, accuracy_results, "speed_accuracy_tradeoff.png"
    )

    # Final recommendation.
    recommend_best_model(benchmark_results, accuracy_results)

def recommend_best_model(benchmark_results, accuracy_results):
    """Score each model on a speed/accuracy blend and recommend the best one.

    Args:
        benchmark_results: name -> dict containing 'fps'.
        accuracy_results: name -> dict containing 'psnr' (dB); must cover
            every name in benchmark_results.

    Returns:
        Name of the highest-scoring model, or None when there is no data
        (the original crashed on max() over an empty sequence).
    """
    print("\n=== 模型推荐分析 ===")

    if not benchmark_results:
        # Guard against empty input instead of raising ValueError from max().
        print("没有基准测试数据")
        return None

    def _balanced_score(fps, psnr):
        """Weighted blend: speed capped at 30 FPS, accuracy mapped from 20-30 dB."""
        speed_score = min(fps / 30, 1.0)
        accuracy_score = min((psnr - 20) / 10, 1.0)
        # Weights: speed 0.4, accuracy 0.6 — adjust per deployment needs.
        return 0.4 * speed_score + 0.6 * accuracy_score

    # Single source of truth for scoring: the original duplicated this
    # formula inside the max() key, which risks the printed scores and the
    # chosen model silently diverging if one copy is edited.
    scores = {}
    for name in benchmark_results:
        fps = benchmark_results[name]['fps']
        psnr = accuracy_results[name]['psnr']
        scores[name] = _balanced_score(fps, psnr)
        print(f"{name}: FPS={fps:.1f}, PSNR={psnr:.2f}dB, 综合评分={scores[name]:.3f}")

    best_model = max(scores, key=scores.get)

    print(f"\n推荐模型: {best_model}")
    print(f"理由: 在速度和精度之间达到最佳平衡")
    return best_model

if __name__ == "__main__":
    # Running the full pipeline requires real models and a dataset, so the
    # call is left commented out as a usage hint:
    # comprehensive_evaluation()
    print("综合评估模块准备就绪")