#!/usr/bin/env python3
"""
CPU vs GPU性能对比测试脚本
"""

import copy
import json
import os
import time
from datetime import datetime

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch

# Work around CJK glyph rendering in matplotlib: prefer fonts that ship
# Chinese glyphs and disable the Unicode minus so negative tick labels
# still render with fonts that lack U+2212.
try:
    plt.rcParams['font.sans-serif'] = ['Arial Unicode MS', 'SimHei', 'DejaVu Sans']
    plt.rcParams['axes.unicode_minus'] = False
    matplotlib.rcParams['font.family'] = 'sans-serif'
except Exception:  # cosmetic only — never abort the benchmark over fonts
    print("警告: 无法设置中文字体，图表可能无法正确显示中文")

class CPUvsGPUComparison:
    """Benchmark harness comparing CPU against GPU (CUDA or Apple MPS).

    Runs matrix-multiplication, convolution, and autograd workloads on the
    CPU and, when available, on the best GPU backend. Per-test metrics are
    accumulated in ``self.results`` and can be exported as a JSON report
    plus a set of matplotlib charts (both written under ``results/``).
    """

    def __init__(self):
        # Per-test results keyed by test name; filled by the compare_* methods.
        self.results = {}
        self.cpu_device = torch.device('cpu')

        # Pick the best available GPU backend: CUDA first, then Apple MPS.
        if torch.cuda.is_available():
            self.gpu_device = torch.device('cuda')
            self.gpu_device_name = "CUDA GPU"
        elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
            self.gpu_device = torch.device('mps')
            self.gpu_device_name = "Apple MPS"
        else:
            self.gpu_device = None
            self.gpu_device_name = "无GPU"

        print(f"CPU设备: {self.cpu_device}")
        print(f"GPU设备: {self.gpu_device_name}")

    def _sync(self):
        """Block until all queued GPU work has finished.

        GPU kernels launch asynchronously, so wall-clock timings are only
        meaningful after an explicit synchronization. No-op when no GPU is
        available.
        """
        if self.gpu_device is None:
            return
        if self.gpu_device.type == 'cuda':
            torch.cuda.synchronize()
        elif self.gpu_device.type == 'mps':
            torch.mps.synchronize()

    def compare_matrix_operations(self, sizes=(100, 500, 1000, 2000)):
        """Time square matrix multiplication on CPU vs GPU.

        Args:
            sizes: iterable of matrix side lengths to benchmark.

        Returns:
            dict mapping size -> {'cpu_time', 'gpu_time', 'speedup'};
            also stored under ``self.results['matrix_operations']``.
        """
        print("\n" + "=" * 50)
        print("矩阵操作性能对比测试")
        print("=" * 50)

        comparison_results = {}

        for size in sizes:
            print(f"\n测试矩阵大小: {size}x{size}")

            # Fresh random operands per size.
            cpu_A = torch.randn(size, size)
            cpu_B = torch.randn(size, size)

            # CPU matrix multiplication.
            start_time = time.time()
            cpu_result = cpu_A @ cpu_B
            cpu_time = time.time() - start_time
            print(f"  CPU矩阵乘法: {cpu_time:.4f} 秒")

            # inf marks "no GPU measurement"; it yields a 0 speedup below.
            gpu_time = float('inf')
            if self.gpu_device:
                # Host -> device transfer; synchronize so the transfer timing
                # does not leak into the compute timing (copies are async).
                start_time = time.time()
                gpu_A = cpu_A.to(self.gpu_device)
                gpu_B = cpu_B.to(self.gpu_device)
                self._sync()
                transfer_time = time.time() - start_time

                # Device compute, bracketed by a sync for honest timing.
                start_time = time.time()
                gpu_result = gpu_A @ gpu_B
                self._sync()
                gpu_compute_time = time.time() - start_time

                # Device -> host transfer (.cpu() blocks until data arrives).
                start_time = time.time()
                gpu_result_cpu = gpu_result.cpu()
                transfer_back_time = time.time() - start_time

                gpu_time = gpu_compute_time  # report pure compute time only

                print(f"  {self.gpu_device_name}矩阵乘法: {gpu_time:.4f} 秒")
                print(f"  数据传输时间: {transfer_time + transfer_back_time:.4f} 秒")

                # Sanity-check numerical agreement between the two devices.
                diff = torch.abs(cpu_result - gpu_result_cpu).max().item()
                print(f"  结果差异: {diff:.6f}")

                if gpu_time > 0:
                    speedup = cpu_time / gpu_time
                    print(f"  计算加速比: {speedup:.2f}x")
                else:
                    print(f"  {self.gpu_device_name}时间过短，无法计算准确加速比")
            else:
                print("  GPU不可用，跳过GPU测试")

            comparison_results[size] = {
                'cpu_time': cpu_time,
                'gpu_time': gpu_time,
                'speedup': cpu_time / gpu_time if gpu_time > 0 else 0
            }

        self.results['matrix_operations'] = comparison_results
        return comparison_results

    def compare_convolution_operations(self, batch_sizes=(1, 8, 16, 32)):
        """Time a Conv2d forward and backward pass on CPU vs GPU.

        Args:
            batch_sizes: iterable of batch sizes for a (N, 3, 224, 224) input.

        Returns:
            dict mapping batch size -> timing/speedup metrics; also stored
            under ``self.results['convolution_operations']``.
        """
        print("\n" + "=" * 50)
        print("卷积操作性能对比测试")
        print("=" * 50)

        comparison_results = {}

        for batch_size in batch_sizes:
            print(f"\n测试批量大小: {batch_size}")

            # Fresh input and layer per batch size (grads don't carry over).
            cpu_input = torch.randn(batch_size, 3, 224, 224)
            cpu_conv = torch.nn.Conv2d(3, 64, kernel_size=3, padding=1)

            # CPU forward pass.
            start_time = time.time()
            cpu_output = cpu_conv(cpu_input)
            cpu_forward_time = time.time() - start_time
            print(f"  CPU前向传播: {cpu_forward_time:.4f} 秒")

            # CPU backward pass.
            start_time = time.time()
            cpu_output.sum().backward()
            cpu_backward_time = time.time() - start_time
            print(f"  CPU反向传播: {cpu_backward_time:.4f} 秒")

            gpu_forward_time = float('inf')
            gpu_backward_time = float('inf')
            if self.gpu_device:
                # Deep-copy the layer before moving it: nn.Module.to() is
                # in-place, so cpu_conv.to(device) would silently relocate
                # cpu_conv (and its accumulated grads) onto the GPU.
                start_time = time.time()
                gpu_input = cpu_input.to(self.gpu_device)
                gpu_conv = copy.deepcopy(cpu_conv).to(self.gpu_device)
                self._sync()
                transfer_time = time.time() - start_time

                # GPU forward pass.
                start_time = time.time()
                gpu_output = gpu_conv(gpu_input)
                self._sync()
                gpu_forward_time = time.time() - start_time
                print(f"  {self.gpu_device_name}前向传播: {gpu_forward_time:.4f} 秒")

                # GPU backward pass.
                start_time = time.time()
                gpu_output.sum().backward()
                self._sync()
                gpu_backward_time = time.time() - start_time
                print(f"  {self.gpu_device_name}反向传播: {gpu_backward_time:.4f} 秒")

                print(f"  数据传输时间: {transfer_time:.4f} 秒")

                if gpu_forward_time > 0:
                    forward_speedup = cpu_forward_time / gpu_forward_time
                    print(f"  前向传播加速比: {forward_speedup:.2f}x")

                if gpu_backward_time > 0:
                    backward_speedup = cpu_backward_time / gpu_backward_time
                    print(f"  反向传播加速比: {backward_speedup:.2f}x")
            else:
                print("  GPU不可用，跳过GPU测试")

            comparison_results[batch_size] = {
                'cpu_forward': cpu_forward_time,
                'cpu_backward': cpu_backward_time,
                'gpu_forward': gpu_forward_time,
                'gpu_backward': gpu_backward_time,
                'forward_speedup': cpu_forward_time / gpu_forward_time if gpu_forward_time > 0 else 0,
                'backward_speedup': cpu_backward_time / gpu_backward_time if gpu_backward_time > 0 else 0
            }

        self.results['convolution_operations'] = comparison_results
        return comparison_results

    def compare_autograd_operations(self, sizes=(1000, 5000, 10000)):
        """Time an autograd forward/backward pass on CPU vs GPU.

        The workload is ``(x @ y).sin().sum()`` on square tensors.

        Args:
            sizes: iterable of tensor side lengths to benchmark.

        Returns:
            dict mapping size -> timing/speedup metrics; also stored under
            ``self.results['autograd_operations']``.
        """
        print("\n" + "=" * 50)
        print("自动微分性能对比测试")
        print("=" * 50)

        comparison_results = {}

        for size in sizes:
            print(f"\n测试张量大小: {size}")

            # CPU autograd leaves.
            cpu_x = torch.randn(size, size, requires_grad=True)
            cpu_y = torch.randn(size, size, requires_grad=True)

            start_time = time.time()
            cpu_z = (cpu_x @ cpu_y).sin().sum()
            cpu_forward_time = time.time() - start_time

            start_time = time.time()
            cpu_z.backward()
            cpu_backward_time = time.time() - start_time

            print(f"  CPU前向传播: {cpu_forward_time:.4f} 秒")
            print(f"  CPU反向传播: {cpu_backward_time:.4f} 秒")

            gpu_forward_time = float('inf')
            gpu_backward_time = float('inf')
            if self.gpu_device:
                # detach() + requires_grad_() makes the GPU tensors autograd
                # leaves of their own. A plain .to() on a requires_grad leaf
                # yields a non-leaf tensor, so backward() would propagate
                # through the CPU tensors — polluting the GPU timing and
                # double-accumulating into cpu_x.grad / cpu_y.grad.
                start_time = time.time()
                gpu_x = cpu_x.detach().to(self.gpu_device).requires_grad_(True)
                gpu_y = cpu_y.detach().to(self.gpu_device).requires_grad_(True)
                self._sync()
                transfer_time = time.time() - start_time

                # GPU forward pass.
                start_time = time.time()
                gpu_z = (gpu_x @ gpu_y).sin().sum()
                self._sync()
                gpu_forward_time = time.time() - start_time
                print(f"  {self.gpu_device_name}前向传播: {gpu_forward_time:.4f} 秒")

                # GPU backward pass.
                start_time = time.time()
                gpu_z.backward()
                self._sync()
                gpu_backward_time = time.time() - start_time
                print(f"  {self.gpu_device_name}反向传播: {gpu_backward_time:.4f} 秒")

                print(f"  数据传输时间: {transfer_time:.4f} 秒")

                if gpu_forward_time > 0:
                    forward_speedup = cpu_forward_time / gpu_forward_time
                    print(f"  前向传播加速比: {forward_speedup:.2f}x")

                if gpu_backward_time > 0:
                    backward_speedup = cpu_backward_time / gpu_backward_time
                    print(f"  反向传播加速比: {backward_speedup:.2f}x")
            else:
                print("  GPU不可用，跳过GPU测试")

            comparison_results[size] = {
                'cpu_forward': cpu_forward_time,
                'cpu_backward': cpu_backward_time,
                'gpu_forward': gpu_forward_time,
                'gpu_backward': gpu_backward_time,
                'forward_speedup': cpu_forward_time / gpu_forward_time if gpu_forward_time > 0 else 0,
                'backward_speedup': cpu_backward_time / gpu_backward_time if gpu_backward_time > 0 else 0
            }

        self.results['autograd_operations'] = comparison_results
        return comparison_results

    def generate_comparison_report(self):
        """Write ``self.results`` to a timestamped JSON file and print a
        per-test average-speedup summary.

        Returns:
            the path of the written report file.
        """
        print("\n" + "=" * 50)
        print("CPU vs GPU性能对比报告")
        print("=" * 50)

        # Ensure the output directory exists before writing (fresh checkouts
        # have no results/ directory and open() would raise otherwise).
        os.makedirs("results", exist_ok=True)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        report_filename = f"results/comparison_report_{timestamp}.json"

        with open(report_filename, 'w') as f:
            json.dump(self.results, f, indent=2)

        print(f"对比报告已保存到: {report_filename}")

        # Print a summary: average speedup per test family.
        print("\n性能对比总结:")
        for test_name, test_results in self.results.items():
            print(f"\n{test_name}:")
            if test_results:
                avg_speedup = 0
                count = 0
                for key, value in test_results.items():
                    if 'speedup' in value:
                        speedup = value['speedup']
                        # Filter out degenerate values (0 = no GPU,
                        # >=1000 = timer-resolution artifact).
                        if speedup > 0 and speedup < 1000:
                            avg_speedup += speedup
                            count += 1
                    elif 'forward_speedup' in value:
                        forward_speedup = value['forward_speedup']
                        backward_speedup = value['backward_speedup']
                        if forward_speedup > 0 and forward_speedup < 1000:
                            avg_speedup += forward_speedup
                            count += 1
                        if backward_speedup > 0 and backward_speedup < 1000:
                            avg_speedup += backward_speedup
                            count += 1

                if count > 0:
                    avg_speedup /= count
                    print(f"  平均加速比: {avg_speedup:.2f}x")

        return report_filename

    def plot_comparison_results(self):
        """Render a 2x2 chart of the collected results and save it as a
        timestamped PNG under ``results/``. No-op when nothing was run."""
        if not self.results:
            print("没有对比结果可绘制")
            return

        fig, axes = plt.subplots(2, 2, figsize=(15, 12))
        fig.suptitle('CPU vs GPU性能对比分析', fontsize=16)

        # Panel 1: matrix multiplication times.
        if 'matrix_operations' in self.results:
            ax = axes[0, 0]
            matrix_data = self.results['matrix_operations']
            sizes = list(matrix_data.keys())
            cpu_times = [matrix_data[size]['cpu_time'] for size in sizes]
            gpu_times = [matrix_data[size]['gpu_time'] for size in sizes]

            ax.plot(sizes, cpu_times, 'o-', label='CPU')
            ax.plot(sizes, gpu_times, 's-', label=self.gpu_device_name)
            ax.set_xlabel('矩阵大小')
            ax.set_ylabel('时间 (秒)')
            ax.set_title('矩阵乘法性能对比')
            ax.legend()
            ax.grid(True)

        # Panel 2: convolution forward-pass times.
        if 'convolution_operations' in self.results:
            ax = axes[0, 1]
            conv_data = self.results['convolution_operations']
            batch_sizes = list(conv_data.keys())
            cpu_forward_times = [conv_data[bs]['cpu_forward'] for bs in batch_sizes]
            gpu_forward_times = [conv_data[bs]['gpu_forward'] for bs in batch_sizes]

            ax.plot(batch_sizes, cpu_forward_times, 'o-', label='CPU前向传播')
            ax.plot(batch_sizes, gpu_forward_times, 's-', label=f'{self.gpu_device_name}前向传播')
            ax.set_xlabel('批量大小')
            ax.set_ylabel('时间 (秒)')
            ax.set_title('卷积前向传播性能对比')
            ax.legend()
            ax.grid(True)

        # Panel 3: autograd forward-pass times.
        if 'autograd_operations' in self.results:
            ax = axes[1, 0]
            autograd_data = self.results['autograd_operations']
            sizes = list(autograd_data.keys())
            cpu_forward_times = [autograd_data[size]['cpu_forward'] for size in sizes]
            gpu_forward_times = [autograd_data[size]['gpu_forward'] for size in sizes]

            ax.plot(sizes, cpu_forward_times, 'o-', label='CPU前向传播')
            ax.plot(sizes, gpu_forward_times, 's-', label=f'{self.gpu_device_name}前向传播')
            ax.set_xlabel('张量大小')
            ax.set_ylabel('时间 (秒)')
            ax.set_title('自动微分前向传播性能对比')
            ax.legend()
            ax.grid(True)

        # Panel 4: bar chart of all valid speedups across scenarios.
        ax = axes[1, 1]
        all_speedups = []
        labels = []

        if 'matrix_operations' in self.results:
            matrix_data = self.results['matrix_operations']
            for size, data in matrix_data.items():
                if data['speedup'] > 0 and data['speedup'] < 1000:
                    all_speedups.append(data['speedup'])
                    labels.append(f'矩阵{size}')

        if 'convolution_operations' in self.results:
            conv_data = self.results['convolution_operations']
            for batch_size, data in conv_data.items():
                if data['forward_speedup'] > 0 and data['forward_speedup'] < 1000:
                    all_speedups.append(data['forward_speedup'])
                    labels.append(f'卷积前向{batch_size}')
                if data['backward_speedup'] > 0 and data['backward_speedup'] < 1000:
                    all_speedups.append(data['backward_speedup'])
                    labels.append(f'卷积反向{batch_size}')

        if 'autograd_operations' in self.results:
            autograd_data = self.results['autograd_operations']
            for size, data in autograd_data.items():
                if data['forward_speedup'] > 0 and data['forward_speedup'] < 1000:
                    all_speedups.append(data['forward_speedup'])
                    labels.append(f'自动微分前向{size}')
                if data['backward_speedup'] > 0 and data['backward_speedup'] < 1000:
                    all_speedups.append(data['backward_speedup'])
                    labels.append(f'自动微分反向{size}')

        if all_speedups:
            x_pos = np.arange(len(all_speedups))
            ax.bar(x_pos, all_speedups, alpha=0.7)
            ax.set_xlabel('测试场景')
            ax.set_ylabel('加速比 (CPU/GPU)')
            ax.set_title('各场景加速比对比')
            ax.set_xticks(x_pos)
            ax.set_xticklabels(labels, rotation=45, ha='right')
            ax.grid(True, axis='y')

            # Numeric label above each bar.
            for i, v in enumerate(all_speedups):
                ax.text(i, v + 0.1, f'{v:.2f}x', ha='center', va='bottom')
        else:
            ax.text(0.5, 0.5, '无有效的加速比数据', ha='center', va='center', transform=ax.transAxes)

        plt.tight_layout()
        # Ensure the output directory exists before saving the figure.
        os.makedirs("results", exist_ok=True)
        plot_filename = f"results/comparison_plots_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
        plt.savefig(plot_filename, dpi=300, bbox_inches='tight')
        print(f"对比图表已保存到: {plot_filename}")
        plt.show()

def main():
    """Run the full benchmark suite, then emit the JSON report and charts."""
    print("开始CPU vs GPU性能对比测试...")

    suite = CPUvsGPUComparison()

    # Execute each benchmark family in sequence.
    for benchmark in (
        suite.compare_matrix_operations,
        suite.compare_convolution_operations,
        suite.compare_autograd_operations,
    ):
        benchmark()

    # Persist the results, then render the comparison plots.
    report_file = suite.generate_comparison_report()
    suite.plot_comparison_results()

    print(f"\nCPU vs GPU性能对比测试完成!")
    print(f"详细对比报告: {report_file}")


if __name__ == "__main__":
    main()
