#!/usr/bin/env python3
"""
性能基准测试脚本 - 测试PyTorch在不同操作上的性能
"""

import json
import os
import time
from datetime import datetime

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch

# Configure matplotlib fonts so Chinese labels render correctly in charts.
try:
    # Try a cascade of fonts that include CJK glyphs; keep minus signs renderable.
    plt.rcParams['font.sans-serif'] = ['Arial Unicode MS', 'SimHei', 'DejaVu Sans']
    plt.rcParams['axes.unicode_minus'] = False
    matplotlib.rcParams['font.family'] = 'sans-serif'
except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt/SystemExit
    print("警告: 无法设置中文字体，图表可能无法正确显示中文")

class PerformanceBenchmark:
    """Benchmark harness for common PyTorch operations.

    Picks the fastest available device (CUDA > MPS > CPU), accumulates the
    per-test timing dictionaries in ``self.results``, and can dump a
    timestamped JSON report plus matplotlib charts under ``results/``.
    """

    def __init__(self):
        # test_name -> {case -> timing dict}; filled in by the benchmark_* methods
        self.results = {}

        # Pick the fastest available backend, falling back to CPU.
        if torch.cuda.is_available():
            self.device = torch.device('cuda')
            self.device_name = "CUDA GPU"
        elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
            self.device = torch.device('mps')
            self.device_name = "Apple MPS"
        else:
            self.device = torch.device('cpu')
            self.device_name = "CPU"

        print(f"使用设备: {self.device_name}")

    def _synchronize(self):
        """Block until all queued work on the active device has finished.

        CUDA and MPS kernels launch asynchronously, so wall-clock timing is
        only meaningful after an explicit synchronize. No-op on CPU.
        """
        if self.device.type == 'cuda':
            torch.cuda.synchronize()
        elif self.device.type == 'mps':
            torch.mps.synchronize()

    def _time_op(self, fn):
        """Run *fn* once and return ``(elapsed_seconds, fn_result)``.

        Synchronizes the device both before starting the clock (so previously
        queued kernels are not billed to this measurement) and after *fn*
        returns (so asynchronously launched work is included). Uses
        ``time.perf_counter`` — monotonic and high-resolution, unlike
        ``time.time``.
        """
        self._synchronize()
        start = time.perf_counter()
        result = fn()
        self._synchronize()
        return time.perf_counter() - start, result

    def benchmark_matrix_operations(self, sizes=(100, 500, 1000, 2000)):
        """Time matmul, transpose and inverse on square random matrices.

        Args:
            sizes: iterable of square-matrix side lengths to test.

        Returns:
            dict: size -> {'matmul', 'transpose', 'inverse'} times in seconds;
            'inverse' is ``inf`` when skipped (size > 1000) or when it fails.
        """
        print("\n" + "=" * 50)
        print("矩阵操作性能基准测试")
        print("=" * 50)

        matrix_results = {}

        for size in sizes:
            print(f"\n测试矩阵大小: {size}x{size}")

            A = torch.randn(size, size, device=self.device)
            B = torch.randn(size, size, device=self.device)

            matmul_time, _ = self._time_op(lambda: A @ B)
            print(f"  矩阵乘法: {matmul_time:.4f} 秒")

            # .T is a zero-copy view; this times metadata work, not data movement.
            transpose_time, _ = self._time_op(lambda: A.T)
            print(f"  矩阵转置: {transpose_time:.4f} 秒")

            # Inversion is O(n^3) with a large constant; skip the biggest sizes.
            if size <= 1000:
                try:
                    inverse_time, _ = self._time_op(lambda: torch.inverse(A))
                    print(f"  矩阵求逆: {inverse_time:.4f} 秒")
                except RuntimeError:  # singular matrix or backend-unsupported op
                    inverse_time = float('inf')
                    print(f"  矩阵求逆: 失败")
            else:
                inverse_time = float('inf')
                print(f"  矩阵求逆: 跳过（矩阵过大）")

            matrix_results[size] = {
                'matmul': matmul_time,
                'transpose': transpose_time,
                'inverse': inverse_time,
            }

        self.results['matrix_operations'] = matrix_results
        return matrix_results

    def benchmark_convolution_operations(self, batch_sizes=(1, 8, 16, 32)):
        """Time forward/backward of a 3->64 channel 3x3 conv on 224x224 input.

        Args:
            batch_sizes: iterable of batch sizes to test.

        Returns:
            dict: batch_size -> {'forward', 'backward'} times in seconds.
        """
        print("\n" + "=" * 50)
        print("卷积操作性能基准测试")
        print("=" * 50)

        conv_results = {}

        for batch_size in batch_sizes:
            print(f"\n测试批量大小: {batch_size}")

            # Fresh layer per batch size so gradients never accumulate across runs.
            input_tensor = torch.randn(batch_size, 3, 224, 224, device=self.device)
            conv_layer = torch.nn.Conv2d(3, 64, kernel_size=3, padding=1).to(self.device)

            forward_time, output = self._time_op(lambda: conv_layer(input_tensor))
            print(f"  卷积前向传播: {forward_time:.4f} 秒")

            # Backward populates grads of the conv weights (input has no grad).
            backward_time, _ = self._time_op(lambda: output.sum().backward())
            print(f"  卷积反向传播: {backward_time:.4f} 秒")

            conv_results[batch_size] = {
                'forward': forward_time,
                'backward': backward_time,
            }

        self.results['convolution_operations'] = conv_results
        return conv_results

    def benchmark_autograd_operations(self, sizes=(1000, 5000, 10000)):
        """Time forward and backward of ``(x @ y).sin().sum()`` on square tensors.

        Args:
            sizes: iterable of square-tensor side lengths to test.

        Returns:
            dict: size -> {'forward', 'backward'} times in seconds.
        """
        print("\n" + "=" * 50)
        print("自动微分性能基准测试")
        print("=" * 50)

        autograd_results = {}

        for size in sizes:
            print(f"\n测试张量大小: {size}")

            x = torch.randn(size, size, requires_grad=True, device=self.device)
            y = torch.randn(size, size, requires_grad=True, device=self.device)

            # Forward builds the autograd graph; backward traverses it.
            forward_time, z = self._time_op(lambda: (x @ y).sin().sum())
            backward_time, _ = self._time_op(z.backward)

            print(f"  前向传播: {forward_time:.4f} 秒")
            print(f"  反向传播: {backward_time:.4f} 秒")

            autograd_results[size] = {
                'forward': forward_time,
                'backward': backward_time,
            }

        self.results['autograd_operations'] = autograd_results
        return autograd_results

    def benchmark_data_transfer(self, sizes=(1000, 5000, 10000)):
        """Time host<->device copies; CUDA only (empty dict otherwise).

        Args:
            sizes: iterable of square-tensor side lengths to test.

        Returns:
            dict: size -> {'cpu_to_gpu', 'gpu_to_cpu'} times in seconds,
            or {} when no CUDA device is available.
        """
        print("\n" + "=" * 50)
        print("数据传输性能基准测试")
        print("=" * 50)

        if self.device.type != 'cuda':
            print("GPU不可用，跳过数据传输测试")
            return {}

        transfer_results = {}

        for size in sizes:
            print(f"\n测试张量大小: {size}x{size}")

            cpu_tensor = torch.randn(size, size)
            cpu_to_gpu_time, gpu_tensor = self._time_op(cpu_tensor.cuda)
            print(f"  CPU->GPU传输: {cpu_to_gpu_time:.4f} 秒")

            gpu_to_cpu_time, _ = self._time_op(gpu_tensor.cpu)
            print(f"  GPU->CPU传输: {gpu_to_cpu_time:.4f} 秒")

            transfer_results[size] = {
                'cpu_to_gpu': cpu_to_gpu_time,
                'gpu_to_cpu': gpu_to_cpu_time,
            }

        self.results['data_transfer'] = transfer_results
        return transfer_results

    def generate_report(self):
        """Save ``self.results`` as timestamped JSON under results/ and print a summary.

        Returns:
            str: path of the JSON report that was written.
        """
        print("\n" + "=" * 50)
        print("性能测试报告")
        print("=" * 50)

        # The output directory may not exist on a first run.
        os.makedirs("results", exist_ok=True)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        report_filename = f"results/performance_report_{timestamp}.json"

        with open(report_filename, 'w', encoding='utf-8') as f:
            json.dump(self.results, f, indent=2)

        print(f"性能报告已保存到: {report_filename}")

        # Human-readable recap: one line per suite with its test-case count.
        print("\n性能测试总结:")
        for test_name, test_results in self.results.items():
            print(f"\n{test_name}:")
            if test_results:
                for key, value in test_results.items():
                    if isinstance(value, dict):
                        print(f"  {key}: {len(value)} 个测试用例")

        return report_filename

    def plot_results(self):
        """Render a 2x2 grid of timing charts and save it as a PNG under results/."""
        if not self.results:
            print("没有测试结果可绘制")
            return

        fig, axes = plt.subplots(2, 2, figsize=(15, 12))
        fig.suptitle('PyTorch性能基准测试结果', fontsize=16)

        # Top-left: matrix-operation timings vs. matrix size.
        if 'matrix_operations' in self.results:
            ax = axes[0, 0]
            matrix_data = self.results['matrix_operations']
            sizes = list(matrix_data.keys())
            matmul_times = [matrix_data[size]['matmul'] for size in sizes]

            ax.plot(sizes, matmul_times, 'o-', label='矩阵乘法')
            ax.set_xlabel('矩阵大小')
            ax.set_ylabel('时间 (秒)')
            ax.set_title('矩阵操作性能')
            ax.legend()
            ax.grid(True)

        # Top-right: convolution forward/backward vs. batch size.
        if 'convolution_operations' in self.results:
            ax = axes[0, 1]
            conv_data = self.results['convolution_operations']
            batch_sizes = list(conv_data.keys())
            forward_times = [conv_data[bs]['forward'] for bs in batch_sizes]
            backward_times = [conv_data[bs]['backward'] for bs in batch_sizes]

            ax.plot(batch_sizes, forward_times, 'o-', label='前向传播')
            ax.plot(batch_sizes, backward_times, 's-', label='反向传播')
            ax.set_xlabel('批量大小')
            ax.set_ylabel('时间 (秒)')
            ax.set_title('卷积操作性能')
            ax.legend()
            ax.grid(True)

        # Bottom-left: autograd forward/backward vs. tensor size.
        if 'autograd_operations' in self.results:
            ax = axes[1, 0]
            autograd_data = self.results['autograd_operations']
            sizes = list(autograd_data.keys())
            forward_times = [autograd_data[size]['forward'] for size in sizes]
            backward_times = [autograd_data[size]['backward'] for size in sizes]

            ax.plot(sizes, forward_times, 'o-', label='前向传播')
            ax.plot(sizes, backward_times, 's-', label='反向传播')
            ax.set_xlabel('张量大小')
            ax.set_ylabel('时间 (秒)')
            ax.set_title('自动微分性能')
            ax.legend()
            ax.grid(True)

        # Bottom-right: host<->device transfer times vs. tensor size.
        if 'data_transfer' in self.results:
            ax = axes[1, 1]
            transfer_data = self.results['data_transfer']
            sizes = list(transfer_data.keys())
            cpu_to_gpu_times = [transfer_data[size]['cpu_to_gpu'] for size in sizes]
            gpu_to_cpu_times = [transfer_data[size]['gpu_to_cpu'] for size in sizes]

            ax.plot(sizes, cpu_to_gpu_times, 'o-', label='CPU->GPU')
            ax.plot(sizes, gpu_to_cpu_times, 's-', label='GPU->CPU')
            ax.set_xlabel('张量大小')
            ax.set_ylabel('时间 (秒)')
            ax.set_title('数据传输性能')
            ax.legend()
            ax.grid(True)

        plt.tight_layout()
        # Same directory guard as generate_report: first run has no results/ yet.
        os.makedirs("results", exist_ok=True)
        plot_filename = f"results/performance_plots_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
        plt.savefig(plot_filename, dpi=300, bbox_inches='tight')
        print(f"性能图表已保存到: {plot_filename}")
        plt.show()

def main():
    """Entry point: run every benchmark suite, then persist the report and charts."""
    print("开始PyTorch性能基准测试...")

    bench = PerformanceBenchmark()

    # Run each suite in turn; results accumulate on the benchmark instance.
    suites = (
        bench.benchmark_matrix_operations,
        bench.benchmark_convolution_operations,
        bench.benchmark_autograd_operations,
        bench.benchmark_data_transfer,
    )
    for suite in suites:
        suite()

    # Persist artifacts: JSON report first, then the chart grid.
    report_path = bench.generate_report()
    bench.plot_results()

    print(f"\n性能基准测试完成!")
    print(f"详细报告: {report_path}")

if __name__ == "__main__":
    main()
