#!/usr/bin/env python3
"""
内存使用测试脚本 - 监控和分析PyTorch的内存使用情况
"""

import gc
import json
import os
import time
from datetime import datetime

import matplotlib
import matplotlib.pyplot as plt
import psutil
import torch

# Configure matplotlib so Chinese labels in the plots render correctly.
try:
    # Font candidates tried in order; the list must include CJK-capable fonts.
    plt.rcParams['font.sans-serif'] = ['Arial Unicode MS', 'SimHei', 'DejaVu Sans']
    plt.rcParams['axes.unicode_minus'] = False
    matplotlib.rcParams['font.family'] = 'sans-serif'
except Exception:  # narrowed from bare `except:` so SystemExit/KeyboardInterrupt propagate
    print("警告: 无法设置中文字体，图表可能无法正确显示中文")

class MemoryMonitor:
    """Monitor, record, and analyze system/GPU memory while running PyTorch workloads.

    Snapshots taken via :meth:`record_memory_usage` accumulate in
    ``self.memory_records`` and can be dumped to a JSON report
    (:meth:`generate_memory_report`) or plotted (:meth:`plot_memory_usage`).
    """

    def __init__(self):
        # Run all tensor tests on CUDA when available, otherwise on CPU.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # Chronological list of snapshot dicts produced by record_memory_usage().
        self.memory_records = []

    def get_system_memory_info(self):
        """Return host RAM statistics via psutil.

        Returns:
            dict with ``total``/``available``/``used`` in bytes and
            ``percent`` as a float percentage.
        """
        memory = psutil.virtual_memory()
        return {
            'total': memory.total,
            'available': memory.available,
            'used': memory.used,
            'percent': memory.percent
        }

    def get_gpu_memory_info(self):
        """Return CUDA allocator statistics in bytes.

        Returns:
            dict with ``allocated``/``reserved``/``max_allocated``/``max_reserved``
            on CUDA devices; an empty dict on CPU-only setups.
        """
        if self.device.type != 'cuda':
            return {}

        return {
            'allocated': torch.cuda.memory_allocated(),
            'reserved': torch.cuda.memory_reserved(),
            'max_allocated': torch.cuda.max_memory_allocated(),
            'max_reserved': torch.cuda.max_memory_reserved()
        }

    def record_memory_usage(self, operation_name=""):
        """Snapshot current system and GPU memory, tagged with *operation_name*.

        The snapshot is appended to ``self.memory_records`` and also returned.
        """
        record = {
            'timestamp': time.time(),
            'operation': operation_name,
            'system_memory': self.get_system_memory_info(),
            'gpu_memory': self.get_gpu_memory_info()
        }
        self.memory_records.append(record)
        return record

    def test_memory_allocation(self, sizes=None):
        """Time allocation, a matmul, and release of square random tensors.

        Args:
            sizes: iterable of tensor edge lengths. Defaults to
                ``[1000, 5000, 10000]`` — a ``None`` sentinel replaces the
                original mutable-list default argument.

        Returns:
            dict mapping each size to its ``allocation_time`` /
            ``operation_time`` / ``release_time`` in seconds.
        """
        if sizes is None:
            sizes = [1000, 5000, 10000]

        print("\n" + "=" * 50)
        print("内存分配测试")
        print("=" * 50)

        allocation_results = {}

        for size in sizes:
            print(f"\n测试张量大小: {size}x{size}")

            # Snapshot before any allocation for this size.
            self.record_memory_usage(f"初始状态_{size}")

            # Allocation timing; synchronize so async CUDA work is included.
            start_time = time.time()
            tensor = torch.randn(size, size, device=self.device)
            if self.device.type == 'cuda':
                torch.cuda.synchronize()
            allocation_time = time.time() - start_time

            self.record_memory_usage(f"分配后_{size}")

            # Matmul exercises the allocator with a second size×size buffer.
            start_time = time.time()
            result = tensor @ tensor.T
            if self.device.type == 'cuda':
                torch.cuda.synchronize()
            operation_time = time.time() - start_time

            self.record_memory_usage(f"操作后_{size}")

            # Release timing includes the CUDA cache flush and a gc pass.
            start_time = time.time()
            del tensor, result
            if self.device.type == 'cuda':
                torch.cuda.empty_cache()
            gc.collect()
            release_time = time.time() - start_time

            self.record_memory_usage(f"释放后_{size}")

            print(f"  分配时间: {allocation_time:.4f} 秒")
            print(f"  操作时间: {operation_time:.4f} 秒")
            print(f"  释放时间: {release_time:.4f} 秒")

            allocation_results[size] = {
                'allocation_time': allocation_time,
                'operation_time': operation_time,
                'release_time': release_time
            }

        return allocation_results

    def test_memory_leak(self, iterations=100):
        """Repeatedly allocate temporaries without explicit release and look
        for monotonic growth of allocated GPU memory (threshold: 10 MB)."""
        print("\n" + "=" * 50)
        print("内存泄漏测试")
        print("=" * 50)

        initial_memory = self.get_gpu_memory_info() if self.device.type == 'cuda' else {}

        tensor = result = None  # keep names bound even when iterations == 0
        for i in range(iterations):
            # Create temporaries and deliberately do NOT release them here;
            # the loop rebinding should let the old ones be collected.
            tensor = torch.randn(1000, 1000, device=self.device)
            result = tensor @ tensor.T

            if i % 10 == 0:
                current_memory = self.get_gpu_memory_info() if self.device.type == 'cuda' else {}
                print(f"迭代 {i}: 已分配内存: {current_memory.get('allocated', 0) / 1024**2:.2f} MB")

        final_memory = self.get_gpu_memory_info() if self.device.type == 'cuda' else {}

        # Drop the last iteration's tensors BEFORE collecting; otherwise they
        # are still referenced and gc.collect()/empty_cache() cannot free
        # them, which would distort the "cleaned" measurement below.
        del tensor, result
        gc.collect()
        if self.device.type == 'cuda':
            torch.cuda.empty_cache()

        cleaned_memory = self.get_gpu_memory_info() if self.device.type == 'cuda' else {}

        print(f"\n内存泄漏测试结果:")
        if self.device.type == 'cuda':
            print(f"  初始内存: {initial_memory.get('allocated', 0) / 1024**2:.2f} MB")
            print(f"  最终内存: {final_memory.get('allocated', 0) / 1024**2:.2f} MB")
            print(f"  清理后内存: {cleaned_memory.get('allocated', 0) / 1024**2:.2f} MB")

            memory_increase = final_memory.get('allocated', 0) - initial_memory.get('allocated', 0)
            print(f"  内存增长: {memory_increase / 1024**2:.2f} MB")

            if memory_increase > 10 * 1024 * 1024:  # 10 MB leak threshold
                print("  警告: 检测到可能的内存泄漏!")
            else:
                print("  内存使用正常，未检测到明显泄漏")
        else:
            print("  GPU不可用，跳过GPU内存泄漏测试")

    def test_batch_size_memory_impact(self, batch_sizes=None):
        """Measure forward/backward time (and record memory snapshots) for a
        small MLP across several batch sizes.

        Args:
            batch_sizes: iterable of batch sizes. Defaults to
                ``[1, 8, 16, 32, 64]`` — a ``None`` sentinel replaces the
                original mutable-list default argument.

        Returns:
            dict mapping batch size to ``forward_time`` / ``backward_time`` (s).
        """
        if batch_sizes is None:
            batch_sizes = [1, 8, 16, 32, 64]

        print("\n" + "=" * 50)
        print("批量大小对内存使用的影响测试")
        print("=" * 50)

        batch_results = {}

        for batch_size in batch_sizes:
            print(f"\n测试批量大小: {batch_size}")

            initial_memory = self.record_memory_usage(f"初始状态_batch_{batch_size}")

            # Fresh model and synthetic classification data per batch size so
            # measurements do not accumulate gradients across runs.
            model = torch.nn.Sequential(
                torch.nn.Linear(1000, 512),
                torch.nn.ReLU(),
                torch.nn.Linear(512, 256),
                torch.nn.ReLU(),
                torch.nn.Linear(256, 10)
            ).to(self.device)

            data = torch.randn(batch_size, 1000, device=self.device)
            target = torch.randint(0, 10, (batch_size,), device=self.device)

            self.record_memory_usage(f"模型加载_batch_{batch_size}")

            # Forward pass timing (synchronized on CUDA).
            start_time = time.time()
            output = model(data)
            if self.device.type == 'cuda':
                torch.cuda.synchronize()
            forward_time = time.time() - start_time

            self.record_memory_usage(f"前向传播_batch_{batch_size}")

            # Backward pass timing; includes the loss computation.
            start_time = time.time()
            loss = torch.nn.functional.cross_entropy(output, target)
            loss.backward()
            if self.device.type == 'cuda':
                torch.cuda.synchronize()
            backward_time = time.time() - start_time

            self.record_memory_usage(f"反向传播_batch_{batch_size}")

            # Release everything for this batch size before the next run.
            del model, data, target, output, loss
            gc.collect()
            if self.device.type == 'cuda':
                torch.cuda.empty_cache()

            print(f"  前向传播时间: {forward_time:.4f} 秒")
            print(f"  反向传播时间: {backward_time:.4f} 秒")

            batch_results[batch_size] = {
                'forward_time': forward_time,
                'backward_time': backward_time
            }

        return batch_results

    def generate_memory_report(self):
        """Dump all recorded snapshots to ``results/memory_report_<ts>.json``
        and print a short GPU-memory summary.

        Returns:
            the report filename that was written.
        """
        print("\n" + "=" * 50)
        print("内存使用报告")
        print("=" * 50)

        report_data = {
            'device': str(self.device),
            'timestamp': datetime.now().isoformat(),
            'memory_records': self.memory_records
        }

        # Ensure the output directory exists; open(..., 'w') does not create it.
        os.makedirs("results", exist_ok=True)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        report_filename = f"results/memory_report_{timestamp}.json"

        # UTF-8 + ensure_ascii=False keeps the Chinese operation names
        # readable in the JSON file; default=str stringifies any
        # non-serializable values (deliberate best-effort serialization).
        with open(report_filename, 'w', encoding='utf-8') as f:
            json.dump(report_data, f, indent=2, ensure_ascii=False, default=str)

        print(f"内存报告已保存到: {report_filename}")

        # Summarize the GPU memory profile across all snapshots.
        if self.memory_records:
            print("\n内存使用分析:")
            initial_memory = self.memory_records[0]['gpu_memory'].get('allocated', 0) if self.device.type == 'cuda' else 0
            max_memory = max(record['gpu_memory'].get('allocated', 0) for record in self.memory_records) if self.device.type == 'cuda' else 0

            if self.device.type == 'cuda':
                print(f"  初始GPU内存: {initial_memory / 1024**2:.2f} MB")
                print(f"  最大GPU内存: {max_memory / 1024**2:.2f} MB")
                print(f"  内存峰值: {(max_memory - initial_memory) / 1024**2:.2f} MB")

        return report_filename

    def plot_memory_usage(self):
        """Plot recorded GPU and system memory over time to
        ``results/memory_plots_<ts>.png`` and show the figure."""
        if not self.memory_records:
            print("没有内存记录可绘制")
            return

        fig, axes = plt.subplots(2, 2, figsize=(15, 12))
        fig.suptitle('PyTorch内存使用分析', fontsize=16)

        # Shared x-axis data for every subplot.
        timestamps = [record['timestamp'] for record in self.memory_records]
        operations = [record['operation'] for record in self.memory_records]

        # Top-left: GPU allocated/reserved memory (CUDA only).
        if self.device.type == 'cuda':
            ax = axes[0, 0]
            gpu_allocated = [record['gpu_memory'].get('allocated', 0) / 1024**2 for record in self.memory_records]
            gpu_reserved = [record['gpu_memory'].get('reserved', 0) / 1024**2 for record in self.memory_records]

            ax.plot(timestamps, gpu_allocated, 'o-', label='已分配内存')
            ax.plot(timestamps, gpu_reserved, 's-', label='已保留内存')
            ax.set_xlabel('时间')
            ax.set_ylabel('内存 (MB)')
            ax.set_title('GPU内存使用')
            ax.legend()
            ax.grid(True)

            # Annotate every 5th point with its operation name to avoid clutter.
            for i, op in enumerate(operations):
                if i % 5 == 0:
                    ax.annotate(op, (timestamps[i], gpu_allocated[i]),
                               textcoords="offset points", xytext=(0,10), ha='center', fontsize=8)

        # Top-right: system used/available memory.
        ax = axes[0, 1]
        system_used = [record['system_memory']['used'] / 1024**3 for record in self.memory_records]
        system_available = [record['system_memory']['available'] / 1024**3 for record in self.memory_records]

        ax.plot(timestamps, system_used, 'o-', label='已使用内存')
        ax.plot(timestamps, system_available, 's-', label='可用内存')
        ax.set_xlabel('时间')
        ax.set_ylabel('内存 (GB)')
        ax.set_title('系统内存使用')
        ax.legend()
        ax.grid(True)

        # Bottom-left: system memory usage percentage.
        ax = axes[1, 0]
        system_percent = [record['system_memory']['percent'] for record in self.memory_records]

        ax.plot(timestamps, system_percent, 'o-', color='red')
        ax.set_xlabel('时间')
        ax.set_ylabel('使用百分比 (%)')
        ax.set_title('系统内存使用百分比')
        ax.grid(True)
        ax.set_ylim(0, 100)

        # Bottom-right: reserved for a future operation-time visualization.
        ax = axes[1, 1]

        plt.tight_layout()
        # Ensure the output directory exists before saving the figure.
        os.makedirs("results", exist_ok=True)
        plot_filename = f"results/memory_plots_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
        plt.savefig(plot_filename, dpi=300, bbox_inches='tight')
        print(f"内存使用图表已保存到: {plot_filename}")
        plt.show()

def main():
    """Drive the full memory-test suite: snapshot, run all tests, report, plot."""
    print("开始PyTorch内存使用测试...")

    monitor = MemoryMonitor()

    # Bracket the whole run with snapshots so the report covers everything.
    monitor.record_memory_usage("测试开始")

    # Each test manages its own allocations and cleanup.
    tests = (
        monitor.test_memory_allocation,
        monitor.test_memory_leak,
        monitor.test_batch_size_memory_impact,
    )
    for run_test in tests:
        run_test()

    monitor.record_memory_usage("测试结束")

    # Persist the JSON report first, then render the charts.
    report_file = monitor.generate_memory_report()
    monitor.plot_memory_usage()

    print("\n内存使用测试完成!")
    print(f"详细报告: {report_file}")


if __name__ == "__main__":
    main()
