# utils/memory_optimizer.py
import torch
import gc
from typing import Dict

class MemoryOptimizer:
    """GPU memory optimizer - tracks and manages CUDA memory usage.

    Keeps a running history of allocated/cached memory readings (in GB)
    and applies simple mitigation steps (cache flush, batch-size halving,
    gradient accumulation) when allocation exceeds a threshold.
    """

    def __init__(self):
        # Prefer the GPU when one is present; otherwise everything becomes
        # a cheap no-op / 'cpu_mode' path.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Per-call measurement history (GB) plus the peak allocation seen so far.
        self.memory_stats = {
            'allocated': [],
            'cached': [],
            'peak_allocated': 0
        }

    def optimize_memory_usage(self, model, batch_size: int, *,
                              threshold_gb: float = 4.0) -> Dict:
        """Record current GPU memory usage; optimize when it exceeds the threshold.

        Args:
            model: Model object; only inspected when optimizations run
                (checked for a ``set_gradient_accumulation_steps`` method).
            batch_size: Current batch size; a smaller value may be recommended.
            threshold_gb: Allocation level (GB) above which optimization
                measures are applied. Defaults to the original 4 GB cutoff.

        Returns:
            ``{'status': 'cpu_mode'}`` when no CUDA device is in use;
            otherwise a dict with ``status`` ('normal' or 'optimized') and
            current usage figures, plus — when optimized — the list of
            measures applied and a recommended batch size.
        """
        if self.device.type != 'cuda':
            return {'status': 'cpu_mode'}

        # Drain queued kernels so the counters reflect completed work.
        torch.cuda.synchronize()
        allocated = torch.cuda.memory_allocated() / 1024**3  # GB
        # memory_reserved() is the replacement for the deprecated memory_cached().
        cached = torch.cuda.memory_reserved() / 1024**3  # GB

        self.memory_stats['allocated'].append(allocated)
        self.memory_stats['cached'].append(cached)
        self.memory_stats['peak_allocated'] = max(self.memory_stats['peak_allocated'], allocated)

        # Usage above the threshold triggers the mitigation steps.
        if allocated > threshold_gb:
            return self._apply_memory_optimizations(model, batch_size)

        return {
            'status': 'normal',
            'allocated_gb': allocated,
            'cached_gb': cached
        }

    def _apply_memory_optimizations(self, model, batch_size: int) -> Dict:
        """Apply memory optimization measures and report what was done.

        Returns a dict with post-optimization allocation, the tags of the
        measures applied, and the (possibly reduced) recommended batch size.
        """
        optimizations_applied = []

        # 1. Return cached-but-unused blocks to the driver.
        torch.cuda.empty_cache()
        optimizations_applied.append('cleared_cache')

        # 2. Halve large batch sizes, never going below 8.
        if batch_size > 16:
            new_batch_size = max(8, batch_size // 2)
            optimizations_applied.append(f'reduced_batch_size_{batch_size}_to_{new_batch_size}')
            batch_size = new_batch_size

        # 3. Turn on gradient accumulation when the model supports it.
        if hasattr(model, 'set_gradient_accumulation_steps'):
            model.set_gradient_accumulation_steps(2)
            optimizations_applied.append('enabled_gradient_accumulation')

        # 4. Force Python garbage collection, then wait for the device so the
        #    follow-up reading is accurate.
        gc.collect()
        torch.cuda.synchronize()

        # Re-measure after optimization.
        allocated = torch.cuda.memory_allocated() / 1024**3

        return {
            'status': 'optimized',
            'allocated_gb': allocated,
            'optimizations': optimizations_applied,
            'recommended_batch_size': batch_size
        }

    def get_memory_usage_report(self) -> str:
        """Return a human-readable (Chinese) GPU memory usage report."""
        if self.device.type != 'cuda':
            return "运行在CPU模式"

        allocated = torch.cuda.memory_allocated() / 1024**3
        # memory_reserved() replaces the deprecated memory_cached().
        cached = torch.cuda.memory_reserved() / 1024**3

        # Average of recorded allocations, 0.0 when nothing was recorded yet.
        # (The original used np.mean without importing numpy, which raised
        # NameError here.)
        history = self.memory_stats['allocated']
        avg_allocated = sum(history) / len(history) if history else 0.0

        report = f"""
显存使用报告:
- 当前分配: {allocated:.2f} GB
- 当前缓存: {cached:.2f} GB  
- 峰值分配: {self.memory_stats['peak_allocated']:.2f} GB
- 平均分配: {avg_allocated:.2f} GB
"""
        return report

    def clear_memory(self):
        """Release cached CUDA memory (GPU only) and run Python GC."""
        if self.device.type == 'cuda':
            torch.cuda.empty_cache()
        gc.collect()