"""
VRAM Optimizer Nodes for ComfyUI

Provides GPU memory optimization and management functionality.
"""

import gc
import logging
import torch
from ..utils.memory_utils import optimize_vram, get_memory_info, format_bytes

# Configure logging
logger = logging.getLogger(__name__)

# ComfyUI-specific imports with fallback
try:
    import comfy.model_management
    COMFY_AVAILABLE = True
    logger.debug("ComfyUI model management available for VRAM optimization")
except ImportError:
    COMFY_AVAILABLE = False
    logger.debug("ComfyUI model management not available - using basic PyTorch methods")

class VRAMOptimizerNode:
    """
    VRAM Optimizer Node - Optimizes GPU memory usage.

    Clears CUDA allocator caches (and ComfyUI model/cache state when the
    ComfyUI runtime is importable) at one of three intensity levels, then
    reports per-device before/after statistics plus recommendations.
    """

    @classmethod
    def INPUT_TYPES(cls):
        """Describe the node's inputs for the ComfyUI graph editor."""
        return {
            "required": {
                # "trigger" carries no data; wiring it to an upstream value
                # forces this node to re-execute when that value changes.
                "trigger": ("INT", {
                    "default": 0,
                    "min": 0,
                    "max": 1000000,
                    "step": 1,
                    "display": "number"
                }),
                "optimization_level": (["Conservative", "Moderate", "Aggressive"], {
                    "default": "Moderate"
                }),
                "reset_peak_stats": ("BOOLEAN", {"default": True}),
            },
            "optional": {
                "target_device": ("INT", {
                    "default": -1,  # -1 means all devices
                    "min": -1,
                    "max": 7,
                    "step": 1,
                    "display": "number"
                }),
            }
        }

    RETURN_TYPES = (
        "STRING",      # Optimization report
        "INT",         # Total memory freed (bytes)
        "BOOLEAN",     # Success status
        "STRING",      # Recommendations
    )

    RETURN_NAMES = (
        "optimization_report",
        "memory_freed",
        "success",
        "recommendations",
    )

    FUNCTION = "optimize_vram"
    CATEGORY = "Memory Management"
    OUTPUT_NODE = True

    def optimize_vram(self, trigger, optimization_level="Moderate", reset_peak_stats=True, target_device=-1):
        """Optimize VRAM usage and return results.

        Args:
            trigger: Unused value whose only purpose is to force re-execution.
            optimization_level: "Conservative", "Moderate" or "Aggressive".
            reset_peak_stats: When True, reset CUDA peak/accumulated counters.
            target_device: CUDA device index, or -1 to optimize every device.

        Returns:
            Tuple of (report_str, total_bytes_freed, success_bool, recommendations_str).
            Never raises; failures are reported via the success flag.
        """

        if not torch.cuda.is_available():
            logger.warning("CUDA not available for VRAM optimization")
            return (
                "CUDA not available - VRAM optimization skipped",
                0,
                False,
                "Install CUDA to enable VRAM optimization"
            )

        # Input validation
        if target_device != -1 and (target_device < 0 or target_device >= torch.cuda.device_count()):
            error_msg = f"Invalid target device {target_device}. Available devices: 0-{torch.cuda.device_count()-1}"
            logger.error(error_msg)
            return (
                error_msg,
                0,
                False,
                "Check device ID and try again"
            )

        if optimization_level not in ("Conservative", "Moderate", "Aggressive"):
            error_msg = f"Invalid optimization level: {optimization_level}"
            logger.error(error_msg)
            return (
                error_msg,
                0,
                False,
                "Use Conservative, Moderate, or Aggressive"
            )

        try:
            logger.info(f"Starting VRAM optimization: level={optimization_level}, device={target_device}")

            # Snapshot VRAM state before and after the optimization passes.
            initial_memory = get_memory_info()

            optimization_result = self._perform_optimization(optimization_level, reset_peak_stats, target_device)

            final_memory = get_memory_info()

            total_freed = self._calculate_total_freed(optimization_result)

            report = self._create_optimization_report(
                initial_memory,
                final_memory,
                optimization_result,
                optimization_level,
                target_device
            )

            recommendations = self._generate_recommendations(final_memory, optimization_level)

            logger.info(f"VRAM optimization completed: {total_freed} bytes freed")
            return (
                report,
                total_freed,
                True,
                recommendations
            )

        except Exception as e:
            error_report = f"VRAM optimization failed: {str(e)}"
            logger.error(error_report)
            return (
                error_report,
                0,
                False,
                "Check CUDA installation and GPU availability"
            )

    def _perform_optimization(self, level, reset_peak_stats, target_device):
        """Run optimization passes and collect per-device statistics.

        Args:
            level: Optimization level (already validated by the caller).
            reset_peak_stats: When True, reset CUDA peak/accumulated counters.
            target_device: CUDA device index, or -1 for every visible device.

        Returns:
            Dict mapping 'device_<id>' to a stats dict, or to {'error': msg}
            for devices whose optimization raised.
        """

        result = {}

        device_range = range(torch.cuda.device_count()) if target_device == -1 else [target_device]

        # ComfyUI model/cache management is process-global, not per-device, so
        # run it once up front instead of repeating it for every device.
        if COMFY_AVAILABLE and level in ("Moderate", "Aggressive"):
            try:
                if level == "Aggressive":
                    logger.debug("Performing aggressive ComfyUI model unloading")
                    comfy.model_management.unload_all_models()
                logger.debug("Performing ComfyUI cache clearing")
                comfy.model_management.soft_empty_cache()
            except Exception as e:
                logger.warning(f"ComfyUI optimization failed: {e}")

        for device_id in device_range:
            if device_id >= torch.cuda.device_count():
                continue

            try:
                logger.debug(f"Optimizing device {device_id}")
                initial_allocated = torch.cuda.memory_allocated(device_id)
                initial_reserved = torch.cuda.memory_reserved(device_id)

                # empty_cache()/synchronize() act on the *current* device, so
                # switch device context to actually target the device we are
                # optimizing (otherwise multi-GPU runs only touch device 0).
                with torch.cuda.device(device_id):
                    # Conservative (and up): basic cache clearing.
                    torch.cuda.empty_cache()
                    torch.cuda.synchronize()

                    # Moderate (and up): garbage-collect first so Python frees
                    # tensor references, then clear the allocator cache again.
                    if level in ("Moderate", "Aggressive"):
                        gc.collect()
                        torch.cuda.empty_cache()
                        torch.cuda.synchronize()

                    # Aggressive: several extra collect/clear passes.
                    if level == "Aggressive":
                        for i in range(3):
                            gc.collect()
                            torch.cuda.empty_cache()
                            torch.cuda.synchronize()
                            logger.debug(f"Aggressive optimization pass {i+1} for device {device_id}")

                # Reset peak stats if requested; guarded because some backends
                # may not support these counters.
                if reset_peak_stats:
                    try:
                        torch.cuda.reset_peak_memory_stats(device_id)
                        torch.cuda.reset_accumulated_memory_stats(device_id)
                        logger.debug(f"Peak stats reset for device {device_id}")
                    except Exception as e:
                        logger.warning(f"Failed to reset peak stats for device {device_id}: {e}")

                final_allocated = torch.cuda.memory_allocated(device_id)
                final_reserved = torch.cuda.memory_reserved(device_id)

                device_result = {
                    'device_name': torch.cuda.get_device_name(device_id),
                    'initial_allocated': initial_allocated,
                    'final_allocated': final_allocated,
                    'initial_reserved': initial_reserved,
                    'final_reserved': final_reserved,
                    'freed_allocated': initial_allocated - final_allocated,
                    'freed_reserved': initial_reserved - final_reserved,
                    'optimization_level': level,
                    'peak_stats_reset': reset_peak_stats
                }

                result[f'device_{device_id}'] = device_result
                logger.debug(f"Device {device_id} optimization complete: {device_result['freed_allocated']} bytes freed")

            except Exception as e:
                logger.error(f"Optimization failed for device {device_id}: {e}")
                result[f'device_{device_id}'] = {'error': str(e)}

        return result

    def _calculate_total_freed(self, optimization_result):
        """Sum allocated + reserved bytes freed across all device entries.

        Entries without the 'freed_*' keys (e.g. {'error': ...}) count as 0.
        """
        total_freed = 0
        for device_info in optimization_result.values():
            if isinstance(device_info, dict):
                total_freed += device_info.get('freed_allocated', 0) + device_info.get('freed_reserved', 0)
        return total_freed

    def _create_optimization_report(self, initial_memory, final_memory, optimization_result, level, target_device):
        """Create a human-readable multi-line optimization report.

        initial_memory/final_memory are accepted for interface stability but
        the report is built from the per-device optimization_result entries.
        """

        report_lines = ["=== VRAM OPTIMIZATION REPORT ===\n"]

        # Configuration
        report_lines.append(f"Optimization Level: {level}")
        report_lines.append(f"Target Device: {'All Devices' if target_device == -1 else f'Device {target_device}'}")

        # Per-device results
        for device_key, device_info in optimization_result.items():
            if isinstance(device_info, dict):
                device_name = device_info.get('device_name', 'Unknown')

                report_lines.append(f"\n{device_key.upper()} ({device_name}):")
                report_lines.append(f"  Initial Allocated: {format_bytes(device_info.get('initial_allocated', 0))}")
                report_lines.append(f"  Final Allocated: {format_bytes(device_info.get('final_allocated', 0))}")
                report_lines.append(f"  Allocated Freed: {format_bytes(device_info.get('freed_allocated', 0))}")

                report_lines.append(f"  Initial Reserved: {format_bytes(device_info.get('initial_reserved', 0))}")
                report_lines.append(f"  Final Reserved: {format_bytes(device_info.get('final_reserved', 0))}")
                report_lines.append(f"  Reserved Freed: {format_bytes(device_info.get('freed_reserved', 0))}")

                total_freed = device_info.get('freed_allocated', 0) + device_info.get('freed_reserved', 0)
                report_lines.append(f"  Total Freed: {format_bytes(total_freed)}")

        # Summary. The peak-stat reset flag is applied uniformly, so read it
        # from the first device entry; next() with a default avoids the
        # IndexError the old list(...)[0] lookup raised on an empty result.
        total_system_freed = self._calculate_total_freed(optimization_result)
        first_entry = next(iter(optimization_result.values()), {})
        peak_reset = isinstance(first_entry, dict) and first_entry.get('peak_stats_reset', False)
        report_lines.append(f"\nSUMMARY:")
        report_lines.append(f"  Total Memory Freed: {format_bytes(total_system_freed)}")
        report_lines.append(f"  Optimization Level: {level}")
        report_lines.append(f"  Peak Stats Reset: {'Yes' if peak_reset else 'No'}")

        return "\n".join(report_lines)

    def _generate_recommendations(self, memory_info, optimization_level):
        """Generate a ' | '-joined recommendation string from current usage.

        NOTE(review): assumes memory_info exposes 'gpu_<i>_allocated' and
        'gpu_<i>_reserved' byte counts (as produced by get_memory_info) —
        confirm against ..utils.memory_utils.
        """

        recommendations = []

        # Check current VRAM usage
        for i in range(torch.cuda.device_count()):
            allocated = memory_info.get(f'gpu_{i}_allocated', 0)
            reserved = memory_info.get(f'gpu_{i}_reserved', 0)

            if allocated > 1024**3:  # > 1GB allocated
                recommendations.append(f"GPU {i}: High VRAM usage detected ({format_bytes(allocated)})")

            if reserved > allocated * 2:  # Reserved > 2x allocated
                recommendations.append(f"GPU {i}: High reserved memory ratio - consider aggressive cleanup")

        # Level-specific recommendations
        if optimization_level == "Conservative":
            recommendations.append("Consider using 'Moderate' optimization for better memory recovery")
        elif optimization_level == "Aggressive":
            recommendations.append("Using maximum optimization - monitor for any performance impacts")

        if not recommendations:
            recommendations.append("VRAM usage is optimal - no immediate action needed")

        return " | ".join(recommendations)


class VRAMUnloadNode:
    """
    VRAM Unload Node - Forcibly unloads models from VRAM.

    Runs escalating rounds of garbage collection and CUDA cache clearing
    ("Smart" < "Aggressive" < "Complete") and reports how much memory the
    allocator released.
    """

    @classmethod
    def INPUT_TYPES(cls):
        """Describe the node's inputs for the ComfyUI graph editor."""
        return {
            "required": {
                # "trigger" carries no data; wiring it to an upstream value
                # forces this node to re-execute when that value changes.
                "trigger": ("INT", {
                    "default": 0,
                    "min": 0,
                    "max": 1000000,
                    "step": 1,
                    "display": "number"
                }),
                "unload_strategy": (["Smart", "Aggressive", "Complete"], {
                    "default": "Smart"
                }),
            },
            "optional": {
                "target_device": ("INT", {
                    "default": -1,  # -1 means all devices
                    "min": -1,
                    "max": 7,
                    "step": 1,
                    "display": "number"
                }),
                "force_unload": ("BOOLEAN", {"default": False}),
            }
        }

    RETURN_TYPES = (
        "STRING",      # Unload report
        "INT",         # Memory freed (bytes)
        "BOOLEAN",     # Success status
    )

    RETURN_NAMES = (
        "unload_report",
        "memory_freed",
        "success",
    )

    FUNCTION = "unload_vram"
    CATEGORY = "Memory Management"
    OUTPUT_NODE = True

    def unload_vram(self, trigger, unload_strategy="Smart", target_device=-1, force_unload=False):
        """Unload cached memory from VRAM.

        Args:
            trigger: Unused value whose only purpose is to force re-execution.
            unload_strategy: "Smart", "Aggressive" or "Complete".
            target_device: CUDA device index, or -1 for every device.
            force_unload: Recorded in the report; see _perform_unload.

        Returns:
            Tuple of (report_str, bytes_freed, success_bool). Never raises.
        """

        if not torch.cuda.is_available():
            return (
                "CUDA not available - VRAM unload skipped",
                0,
                False
            )

        # Validate the device index up front (mirrors VRAMOptimizerNode) so an
        # out-of-range device fails loudly instead of silently reporting success.
        if target_device != -1 and (target_device < 0 or target_device >= torch.cuda.device_count()):
            return (
                f"Invalid target device {target_device}. Available devices: 0-{torch.cuda.device_count()-1}",
                0,
                False
            )

        try:
            # Snapshot memory before and after the cleanup passes.
            initial_memory = get_memory_info()

            self._perform_unload(unload_strategy, target_device, force_unload)

            final_memory = get_memory_info()

            memory_freed = self._calculate_memory_freed(initial_memory, final_memory, target_device)

            report = self._create_unload_report(
                initial_memory,
                final_memory,
                unload_strategy,
                target_device,
                force_unload,
                memory_freed
            )

            return (
                report,
                memory_freed,
                True
            )

        except Exception as e:
            error_report = f"VRAM unload failed: {str(e)}"
            return (
                error_report,
                0,
                False
            )

    def _perform_unload(self, strategy, target_device, force_unload):
        """Run the GC/cache-clearing passes for the chosen strategy.

        NOTE(review): force_unload is only recorded in the returned summary;
        it does not currently change the cleanup performed — confirm whether
        a stronger path was intended for force_unload=True.
        """

        device_range = range(torch.cuda.device_count()) if target_device == -1 else [target_device]

        def _clear_caches():
            # empty_cache()/synchronize() operate on the *current* device, so
            # enter each device's context to actually target it (otherwise
            # multi-GPU cleanup only ever touches the current device).
            for device_id in device_range:
                if device_id < torch.cuda.device_count():
                    with torch.cuda.device(device_id):
                        torch.cuda.empty_cache()
                        torch.cuda.synchronize()

        if strategy == "Smart":
            # Targeted cleanup: one cache clear per device, one GC pass.
            _clear_caches()
            gc.collect()

        elif strategy == "Aggressive":
            # More thorough: several GC + cache-clear rounds.
            for _ in range(3):
                gc.collect()
                _clear_caches()

        elif strategy == "Complete":
            # Maximum cleanup: repeated GC, then repeated cache clears with
            # memory-stat resets, then a final GC pass.
            for _ in range(5):
                gc.collect()

            for _ in range(3):
                for device_id in device_range:
                    if device_id < torch.cuda.device_count():
                        with torch.cuda.device(device_id):
                            torch.cuda.empty_cache()
                            torch.cuda.synchronize()

                        # Guard the stat resets (consistent with the optimizer
                        # node) so an unsupported backend cannot abort cleanup.
                        try:
                            torch.cuda.reset_peak_memory_stats(device_id)
                            torch.cuda.reset_accumulated_memory_stats(device_id)
                        except Exception as e:
                            logger.warning(f"Failed to reset memory stats for device {device_id}: {e}")

            gc.collect()

        return {"strategy": strategy, "devices": list(device_range), "force_unload": force_unload}

    def _calculate_memory_freed(self, initial_memory, final_memory, target_device):
        """Sum (allocated + reserved) deltas between two memory snapshots.

        NOTE(review): assumes snapshots expose 'gpu_<i>_allocated' and
        'gpu_<i>_reserved' byte counts (as produced by get_memory_info).

        Returns a non-negative byte count; growth is clamped to 0.
        """

        if target_device == -1:
            devices = range(torch.cuda.device_count())
        elif target_device < torch.cuda.device_count():
            devices = [target_device]
        else:
            devices = []

        memory_freed = 0
        for i in devices:
            memory_freed += initial_memory.get(f'gpu_{i}_allocated', 0) - final_memory.get(f'gpu_{i}_allocated', 0)
            memory_freed += initial_memory.get(f'gpu_{i}_reserved', 0) - final_memory.get(f'gpu_{i}_reserved', 0)

        return max(0, memory_freed)  # Ensure non-negative

    def _create_unload_report(self, initial_memory, final_memory, strategy, target_device, force_unload, memory_freed):
        """Create a human-readable multi-line unload report."""

        report_lines = ["=== VRAM UNLOAD REPORT ===\n"]

        # Configuration
        report_lines.append(f"Unload Strategy: {strategy}")
        report_lines.append(f"Target Device: {'All Devices' if target_device == -1 else f'Device {target_device}'}")
        report_lines.append(f"Force Unload: {'Yes' if force_unload else 'No'}")

        # Per-device memory changes
        device_range = range(torch.cuda.device_count()) if target_device == -1 else [target_device]

        for device_id in device_range:
            if device_id >= torch.cuda.device_count():
                continue

            device_name = torch.cuda.get_device_name(device_id)

            initial_allocated = initial_memory.get(f'gpu_{device_id}_allocated', 0)
            final_allocated = final_memory.get(f'gpu_{device_id}_allocated', 0)
            initial_reserved = initial_memory.get(f'gpu_{device_id}_reserved', 0)
            final_reserved = final_memory.get(f'gpu_{device_id}_reserved', 0)

            allocated_freed = initial_allocated - final_allocated
            reserved_freed = initial_reserved - final_reserved

            report_lines.append(f"\nDEVICE {device_id} ({device_name}):")
            report_lines.append(f"  Allocated Before: {format_bytes(initial_allocated)}")
            report_lines.append(f"  Allocated After: {format_bytes(final_allocated)}")
            report_lines.append(f"  Allocated Freed: {format_bytes(allocated_freed)}")

            report_lines.append(f"  Reserved Before: {format_bytes(initial_reserved)}")
            report_lines.append(f"  Reserved After: {format_bytes(final_reserved)}")
            report_lines.append(f"  Reserved Freed: {format_bytes(reserved_freed)}")

        # Summary
        report_lines.append(f"\nSUMMARY:")
        report_lines.append(f"  Total Memory Freed: {format_bytes(memory_freed)}")
        report_lines.append(f"  Unload Strategy: {strategy}")

        # Status
        if memory_freed > 0:
            report_lines.append(f"  Status: ✅ Successfully freed VRAM")
        else:
            report_lines.append(f"  Status: ⚠️ No memory freed (may already be optimal)")

        return "\n".join(report_lines)