"""
Memory Monitor Node for ComfyUI

Provides real-time memory usage monitoring with detailed system information.
"""

import torch
from ..utils.memory_utils import get_memory_info, format_bytes, check_memory_pressure


class MemoryMonitorNode:
    """
    Memory Monitor Node - Displays current memory usage statistics.

    Outputs a one-line summary string, a multi-line detailed report, the
    system RAM usage percentage, a boolean memory-pressure flag, and a
    human-readable recommendation.
    """

    # Divisor for converting raw byte counts into gigabytes for display.
    _BYTES_PER_GB = 1024 ** 3

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                # Dummy integer input; changing it forces ComfyUI to re-execute
                # the node so the readings refresh.
                "refresh_trigger": ("INT", {
                    "default": 0,
                    "min": 0,
                    "max": 1000000,
                    "step": 1,
                    "display": "number"
                }),
            },
            "optional": {
                "show_detailed": ("BOOLEAN", {"default": True}),
                "show_gpu_info": ("BOOLEAN", {"default": True}),
                # System RAM percentage above which the node reports pressure.
                "memory_threshold_warning": ("FLOAT", {
                    "default": 80.0,
                    "min": 50.0,
                    "max": 95.0,
                    "step": 5.0,
                    "display": "slider"
                }),
            }
        }

    RETURN_TYPES = (
        "STRING",      # Summary text
        "STRING",      # Detailed report
        "FLOAT",       # Memory percentage
        "BOOLEAN",     # Is under pressure
        "STRING",      # Recommendation
    )

    RETURN_NAMES = (
        "memory_summary",
        "detailed_report",
        "memory_percent",
        "under_pressure",
        "recommendation",
    )

    FUNCTION = "monitor_memory"
    CATEGORY = "Memory Management"
    OUTPUT_NODE = True

    def monitor_memory(self, refresh_trigger, show_detailed=True, show_gpu_info=True, memory_threshold_warning=80.0):
        """Monitor current memory usage and return a comprehensive report.

        Args:
            refresh_trigger: Unused value; changing it triggers re-execution.
            show_detailed: When False, the detailed report is a placeholder.
            show_gpu_info: Include per-GPU CUDA allocation figures.
            memory_threshold_warning: System RAM percentage above which the
                node reports memory pressure (overrides the default threshold
                used by check_memory_pressure()).

        Returns:
            Tuple of (summary, detailed_report, memory_percent,
            under_pressure, recommendation). On any internal error, returns
            the error message with percent 0.0 and under_pressure False.
        """
        try:
            memory_info = get_memory_info()
            pressure_info = check_memory_pressure()

            system_percent = memory_info.get('system_percent', 0)
            system_used_gb = memory_info.get('system_used', 0) / self._BYTES_PER_GB
            system_total_gb = memory_info.get('system_total', 0) / self._BYTES_PER_GB

            # Apply the user-configurable threshold instead of the default one
            # baked into check_memory_pressure(). Remember the helper's own
            # verdict so the recommendation can be kept consistent below.
            default_pressure = pressure_info.get('under_pressure', False)
            under_pressure = system_percent >= memory_threshold_warning
            pressure_info['under_pressure'] = under_pressure

            summary = f"RAM: {system_used_gb:.1f}GB / {system_total_gb:.1f}GB ({system_percent:.1f}%)"

            # Append per-GPU allocated memory to the summary when requested.
            if show_gpu_info and torch.cuda.is_available():
                gpu_parts = []
                for i in range(torch.cuda.device_count()):
                    allocated_key = f'gpu_{i}_allocated'
                    if allocated_key in memory_info:
                        allocated_gb = memory_info[allocated_key] / self._BYTES_PER_GB
                        gpu_parts.append(f"GPU{i}: {allocated_gb:.1f}GB")
                if gpu_parts:
                    summary += f" | {' | '.join(gpu_parts)}"

            # Pass the threshold-adjusted pressure_info through so the
            # detailed report agrees with this node's under_pressure output
            # (previously the report re-ran check_memory_pressure() and could
            # contradict it).
            detailed_report = self._create_detailed_report(
                memory_info, show_detailed, show_gpu_info, pressure_info
            )

            # Bug fix: the recommendation used to come straight from
            # check_memory_pressure(), so a custom (lower) threshold could
            # yield under_pressure=True alongside a "usage is normal"
            # recommendation. Keep the two outputs consistent.
            if under_pressure and not default_pressure:
                recommendation = (
                    f"System memory usage {system_percent:.1f}% exceeds the "
                    f"{memory_threshold_warning:.1f}% warning threshold - consider freeing memory"
                )
            else:
                recommendation = pressure_info.get('recommendation', 'Memory usage is normal')

            return (
                summary,
                detailed_report,
                system_percent,
                under_pressure,
                recommendation
            )

        except Exception as e:
            # Broad catch is deliberate: a monitoring node must never crash
            # the workflow, so all failures degrade to an error report.
            error_msg = f"Error monitoring memory: {str(e)}"
            return (
                error_msg,
                error_msg,
                0.0,
                False,
                "Error occurred during monitoring"
            )

    def _create_detailed_report(self, memory_info, show_detailed, show_gpu_info, pressure_info=None):
        """Build the multi-line memory report string.

        Args:
            memory_info: Dict of raw byte counts / percentages from
                get_memory_info().
            show_detailed: When False, returns a short placeholder instead.
            show_gpu_info: Include the per-GPU section (CUDA only).
            pressure_info: Optional pre-computed result of
                check_memory_pressure(); computed here when None (keeps the
                old 3-argument call signature working).

        Returns:
            The formatted report as a single newline-joined string.
        """
        if not show_detailed:
            return "Detailed reporting disabled"

        report_lines = ["=== MEMORY USAGE REPORT ===\n"]

        # System Memory
        report_lines.append("SYSTEM MEMORY:")
        report_lines.append(f"  Total: {format_bytes(memory_info.get('system_total', 0))}")
        report_lines.append(f"  Used: {format_bytes(memory_info.get('system_used', 0))}")
        report_lines.append(f"  Available: {format_bytes(memory_info.get('system_available', 0))}")
        report_lines.append(f"  Usage: {memory_info.get('system_percent', 0):.1f}%")

        # Swap Memory (omitted entirely on machines without swap configured)
        if memory_info.get('swap_total', 0) > 0:
            report_lines.append("\nSWAP MEMORY:")
            report_lines.append(f"  Total: {format_bytes(memory_info.get('swap_total', 0))}")
            report_lines.append(f"  Used: {format_bytes(memory_info.get('swap_used', 0))}")
            report_lines.append(f"  Usage: {memory_info.get('swap_percent', 0):.1f}%")

        # Process Memory
        report_lines.append("\nPROCESS MEMORY:")
        report_lines.append(f"  RSS: {format_bytes(memory_info.get('process_rss', 0))}")
        report_lines.append(f"  VMS: {format_bytes(memory_info.get('process_vms', 0))}")
        report_lines.append(f"  Percentage: {memory_info.get('process_percent', 0):.2f}%")

        # GPU Memory
        if show_gpu_info and torch.cuda.is_available():
            report_lines.append("\nGPU MEMORY:")
            for i in range(torch.cuda.device_count()):
                gpu_name = torch.cuda.get_device_name(i)
                allocated = memory_info.get(f'gpu_{i}_allocated', 0)
                reserved = memory_info.get(f'gpu_{i}_reserved', 0)
                max_allocated = memory_info.get(f'gpu_{i}_max_allocated', 0)

                report_lines.append(f"  GPU {i} ({gpu_name}):")
                report_lines.append(f"    Allocated: {format_bytes(allocated)}")
                report_lines.append(f"    Reserved: {format_bytes(reserved)}")
                report_lines.append(f"    Max Allocated: {format_bytes(max_allocated)}")

        # Memory Pressure Analysis
        if pressure_info is None:
            pressure_info = check_memory_pressure()
        report_lines.append("\nMEMORY PRESSURE ANALYSIS:")
        report_lines.append(f"  Status: {pressure_info.get('status', 'unknown').upper()}")
        report_lines.append(f"  Under Pressure: {'Yes' if pressure_info.get('under_pressure', False) else 'No'}")
        report_lines.append(f"  Available: {pressure_info.get('available_gb', 0):.2f} GB")
        report_lines.append(f"  Recommendation: {pressure_info.get('recommendation', 'N/A')}")

        return "\n".join(report_lines)