import torch
import comfy.model_management
from server import PromptServer

MAX_RESOLUTION = 8192

class FluxEmptyLatentSizePicker:
    """ComfyUI node that creates an empty Flux latent at a preset resolution.

    The user picks a megapixel budget and an aspect ratio; the node looks up a
    matching (width, height) pair from a precomputed table, optionally applies
    explicit overrides, and returns a latent tensor initialized according to
    ``initialization_mode`` (zeros, VAE-derived sample, gaussian or uniform
    noise) together with the chosen pixel width and height.
    """

    # Flux latents carry 16 channels; spatial dims are pixel dims // 8.
    LATENT_CHANNELS = 16

    # (megapixels, aspect_ratio) -> (width, height).  Built once at class
    # definition instead of being re-created on every execute() call.
    _RESOLUTION_MAP = {
        # 1MP
        ("1MP", "2.39:1"): (1536, 640),
        ("1MP", "16:9"): (1344, 768),
        ("1MP", "7:4"): (1280, 768),
        ("1MP", "4:3"): (1152, 832),
        ("1MP", "1:1"): (1024, 1024),
        ("1MP", "3:4"): (832, 1152),
        ("1MP", "4:7"): (768, 1280),
        ("1MP", "9:16"): (768, 1344),

        # 1.5MP
        ("1.5MP", "2.39:1"): (1920, 768),
        ("1.5MP", "16:9"): (1600, 896),
        ("1.5MP", "7:4"): (1536, 896),
        ("1.5MP", "4:3"): (1408, 1024),
        ("1.5MP", "1:1"): (1216, 1216),
        ("1.5MP", "3:4"): (1024, 1408),
        ("1.5MP", "4:7"): (896, 1536),
        ("1.5MP", "9:16"): (896, 1600),

        # 2MP
        ("2MP", "2.39:1"): (2176, 896),
        ("2MP", "16:9"): (1856, 1024),
        ("2MP", "7:4"): (1792, 1024),
        ("2MP", "4:3"): (1600, 1216),
        ("2MP", "1:1"): (1408, 1408),
        ("2MP", "3:4"): (1216, 1600),
        ("2MP", "4:7"): (1024, 1792),
        ("2MP", "9:16"): (1024, 1856),

        # 2.5MP
        ("2.5MP", "2.39:1"): (2432, 1024),
        ("2.5MP", "16:9"): (2112, 1152),
        ("2.5MP", "7:4"): (2048, 1152),
        ("2.5MP", "4:3"): (1792, 1344),
        ("2.5MP", "1:1"): (1600, 1600),
        ("2.5MP", "3:4"): (1344, 1792),
        ("2.5MP", "4:7"): (1152, 2048),
        ("2.5MP", "9:16"): (1152, 2112),

        # 3MP
        ("3MP", "2.39:1"): (2688, 1088),
        ("3MP", "16:9"): (2304, 1280),
        ("3MP", "7:4"): (2240, 1280),
        ("3MP", "4:3"): (1984, 1472),
        ("3MP", "1:1"): (1728, 1728),
        ("3MP", "3:4"): (1472, 1984),
        ("3MP", "4:7"): (1280, 2240),
        ("3MP", "9:16"): (1280, 2304),

        # 3.5MP
        ("3.5MP", "2.39:1"): (2944, 1216),
        ("3.5MP", "16:9"): (2560, 1408),
        ("3.5MP", "7:4"): (2496, 1408),
        ("3.5MP", "4:3"): (2176, 1600),
        ("3.5MP", "1:1"): (1856, 1856),
        ("3.5MP", "3:4"): (1600, 2176),
        ("3.5MP", "4:7"): (1408, 2496),
        ("3.5MP", "9:16"): (1408, 2560),

        # 4MP
        ("4MP", "2.39:1"): (3072, 1280),
        ("4MP", "16:9"): (2688, 1472),
        ("4MP", "7:4"): (2624, 1472),
        ("4MP", "4:3"): (2304, 1664),
        ("4MP", "1:1"): (1984, 1984),
        ("4MP", "3:4"): (1664, 2304),
        ("4MP", "4:7"): (1472, 2624),
        ("4MP", "9:16"): (1472, 2688),
    }

    # Fallback when the (megapixels, aspect_ratio) pair is missing: 3MP 16:9.
    _DEFAULT_RESOLUTION = (2304, 1280)

    def __init__(self):
        # Latents are created on ComfyUI's intermediate device.
        self.device = comfy.model_management.intermediate_device()

    @classmethod
    def INPUT_TYPES(cls) -> dict:
        """Declare the node's UI inputs for ComfyUI."""
        return {
            "required": {
                "megapixels": (["1MP", "1.5MP", "2MP", "2.5MP", "3MP", "3.5MP", "4MP"], {"default": "3MP"}),
                "aspect_ratio": (["2.39:1", "16:9", "7:4", "4:3", "1:1", "3:4", "4:7", "9:16"], {"default": "16:9"}),
                "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
                "width_override": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "height_override": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "initialization_mode": ([
                    "zeros",
                    "vae_sample",
                    "gaussian_noise",
                    "uniform_noise"
                ], {"default": "zeros"}),
                "noise_strength": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 2.0, "step": 0.01}),
            },
            "optional": {
                "vae": ("VAE",),
                "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
            },
            "hidden": {
                "unique_id": "UNIQUE_ID",
            }
        }

    RETURN_TYPES = ("LATENT", "INT", "INT",)
    RETURN_NAMES = ("LATENT", "width", "height",)
    FUNCTION = "execute"
    CATEGORY = "42lux"

    def _zeros(self, batch_size: int, latent_height: int, latent_width: int):
        """Return a zero-filled latent of the standard Flux shape."""
        return torch.zeros(
            [batch_size, self.LATENT_CHANNELS, latent_height, latent_width],
            device=self.device,
        )

    def execute(self, megapixels: str, aspect_ratio: str, batch_size: int, initialization_mode: str, noise_strength: float,
                width_override: int = 0, height_override: int = 0, vae=None, seed: int = 0, unique_id=None) -> tuple:
        """Build the latent and return ({"samples": tensor}, width, height).

        Args:
            megapixels: Key into the resolution table ("1MP" .. "4MP").
            aspect_ratio: Key into the resolution table (e.g. "16:9").
            batch_size: Number of latents in the batch.
            initialization_mode: One of "zeros", "vae_sample",
                "gaussian_noise", "uniform_noise"; unknown values fall back
                to zeros.
            noise_strength: Scale applied to noise-based initializations.
            width_override / height_override: Pixel dimensions that replace
                the table lookup when > 0.
            vae: Optional VAE used by "vae_sample" mode.
            seed: RNG seed; 0 means "do not seed" (the UI default).
            unique_id: Hidden node id used to push progress text to the UI.
        """
        width, height = self._RESOLUTION_MAP.get(
            (megapixels, aspect_ratio), self._DEFAULT_RESOLUTION
        )
        # NOTE(review): an override that is not a multiple of 8 is reported
        # as-is, but the latent uses floor(x/8), so the decoded image would be
        # smaller than the reported size — confirm this is intended (the UI
        # step of 8 only hints, it does not enforce).
        width = width_override if width_override > 0 else width
        height = height_override if height_override > 0 else height

        # Push the chosen resolution to the frontend when a node id is known.
        if unique_id:
            actual_mp = round((width * height) / 1000000, 2)
            PromptServer.instance.send_progress_text(
                f"Resolution: {width}×{height} ({actual_mp}MP)", 
                unique_id
            )

        # Flux latent spatial dims are 1/8 of the pixel dims.
        latent_height = height // 8
        latent_width = width // 8

        # Seed the global torch RNG for reproducibility; seed == 0 is the
        # UI default and is treated as "leave the RNG state alone".
        if seed != 0:
            torch.manual_seed(seed)

        if initialization_mode == "zeros":
            latent = self._zeros(batch_size, latent_height, latent_width)

        elif initialization_mode == "vae_sample":
            if vae is None:
                # Best-effort: keep the workflow running instead of raising.
                print("Warning: VAE sampling requested but no VAE provided. Falling back to zeros.")
                latent = self._zeros(batch_size, latent_height, latent_width)
            else:
                latent = self._sample_from_vae(vae, batch_size, latent_height, latent_width, noise_strength)

        elif initialization_mode == "gaussian_noise":
            latent = torch.randn(
                [batch_size, self.LATENT_CHANNELS, latent_height, latent_width],
                device=self.device,
            ) * noise_strength

        elif initialization_mode == "uniform_noise":
            # Uniform in [-noise_strength, +noise_strength).
            latent = (torch.rand(
                [batch_size, self.LATENT_CHANNELS, latent_height, latent_width],
                device=self.device,
            ) - 0.5) * 2 * noise_strength

        else:
            # Unknown mode: fall back to zeros rather than failing the graph.
            latent = self._zeros(batch_size, latent_height, latent_width)

        return ({"samples": latent}, width, height,)

    def _sample_from_vae(self, vae, batch_size: int, latent_height: int, latent_width: int, noise_strength: float):
        """Sample from VAE's learned latent distribution.

        Tries, in order: the VAE's own ``sample`` method; a structured-noise
        approximation when the object looks like a VAE wrapper; and finally
        plain structured noise.  Any exception also falls back to structured
        noise so the node never crashes the workflow.
        """
        try:
            # Method 1: use the VAE's built-in prior sampling if available.
            if hasattr(vae, 'sample') and callable(vae.sample):
                with torch.no_grad():
                    samples = vae.sample(batch_size, device=self.device)
                    # Resize to the target latent dims if the VAE returned
                    # a different spatial size.
                    if samples.shape[-2:] != (latent_height, latent_width):
                        samples = torch.nn.functional.interpolate(
                            samples, size=(latent_height, latent_width), mode='bilinear', align_corners=False
                        )
                    return samples

            # Method 2: approximate the learned distribution with scaled
            # gaussian noise plus a low-frequency component.
            elif hasattr(vae, 'first_stage_model') or hasattr(vae, 'encode'):
                latent = torch.randn(
                    [batch_size, self.LATENT_CHANNELS, latent_height, latent_width],
                    device=self.device,
                )

                # VAE latents typically have smaller variance than N(0, 1).
                latent = latent * (noise_strength * 0.5)

                # Add low-frequency structure to break spatial symmetry.
                if latent_height > 1 and latent_width > 1:
                    low_freq = torch.randn(
                        [batch_size, self.LATENT_CHANNELS, max(1, latent_height // 4), max(1, latent_width // 4)],
                        device=self.device,
                    )
                    low_freq = torch.nn.functional.interpolate(low_freq, size=(latent_height, latent_width), mode='bilinear', align_corners=False)
                    latent = latent + low_freq * (noise_strength * 0.3)

                return latent

            # Method 3: object doesn't look like a VAE at all.
            else:
                print("Warning: VAE doesn't have expected sampling methods. Using structured noise instead.")
                return self._generate_structured_noise(batch_size, latent_height, latent_width, noise_strength)

        except Exception as e:
            # Deliberate best-effort: report and degrade instead of raising.
            print(f"Error in VAE sampling: {e}. Falling back to structured noise.")
            return self._generate_structured_noise(batch_size, latent_height, latent_width, noise_strength)

    def _generate_structured_noise(self, batch_size: int, latent_height: int, latent_width: int, noise_strength: float):
        """Generate structured noise that's better than pure random noise.

        Base gaussian noise scaled by ``noise_strength``, with an upsampled
        low-frequency gaussian layer added when the latent is large enough.
        """
        latent = torch.randn(
            [batch_size, self.LATENT_CHANNELS, latent_height, latent_width],
            device=self.device,
        ) * noise_strength

        # Add low-frequency structure when there is room for a 4x downscale.
        if latent_height > 4 and latent_width > 4:
            low_freq_h = max(1, latent_height // 4)
            low_freq_w = max(1, latent_width // 4)
            low_freq = torch.randn(
                [batch_size, self.LATENT_CHANNELS, low_freq_h, low_freq_w],
                device=self.device,
            )
            low_freq = torch.nn.functional.interpolate(
                low_freq, size=(latent_height, latent_width), mode='bilinear', align_corners=False
            )
            latent = latent + low_freq * (noise_strength * 0.5)

        return latent
