import torch
import math
import sys
import os
from torch import Tensor, nn
from einops import rearrange, repeat
import comfy.ldm.common_dit
import comfy.model_management
from comfy.ldm.flux.model import Flux as OriginalFlux
from comfy.ldm.flux.layers import EmbedND as OriginalEmbedND

# Nunchaku support: optionally pull in the ComfyUI-nunchaku custom node so its
# Flux wrapper can be re-wrapped below. The sibling-directory lookup assumes
# this file sits three levels under ComfyUI's custom_nodes dir -- TODO confirm.
try:
    nunchaku_path = os.path.join(os.path.dirname(__file__), "..", "..", "..", "ComfyUI-nunchaku")
    if os.path.exists(nunchaku_path):
        # NOTE(review): sys.path is mutated permanently and never cleaned up,
        # which also makes the bare package name "wrappers" importable
        # process-wide -- potential name collision with other node packs.
        sys.path.insert(0, nunchaku_path)
        from wrappers.flux import ComfyFluxWrapper
        NUNCHAKU_AVAILABLE = True
    else:
        # ComfyUI-nunchaku checkout not present next to this node pack.
        ComfyFluxWrapper = None
        NUNCHAKU_AVAILABLE = False
except ImportError:
    # The checkout exists but its wrapper module could not be imported.
    ComfyFluxWrapper = None
    NUNCHAKU_AVAILABLE = False


def nunchaku_rope(pos: torch.Tensor, dim: int, theta: int, ntk_factor: float = 1.0, axis_idx: int = -1) -> torch.Tensor:
    """Rotary positional embedding with optional NTK-aware theta scaling.

    Args:
        pos: Position ids, shape (batch, seq_len).
        dim: Embedding dimension for this axis; must be even.
        theta: RoPE base frequency.
        ntk_factor: NTK scale factor; values > 1.0 enlarge theta. Only 20% of
            the requested factor is applied -- full NTK scaling is too
            aggressive for spatial (image) axes.
        axis_idx: Axis index used only for debug logging; -1 (default)
            disables the log line.

    Returns:
        float32 tensor of shape (batch, seq_len, dim // 2, 1, 2) holding
        (sin, cos) pairs per frequency.
    """
    assert dim % 2 == 0

    # NTK-RoPE scaling for image models. A single ``> 1.0`` check suffices:
    # the original ``!= 1.0 and > 1.0`` was redundant.
    if ntk_factor > 1.0:
        original_theta = theta
        # Dampen to 20% of the requested factor to maintain spatial coherence.
        scaled_factor = 1.0 + (ntk_factor - 1.0) * 0.2
        theta = int(theta * scaled_factor)
        if axis_idx >= 0:
            axis_name = ["batch/seq", "height", "width"][axis_idx] if axis_idx < 3 else f"axis_{axis_idx}"
            print(f"[NTK-RoPE Debug] {axis_name} - factor: {ntk_factor:.4f} -> scaled: {scaled_factor:.4f}, theta: {original_theta} -> {theta}")

    # Standard RoPE frequency schedule: omega_d = theta^(-2d/dim).
    scale = torch.arange(0, dim, 2, dtype=torch.float64, device=pos.device) / dim
    omega = 1.0 / (theta**scale)

    # Clamp omega to prevent extreme frequencies that cause artifacts.
    omega = torch.clamp(omega, min=1e-6, max=1e4)

    out = torch.einsum("...n,d->...nd", pos, omega)
    stacked_out = torch.stack([torch.sin(out), torch.cos(out)], dim=-1)
    return stacked_out.view(pos.shape[0], -1, dim // 2, 1, 2).float()


class NunchakuEmbedND(nn.Module):
    """Multi-dimensional rotary embedding module (Nunchaku/diffusers layout).

    Concatenates per-axis RoPE tables produced by ``nunchaku_rope`` along the
    frequency dimension and adds a head dimension.
    """

    def __init__(self, dim: int, theta: int, axes_dim: list[int]):
        super().__init__()
        self.dim = dim            # total embedding dim (informational)
        self.theta = theta        # RoPE base frequency
        self.axes_dim = axes_dim  # per-axis embedding dims, e.g. [16, 56, 56]

    def forward(self, ids: torch.Tensor) -> torch.Tensor:
        # diffusers >= 0.31.0 expects a batch dimension on the ids. Narrowed
        # from a bare ``except`` so KeyboardInterrupt/SystemExit are not
        # swallowed; any import/version problem just keeps the old layout.
        try:
            import diffusers
            from packaging.version import Version
            if Version(diffusers.__version__) >= Version("0.31.0"):
                ids = ids[None, ...]
        except Exception:
            pass
        n_axes = ids.shape[-1]
        emb = torch.cat([nunchaku_rope(ids[..., i], self.axes_dim[i], self.theta) for i in range(n_axes)], dim=-3)
        return emb.unsqueeze(1)


class HighResFixNunchakuWrapper(nn.Module):
    """Wrapper to add HighResFix functionality to Nunchaku models.

    Attribute access is proxied to the wrapped Nunchaku wrapper; during each
    forward pass the inner model's ``__call__`` is temporarily replaced so the
    duplicated text-token position ids (txt_ids) receive per-grid-cell spatial
    offsets before the real model runs.
    """
    
    def __init__(self, original_wrapper):
        super().__init__()
        # The Nunchaku-provided wrapper being decorated.
        self.original_wrapper = original_wrapper
        # Populated externally by HighResFixModelInjection.inject().
        self.highresfix_config = {}
        # Bound pre-wrap forward; invoked below after the patch is installed.
        self._original_forward = original_wrapper.forward
    
    def __getattr__(self, name):
        # nn.Module defines its own __getattr__; route our three private
        # attributes through it, proxy everything else to the wrapped object.
        if name in ['original_wrapper', 'highresfix_config', '_original_forward']:
            return super().__getattr__(name)
        return getattr(self.original_wrapper, name)
    
    def forward(self, x, timestep, context, y, guidance, control=None, transformer_options={}, **kwargs):
        """Run the wrapped forward with HighResFix txt_ids offsets in place.

        NOTE(review): debug gating via ``timestep[0].item() > 0.9`` assumes
        timesteps are normalized to [0, 1] (early steps near 1.0) -- confirm.
        """
        # Get HighResFix config; the defaults describe a 1x1 grid (no-op).
        grid_w = self.highresfix_config.get('grid_w', 1)
        grid_h = self.highresfix_config.get('grid_h', 1)
        patch_size = self.highresfix_config.get('patch_size', 64)
        total_duplicates = grid_w * grid_h
        debug = self.highresfix_config.get('debug', False)
        
        if debug and timestep[0].item() > 0.9:
            print(f"\n[HighResFixNunchakuWrapper.forward] Debug info:")
            print(f"  Grid: {grid_w}x{grid_h} = {total_duplicates} duplicates")
            print(f"  Context shape: {context.shape}")
            print(f"  Input shape: {x.shape}")
        
        original_model = self.original_wrapper.model
        original_model_call = original_model.__call__
        
        # Replacement for the inner model call: shifts each duplicated txt_ids
        # segment by its grid cell's (row, col) offset, then delegates to the
        # original call unchanged.
        def patched_model_call(hidden_states, encoder_hidden_states, pooled_projections, 
                              timestep, img_ids, txt_ids, guidance=None, 
                              controlnet_block_samples=None, controlnet_single_block_samples=None, **model_kwargs):
            if total_duplicates > 1:
                bs = txt_ids.shape[0]
                # txt_ids holds all duplicated segments concatenated along dim 1.
                original_seq_len = txt_ids.shape[1] // total_duplicates
                
                if debug and hasattr(timestep, 'item') and timestep[0].item() > 0.9:
                    print(f"\n[HighResFixNunchakuWrapper] Applying HighResFix position offsets:")
                    print(f"  Duplicates: {total_duplicates}, seq_len per duplicate: {original_seq_len}")
                
                txt_ids_new = txt_ids.clone()
                for i in range(grid_h):
                    for j in range(grid_w):
                        idx = i * grid_w + j
                        start_idx = idx * original_seq_len
                        end_idx = (idx + 1) * original_seq_len
                        # Channel 1 = vertical position, channel 2 = horizontal.
                        txt_ids_new[:, start_idx:end_idx, 1] += i * patch_size
                        txt_ids_new[:, start_idx:end_idx, 2] += j * patch_size
                        
                        # Only log the first four cells to keep output short.
                        if debug and hasattr(timestep, 'item') and timestep[0].item() > 0.9 and idx < 4:
                            print(f"  Grid [{i},{j}]: offset=({i * patch_size}, {j * patch_size})")
                
                txt_ids = txt_ids_new
            
            return original_model_call(
                hidden_states=hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                pooled_projections=pooled_projections,
                timestep=timestep,
                img_ids=img_ids,
                txt_ids=txt_ids,
                guidance=guidance,
                controlnet_block_samples=controlnet_block_samples,
                controlnet_single_block_samples=controlnet_single_block_samples,
                **model_kwargs
            )
        
        # NOTE(review): assigning __call__ on the *instance* only intercepts
        # explicit ``model.__call__(...)`` invocations; ``model(...)`` looks up
        # __call__ on the type and would bypass this patch -- confirm the
        # wrapped forward invokes __call__ explicitly.
        original_model.__call__ = patched_model_call
        
        try:
            result = self._original_forward(x, timestep, context, y, guidance, control, transformer_options, **kwargs)
        finally:
            # Always restore, even if the wrapped forward raises.
            original_model.__call__ = original_model_call
        
        return result


class HighResFixFlux(OriginalFlux):
    """Custom Flux class for HighResFix token duplication.

    Replaces Flux.forward: patchifies the latent itself, builds image position
    ids, and gives each duplicated text-conditioning segment its own spatial
    offset in txt_ids so every grid cell attends to "its" copy of the prompt.
    ``highresfix_config`` is attached by HighResFixModelInjection.inject().
    """
    
    def forward(self, x, timestep, context, y, guidance, control=None, transformer_options={}, **kwargs):
        """Forward pass with duplicated-token position handling.

        Arguments mirror the stock Flux.forward; the HighResFix grid layout
        and debug flag come from ``self.highresfix_config``.
        """
        highresfix_config = getattr(self, 'highresfix_config', {})
        grid_w = highresfix_config.get('grid_w', 1)
        grid_h = highresfix_config.get('grid_h', 1)
        patch_size = highresfix_config.get('patch_size', 64)
        total_duplicates = grid_w * grid_h
        debug = highresfix_config.get('debug', False)
        
        bs, c, h, w = x.shape
        original_h, original_w = h, w  # Store original dimensions for the final crop
        patch_size_local = 2  # latent patch size: 2x2 latent pixels per token
        
        # Calculate padding so both spatial dims are multiples of the patch size.
        h_pad = (patch_size_local - h % patch_size_local) % patch_size_local
        w_pad = (patch_size_local - w % patch_size_local) % patch_size_local
        
        # Apply padding manually (right/bottom, zeros) to have more control.
        if h_pad > 0 or w_pad > 0:
            x = torch.nn.functional.pad(x, (0, w_pad, 0, h_pad), mode='constant', value=0)
        
        _, _, h_padded, w_padded = x.shape
        h_len = h_padded // patch_size_local
        w_len = w_padded // patch_size_local
        
        # NOTE(review): debug gating assumes timestep normalized to [0, 1].
        if debug and timestep[0].item() > 0.9:  # Only debug early steps
            print(f"\n[HighResFixFlux.forward] Debug info at timestep {timestep[0].item():.3f}:")
            print(f"  Input shape: {bs}x{c}x{original_h}x{original_w}")
            print(f"  Padding: h_pad={h_pad}, w_pad={w_pad}")
            print(f"  Padded shape: {x.shape}")
            print(f"  h_len={h_len}, w_len={w_len}")
            print(f"  Grid: {grid_w}x{grid_h} = {total_duplicates} duplicates")
            print(f"  Context shape: {context.shape}")
            print(f"  patch_size_local={patch_size_local}, patch_size={patch_size}")
        
        # Patchify: each 2x2 latent patch becomes one token.
        img = rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=patch_size_local, pw=patch_size_local)
        
        # Image position ids: channel 1 = row index, channel 2 = column index.
        img_ids = torch.zeros((h_len, w_len, 3), device=x.device, dtype=x.dtype)
        img_ids[..., 1] = img_ids[..., 1] + torch.linspace(0, h_len - 1, steps=h_len, device=x.device, dtype=x.dtype)[:, None]
        img_ids[..., 2] = img_ids[..., 2] + torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype)[None, :]
        img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)
        
        if debug and timestep[0].item() > 0.9:
            print(f"\n  img_ids shape: {img_ids.shape}")
            print(f"  img_ids range - dim0: [{img_ids[0,:,0].min().item():.1f}, {img_ids[0,:,0].max().item():.1f}]")
            print(f"  img_ids range - dim1: [{img_ids[0,:,1].min().item():.1f}, {img_ids[0,:,1].max().item():.1f}]")
            print(f"  img_ids range - dim2: [{img_ids[0,:,2].min().item():.1f}, {img_ids[0,:,2].max().item():.1f}]")
        
        if total_duplicates > 1:
            # context holds total_duplicates copies of the prompt concatenated
            # along dim 1; give each segment its grid cell's spatial offset.
            original_seq_len = context.shape[1] // total_duplicates
            txt_ids_list = []
            
            if debug and timestep[0].item() > 0.9:
                print(f"\n  Token duplication: {total_duplicates}x")
            
            for i in range(grid_h):
                for j in range(grid_w):
                    txt_ids_part = torch.zeros((bs, original_seq_len, 3), device=x.device, dtype=x.dtype)
                    txt_ids_part[:, :, 1] = i * patch_size
                    txt_ids_part[:, :, 2] = j * patch_size
                    txt_ids_list.append(txt_ids_part)
            
            txt_ids = torch.cat(txt_ids_list, dim=1)
        else:
            # No duplication: text tokens sit at the spatial origin, as in stock Flux.
            txt_ids = torch.zeros((bs, context.shape[1], 3), device=x.device, dtype=x.dtype)
        
        # Normalize controlnet dict entries: forward_orig iterates these lists.
        if control is not None and isinstance(control, dict):
            if "input" in control and control["input"] is None:
                control["input"] = []
            if "output" in control and control["output"] is None:
                control["output"] = []
        
        if debug and timestep[0].item() > 0.9:
            print(f"\n  Position ranges:")
            print(f"    img: ({img_ids[0,:,1].max():.0f}, {img_ids[0,:,2].max():.0f})")
            print(f"    txt: ({txt_ids[0,:,1].max():.0f}, {txt_ids[0,:,2].max():.0f})")
        
        out = self.forward_orig(img, img_ids, context, txt_ids, timestep, y, guidance, control, transformer_options=transformer_options)
        
        # Reconstruct the spatial dimensions (un-patchify).
        out_reshaped = rearrange(out, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=h_len, w=w_len, ph=patch_size_local, pw=patch_size_local)
        
        # Apply edge smoothing if we have padding.
        # NOTE(review): two issues here to confirm before relying on this --
        # (1) ``min(4, h_pad, w_pad)`` is 0 whenever only one side is padded,
        # so that side never gets faded; (2) the faded rows/columns all lie in
        # the padded region that is cropped away below, so as written the
        # smoothing never changes the returned tensor at all.
        smooth_edges = highresfix_config.get('smooth_edges', True)
        if smooth_edges and (h_pad > 0 or w_pad > 0):
            # Create fade masks for smooth transitions
            fade_size = min(4, h_pad, w_pad)  # Size of the fade region
            
            if h_pad > 0 and fade_size > 0:
                # Create vertical fade mask
                fade_weights = torch.linspace(1.0, 0.0, fade_size, device=x.device, dtype=x.dtype)
                out_reshaped[:, :, original_h:original_h+fade_size, :] *= fade_weights.view(1, 1, -1, 1)
            
            if w_pad > 0 and fade_size > 0:
                # Create horizontal fade mask
                fade_weights = torch.linspace(1.0, 0.0, fade_size, device=x.device, dtype=x.dtype)
                out_reshaped[:, :, :, original_w:original_w+fade_size] *= fade_weights.view(1, 1, 1, -1)
        
        # Crop back to original dimensions
        return out_reshaped[:, :, :original_h, :original_w]


class HighResFixEmbedND(OriginalEmbedND):
    """Custom EmbedND with NTK-aware RoPE.

    Identical to the stock Flux EmbedND except that ``theta`` is inflated by a
    damped NTK factor before the frequency schedule is built, extending the
    usable positional range at high resolutions. ``ntk_factor`` is assigned by
    HighResFixModelInjection after the class swap.
    """
    
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # 1.0 means "no scaling"; overwritten by the injector.
        self.ntk_factor = 1.0
    
    def forward(self, ids: Tensor) -> Tensor:
        n_axes = ids.shape[-1]
        
        def rope_ntk(pos: Tensor, dim: int, theta: int) -> Tensor:
            assert dim % 2 == 0
            
            # Some backends lack the required float64 ops; compute on CPU there.
            if comfy.model_management.is_device_mps(pos.device) or \
               comfy.model_management.is_intel_xpu() or \
               comfy.model_management.is_directml_enabled():
                device = torch.device("cpu")
            else:
                device = pos.device
            
            scale = torch.linspace(0, (dim - 2) / dim, steps=dim//2, dtype=torch.float64, device=device)
            
            # Apply NTK scaling -- very conservative for image models. A single
            # ``> 1.0`` check suffices (``!= 1.0 and > 1.0`` was redundant).
            if self.ntk_factor > 1.0:
                # Use only 20% of the factor to maintain spatial coherence.
                scaled_factor = 1.0 + (self.ntk_factor - 1.0) * 0.2
                theta_scaled = theta * scaled_factor
            else:
                theta_scaled = theta
                
            omega = 1.0 / (theta_scaled**scale)
            
            # Clamp omega to prevent artifacts from extreme frequencies.
            omega = torch.clamp(omega, min=1e-6, max=1e4)
            
            out = torch.einsum("...n,d->...nd", pos.to(dtype=torch.float32, device=device), omega)
            # Build the 2x2 rotation matrix per frequency: [[cos, -sin], [sin, cos]].
            out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1)
            out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2)
            return out.to(dtype=torch.float32, device=pos.device)
        
        emb = torch.cat(
            [rope_ntk(ids[..., i], self.axes_dim[i], self.theta) for i in range(n_axes)],
            dim=-3,
        )
        return emb.unsqueeze(1)


class HighResFixModelInjection:
    """ComfyUI node that injects HighResFix behaviour into a Flux model.

    Applies two techniques for generating above the native resolution:
    token duplication (tiled text positions) and damped NTK-aware RoPE
    scaling. Supports both standard ComfyUI Flux models and Nunchaku-wrapped
    models (detected structurally).
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "model": ("MODEL",),
                "target_width": ("INT", {"default": 2048, "min": 512, "max": 8192, "step": 64}),
                "target_height": ("INT", {"default": 2048, "min": 512, "max": 8192, "step": 64}),
                "enable_token_duplication": ("BOOLEAN", {"default": True}),
                "enable_ntk_rope": ("BOOLEAN", {"default": True}),
                "ntk_method": (["auto", "manual"], {"default": "auto"}),
                "ntk_factor": ("FLOAT", {"default": 10.0, "min": 1.0, "max": 20.0, "step": 0.5}),
                "flux_multiplier": ("FLOAT", {"default": 1.0, "min": 0.5, "max": 5.0, "step": 0.1}),
                "smooth_edges": ("BOOLEAN", {"default": True}),
                "debug": ("BOOLEAN", {"default": True}),
            }
        }

    RETURN_TYPES = ("MODEL",)
    FUNCTION = "inject"
    CATEGORY = "highresfix"

    @staticmethod
    def _base_resolution(target_width, target_height):
        """Return (base_width, base_height): a ~1MP canvas at the target
        aspect ratio, each dimension rounded down to a multiple of 64."""
        target_pixels = 1024 * 1024  # ~1MP, Flux's native training area
        aspect_ratio = target_width / target_height
        base_height = int(math.sqrt(target_pixels / aspect_ratio))
        base_width = int(base_height * aspect_ratio)
        # Round to nearest 64 (patch size). Width is derived from the
        # un-rounded height first to keep the aspect ratio accurate.
        return (base_width // 64) * 64, (base_height // 64) * 64

    @staticmethod
    def _calculate_ntk_factor(ntk_method, ntk_factor, base_width, base_height,
                              target_width, target_height, patch_size,
                              flux_multiplier, debug):
        """Compute the final NTK-RoPE factor.

        In "manual" mode this is just ``ntk_factor``. In "auto" mode a very
        conservative piecewise schedule is derived from the sequence-length
        ratio (target vs. base), multiplied by ``flux_multiplier`` (with
        logarithmic diminishing returns beyond a 4x ratio) and capped at 8.0
        to prevent artifacts. Previously this logic was duplicated verbatim
        for the Nunchaku and standard code paths.
        """
        if ntk_method != "auto":
            return ntk_factor

        native_seq_len = (base_height // patch_size) * (base_width // patch_size)
        target_seq_len = (target_height // patch_size) * (target_width // patch_size)
        ratio = target_seq_len / native_seq_len
        if ratio <= 1.0:
            calculated_ntk = 1.0
        elif ratio <= 2.0:
            # For up to 2x size, use very subtle scaling
            calculated_ntk = 1.0 + (ratio - 1.0) * 0.1  # Max 1.1 at 2x
        elif ratio <= 4.0:
            # For 2x-4x, slightly more but still conservative
            calculated_ntk = 1.1 + (ratio - 2.0) * 0.05  # Max 1.2 at 4x
        else:
            # Beyond 4x, cap at 1.3
            calculated_ntk = min(1.3, 1.2 + (ratio - 4.0) * 0.025)

        if target_seq_len > native_seq_len:
            # Apply flux_multiplier with diminishing returns for very high resolutions
            if ratio > 4.0:  # >2x resolution in each dimension
                # Use logarithmic scaling for extreme resolutions
                adjusted_multiplier = 1.0 + (flux_multiplier - 1.0) * (math.log(ratio) / math.log(4.0))
                final_ntk_factor = calculated_ntk * adjusted_multiplier
            else:
                final_ntk_factor = calculated_ntk * flux_multiplier

            # Cap the maximum NTK factor to prevent artifacts (empirical).
            max_safe_ntk = 8.0
            if final_ntk_factor > max_safe_ntk:
                if debug:
                    print(f"  ⚠️ Capping NTK factor from {final_ntk_factor:.2f} to {max_safe_ntk:.2f} to prevent artifacts")
                final_ntk_factor = max_safe_ntk
        else:
            final_ntk_factor = calculated_ntk
        return final_ntk_factor

    def inject(self, model, target_width, target_height, enable_token_duplication=True,
               enable_ntk_rope=True, ntk_method="auto", ntk_factor=10.0, 
               flux_multiplier=2.5, smooth_edges=True, debug=True):
        """Clone *model* and patch its diffusion model for high-res generation.

        Returns a one-tuple with the patched clone; the input model object is
        left untouched.
        """
        # Clone so the original model elsewhere in the graph keeps working.
        m = model.clone()

        aspect_ratio = target_width / target_height
        base_width, base_height = self._base_resolution(target_width, target_height)

        # Grid of duplicated prompt tiles covering the target canvas.
        patch_size = 64
        if enable_token_duplication:
            grid_w = max(1, target_width // base_width)
            grid_h = max(1, target_height // base_height)
        else:
            grid_w = 1
            grid_h = 1
        total_duplicates = grid_w * grid_h

        if debug:
            print(f"\n[HighResFixModelInjection] Applying HighResFix techniques:")
            print(f"  Target: {target_width}x{target_height}")
            print(f"  Base resolution: {base_width}x{base_height} (aspect ratio: {aspect_ratio:.2f})")
            print(f"  Token duplication: {'Enabled' if enable_token_duplication else 'Disabled'}")
            if enable_token_duplication:
                print(f"  Duplication grid: {grid_w}x{grid_h} = {total_duplicates}x")
            print(f"  NTK-RoPE: {'Enabled' if enable_ntk_rope else 'Disabled'}")

        if hasattr(m, 'model') and hasattr(m.model, 'diffusion_model'):
            diffusion_model = m.model.diffusion_model

            # Detect Nunchaku models by checking for process_img but no img_in
            is_nunchaku = hasattr(diffusion_model, 'process_img') and not hasattr(diffusion_model, 'img_in')

            if debug:
                print(f"\n  Model type: {'Nunchaku' if is_nunchaku else 'Standard Flux'}")

            if is_nunchaku:
                if debug:
                    print(f"  Detected Nunchaku model")

                # Wrap the Nunchaku wrapper with our HighResFix wrapper
                highresfix_wrapper = HighResFixNunchakuWrapper(diffusion_model)
                highresfix_wrapper.highresfix_config = {
                    'grid_w': grid_w,
                    'grid_h': grid_h,
                    'patch_size': patch_size,
                    'total_duplicates': total_duplicates,
                    'debug': debug
                }

                # Replace the diffusion model with our wrapper
                m.model.diffusion_model = highresfix_wrapper

                print(f"[NTK Debug] After wrapping:")
                print(f"[NTK Debug] New diffusion_model type: {type(m.model.diffusion_model)}")
                print(f"[NTK Debug] highresfix_wrapper has original_wrapper: {hasattr(highresfix_wrapper, 'original_wrapper')}")

                if debug:
                    print(f"  ✓ Wrapped Nunchaku model with HighResFix functionality")

                # NTK-RoPE for Nunchaku models
                print(f"\n[NTK Debug] Checking Nunchaku model structure:")
                print(f"[NTK Debug] diffusion_model type: {type(diffusion_model)}")
                print(f"[NTK Debug] Has 'model' attr: {hasattr(diffusion_model, 'model')}")
                if hasattr(diffusion_model, 'model'):
                    print(f"[NTK Debug] model type: {type(diffusion_model.model)}")
                    print(f"[NTK Debug] Has 'pos_embed': {hasattr(diffusion_model.model, 'pos_embed')}")

                # After wrapping, locate the underlying diffusers-style model
                actual_model = None
                if hasattr(highresfix_wrapper, 'original_wrapper'):
                    print(f"[NTK Debug] Checking highresfix_wrapper.original_wrapper")
                    if hasattr(highresfix_wrapper.original_wrapper, 'model'):
                        actual_model = highresfix_wrapper.original_wrapper.model
                        print(f"[NTK Debug] Found model in original_wrapper")
                        print(f"[NTK Debug] Model type: {type(actual_model)}")
                        print(f"[NTK Debug] Has 'pos_embed': {hasattr(actual_model, 'pos_embed')}")

                if enable_ntk_rope and actual_model and hasattr(actual_model, 'pos_embed'):
                    final_ntk_factor = self._calculate_ntk_factor(
                        ntk_method, ntk_factor, base_width, base_height,
                        target_width, target_height, patch_size, flux_multiplier, debug
                    )

                    class HighResFixNunchakuEmbedND(NunchakuEmbedND):
                        """NunchakuEmbedND variant applying NTK scaling to spatial axes only."""

                        def __init__(self, *args, **kwargs):
                            super().__init__(*args, **kwargs)
                            self.ntk_factor = 1.0

                        def forward(self, ids: Tensor) -> Tensor:
                            # Count calls so only the first few are logged.
                            if hasattr(self, '_debug_count'):
                                self._debug_count += 1
                            else:
                                self._debug_count = 1

                            if self._debug_count <= 3:  # Only debug first few calls
                                print(f"\n[NTK-RoPE Debug] HighResFixNunchakuEmbedND.forward called:")
                                print(f"  NTK factor: {self.ntk_factor:.4f}")
                                print(f"  Theta: {self.theta}")
                                print(f"  IDs shape: {ids.shape}")

                            # diffusers >= 0.31.0 expects a batch dimension on the ids.
                            try:
                                import diffusers
                                from packaging.version import Version
                                if Version(diffusers.__version__) >= Version("0.31.0"):
                                    ids = ids[None, ...]
                            except Exception:
                                pass

                            n_axes = ids.shape[-1]
                            # Apply NTK scaling only to spatial axes (not the first axis).
                            # axes_dim is typically [16, 56, 56]: first is batch/seq, others H/W.
                            emb_list = []
                            for i in range(n_axes):
                                if i == 0:
                                    # Don't apply NTK to the first axis (batch/sequence)
                                    emb_list.append(nunchaku_rope(ids[..., i], self.axes_dim[i], self.theta, 1.0, axis_idx=i))
                                else:
                                    # Apply NTK to spatial axes (height/width)
                                    emb_list.append(nunchaku_rope(ids[..., i], self.axes_dim[i], self.theta, self.ntk_factor, axis_idx=i))

                            emb = torch.cat(emb_list, dim=-3)
                            return emb.unsqueeze(1)

                    # Swap the pos_embed class in place so existing references survive.
                    old_pos_embed = actual_model.pos_embed

                    if debug:
                        print(f"\n  [Debug] Original pos_embed class: {old_pos_embed.__class__.__name__}")
                        print(f"  [Debug] Has theta: {hasattr(old_pos_embed, 'theta')}")
                        if hasattr(old_pos_embed, 'theta'):
                            print(f"  [Debug] Original theta: {old_pos_embed.theta}")
                        print(f"  [Debug] Has axes_dim: {hasattr(old_pos_embed, 'axes_dim')}")
                        if hasattr(old_pos_embed, 'axes_dim'):
                            print(f"  [Debug] axes_dim: {old_pos_embed.axes_dim}")

                    actual_model.pos_embed.__class__ = HighResFixNunchakuEmbedND
                    actual_model.pos_embed.ntk_factor = final_ntk_factor

                    if debug:
                        print(f"  ✓ Injected NTK-RoPE for Nunchaku with factor: {final_ntk_factor:.4f}")
                        print(f"  [Debug] New pos_embed class: {actual_model.pos_embed.__class__.__name__}")
                        print(f"  [Debug] NTK factor set: {actual_model.pos_embed.ntk_factor}")

            else:
                # Standard ComfyUI Flux model
                if debug:
                    print(f"  Detected standard Flux model")

                # In-place class swap keeps all weights and references intact.
                diffusion_model.__class__ = HighResFixFlux

                # Config consumed by HighResFixFlux.forward.
                diffusion_model.highresfix_config = {
                    'grid_w': grid_w,
                    'grid_h': grid_h,
                    'patch_size': patch_size,
                    'total_duplicates': total_duplicates,
                    'smooth_edges': smooth_edges,
                    'debug': debug
                }

                if debug:
                    print(f"  ✓ Injected HighResFix Flux class")

                # Apply NTK-RoPE if enabled
                if enable_ntk_rope and hasattr(diffusion_model, 'pe_embedder'):
                    final_ntk_factor = self._calculate_ntk_factor(
                        ntk_method, ntk_factor, base_width, base_height,
                        target_width, target_height, patch_size, flux_multiplier, debug
                    )

                    # Swap the embedder class in place and configure it.
                    diffusion_model.pe_embedder.__class__ = HighResFixEmbedND
                    diffusion_model.pe_embedder.ntk_factor = final_ntk_factor
                    diffusion_model.pe_embedder.debug = debug

                    if debug:
                        print(f"  ✓ Injected NTK-RoPE with factor: {final_ntk_factor:.4f}")

        return (m,)


class HighResFixConditioningDuplicator:
    """Simple conditioning duplicator for HighResFix"""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "conditioning": ("CONDITIONING",),
                "target_width": ("INT", {"default": 2048, "min": 512, "max": 8192, "step": 64}),
                "target_height": ("INT", {"default": 2048, "min": 512, "max": 8192, "step": 64}),
                "enable_token_duplication": ("BOOLEAN", {"default": True}),
            }
        }

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "duplicate"
    CATEGORY = "highresfix"

    def duplicate(self, conditioning, target_width, target_height, enable_token_duplication=True, debug=False):
        """Repeat every conditioning tensor along its sequence axis, once per
        grid cell. The grid is the target resolution divided by a ~1MP base
        canvas at the same aspect ratio. Returns a one-tuple; when no
        duplication is needed the input list is passed through untouched."""
        if not enable_token_duplication:
            return (conditioning,)

        # ~1MP base canvas at the requested aspect ratio, snapped to the
        # 64-pixel patch grid (width derived from the un-rounded height).
        ar = target_width / target_height
        raw_h = int(math.sqrt((1024 * 1024) / ar))
        raw_w = int(raw_h * ar)
        base_w, base_h = (raw_w // 64) * 64, (raw_h // 64) * 64

        cells_x = max(1, target_width // base_w)
        cells_y = max(1, target_height // base_h)
        copies = cells_x * cells_y

        if copies == 1:
            if debug:
                print(f"[HighResFixConditioningDuplicator] No duplication needed (1x1 grid)")
            return (conditioning,)

        result = []
        for idx, entry in enumerate(conditioning):
            tokens = entry[0]
            extras = entry[1].copy() if len(entry) > 1 else {}
            first = idx == 0  # only report the first conditioning entry

            if debug and first:
                print(f"\n[HighResFixConditioningDuplicator] Debug info:")
                print(f"  Target: {target_width}x{target_height}")
                print(f"  Base resolution: {base_w}x{base_h} (aspect ratio: {ar:.2f})")
                print(f"  Grid: {cells_x}x{cells_y} = {copies}x duplication")
                print(f"  Original conditioning shape: {tokens.shape}")

            tiled = torch.cat([tokens for _ in range(copies)], dim=1)

            if debug and first:
                print(f"  Duplicated conditioning shape: {tiled.shape}")
                print(f"  Sequence length: {tokens.shape[1]} -> {tiled.shape[1]}")

            result.append([tiled, extras])

        return (result,)


class HighResFixApply:
    """All-in-one ComfyUI node: applies the HighResFix model injection,
    an attention entropy shift, and conditioning duplication with sensible
    defaults for a given target resolution.
    """

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "model": ("MODEL",),
                "conditioning": ("CONDITIONING",),
                "target_width": ("INT", {"default": 2048, "min": 512, "max": 8192, "step": 64}),
                "target_height": ("INT", {"default": 2048, "min": 512, "max": 8192, "step": 64}),
            },
            "optional": {
                "debug": ("BOOLEAN", {"default": False}),
            }
        }

    RETURN_TYPES = ("MODEL", "CONDITIONING")
    FUNCTION = "apply"
    CATEGORY = "highresfix"

    def apply(self, model, conditioning, target_width, target_height, debug=False):
        """Return (patched model, duplicated conditioning) tuned for the
        target resolution.

        Delegates to HighResFixModelInjection and
        HighResFixConditioningDuplicator with a fixed set of defaults, then
        layers an entropy shift on top of the injected model.
        """
        # First apply model injection with all features enabled
        model_injector = HighResFixModelInjection()
        enable_token_duplication = True
        enable_ntk_rope = True
        ntk_method = "auto"
        ntk_factor = 10.0  # Not used when auto mode
        flux_multiplier = 1.5  # Good default for Flux
        smooth_edges = True  # Enable edge smoothing for non-1:1 ratios

        modified_model = model_injector.inject(
            model, target_width, target_height, enable_token_duplication,
            enable_ntk_rope, ntk_method, ntk_factor, flux_multiplier, smooth_edges, debug
        )[0]

        # Always apply entropy shift with optimal values
        entropy_scale_method = "sqrt"
        entropy_scale_factor = 1.0

        modified_model = self._apply_entropy_shift(
            modified_model, target_width, target_height,
            entropy_scale_method, entropy_scale_factor, debug
        )

        # Then duplicate conditioning
        cond_duplicator = HighResFixConditioningDuplicator()
        modified_conditioning = cond_duplicator.duplicate(
            conditioning, target_width, target_height, enable_token_duplication, debug
        )[0]

        return (modified_model, modified_conditioning)

    def _apply_entropy_shift(self, model, target_width, target_height,
                           scale_method, scale_factor, debug):
        """Compute an entropy scale for the target resolution, record it on
        the model, and patch the diffusion model's attention layers.

        scale_method is one of 'sqrt', 'linear', or anything else (treated
        as 'log'). Returns the (mutated) model.
        """
        # Sequence lengths measured in 64px patches; 1024x1024 is the
        # reference resolution (16x16 = 256 patches).
        patch_size = 64
        base_seq_len = (1024 // patch_size) ** 2  # 256 for 1024x1024
        target_seq_len = (target_height // patch_size) * (target_width // patch_size)

        # Calculate entropy scale: shrink attention scale as the sequence grows.
        if scale_method == 'sqrt':
            entropy_scale = math.sqrt(base_seq_len / target_seq_len) * scale_factor
        elif scale_method == 'linear':
            entropy_scale = (base_seq_len / target_seq_len) * scale_factor
        else:  # log
            entropy_scale = (math.log(base_seq_len) / math.log(target_seq_len)) * scale_factor if target_seq_len > 1 else 1.0

        if debug:
            print(f"\n[HighResFixApply] Applying entropy shift:")
            print(f"  Target sequence length: {target_seq_len} (base: {base_seq_len})")
            print(f"  Entropy scale ({scale_method}): {entropy_scale:.3f}")

        # Always refresh the stored scale: the previous hasattr guard kept a
        # stale value when the node was re-applied with a new resolution.
        model.highresfix_entropy_scale = entropy_scale

        # Patch attention if we have access to the model
        if hasattr(model, 'model') and hasattr(model.model, 'diffusion_model'):
            self._patch_attention_entropy(model.model.diffusion_model, entropy_scale, debug)

        return model

    def _patch_attention_entropy(self, diffusion_model, entropy_scale, debug):
        """Recursively wrap attention modules so their softmax scale is
        multiplied by entropy_scale only for the duration of each forward
        call, then restored.
        """

        def patch_attention_module(module, path=""):
            # Heuristic: a module exposing to_q/to_k/to_v projections is
            # treated as an attention layer. The flag makes patching
            # idempotent so repeated node application doesn't stack wrappers.
            is_attention = (hasattr(module, 'to_q') and hasattr(module, 'to_k')
                            and hasattr(module, 'to_v'))
            if is_attention and not getattr(module, '_highresfix_entropy_patched', False):
                original_forward = module.forward

                def entropy_scaled_forward(hidden_states, encoder_hidden_states=None,
                                           _module=module, _orig=original_forward, **kwargs):
                    # Locate the scale attribute: on the module itself, or on
                    # a nested `attn` sub-module. The original code saved only
                    # module.scale, so the attn.scale path was scaled but
                    # never restored, compounding across forward calls.
                    if hasattr(_module, 'scale'):
                        target = _module
                    elif hasattr(_module, 'attn') and hasattr(_module.attn, 'scale'):
                        target = _module.attn
                    else:
                        # No scale to adjust; pass through unchanged.
                        return _orig(hidden_states, encoder_hidden_states, **kwargs)

                    saved_scale = target.scale
                    target.scale = saved_scale * entropy_scale
                    try:
                        return _orig(hidden_states, encoder_hidden_states, **kwargs)
                    finally:
                        # Restore the exact attribute we modified.
                        target.scale = saved_scale

                module.forward = entropy_scaled_forward
                module._highresfix_entropy_patched = True

                if debug and path:
                    print(f"  Patched attention at: {path}")

            # Recursively patch child modules
            for name, child in module.named_children():
                new_path = f"{path}.{name}" if path else name
                patch_attention_module(child, new_path)

        # Apply patches
        patch_attention_module(diffusion_model)


# Registration tables read by ComfyUI at import time.
# Maps each node's internal identifier to its implementing class.
NODE_CLASS_MAPPINGS = {
    "HighResFixApply": HighResFixApply,
    "HighResFixModelInjection": HighResFixModelInjection,
    "HighResFixConditioningDuplicator": HighResFixConditioningDuplicator,
}

# Human-readable titles shown in the ComfyUI node picker; keys must match
# NODE_CLASS_MAPPINGS.
NODE_DISPLAY_NAME_MAPPINGS = {
    "HighResFixApply": "HighResFix Apply",
    "HighResFixModelInjection": "HighResFix Model Injection (Advanced)",
    "HighResFixConditioningDuplicator": "HighResFix Conditioning Duplicator (Advanced)",
}