import torch
import torch.nn.functional as F
import numpy as np

def project(a, b):
    """Split `a` into components parallel and orthogonal to `b`.

    Both tensors are promoted to float64 for the computation; results are
    cast back to `a`'s original dtype. `b` is L2-normalized per batch
    element across all non-batch dimensions before projecting.
    """
    orig_dtype = a.dtype
    a64 = a.double()
    unit_b = torch.nn.functional.normalize(b.double().flatten(1), dim=-1).view_as(b)
    parallel = (a64 * unit_b).sum(dim=[1, 2, 3], keepdim=True) * unit_b
    orthogonal = a64 - parallel
    return parallel.to(orig_dtype), orthogonal.to(orig_dtype)

def _get_gaussian_kernel(kernel_size: int, sigma: float, device, dtype):
    coords = torch.arange(kernel_size, device=device, dtype=dtype) - (kernel_size - 1) / 2.0
    g = (-(coords**2 / (2 * sigma**2))).exp()
    g /= g.sum()
    return torch.outer(g, g).view(1, 1, kernel_size, kernel_size)

def _gaussian_blur(x: torch.Tensor, kernel_size: int = 5, sigma: float = 1.0):
    """Apply a depthwise (per-channel) Gaussian blur to an NCHW tensor.

    Assumes `kernel_size` is odd so that `(kernel_size - 1) // 2` zero
    padding keeps the output spatial size equal to the input's.
    """
    # Only the channel count is needed (for the grouped convolution);
    # the previous 4-way unpack left b, h, w unused.
    channels = x.shape[1]
    kernel = _get_gaussian_kernel(kernel_size, sigma, x.device, x.dtype).repeat(channels, 1, 1, 1)
    return F.conv2d(x, kernel, padding=(kernel_size - 1) // 2, stride=1, groups=channels)

def _pyr_down(x: torch.Tensor):
    """Pyramid 'reduce': Gaussian-smooth, then halve spatial resolution."""
    smoothed = _gaussian_blur(x)
    return F.interpolate(
        smoothed,
        scale_factor=0.5,
        mode='bilinear',
        align_corners=False,
        recompute_scale_factor=True,
    )

def _pyr_up(x: torch.Tensor):
    """Pyramid 'expand': double spatial resolution, then Gaussian-smooth."""
    grown = F.interpolate(
        x,
        scale_factor=2.0,
        mode='bilinear',
        align_corners=False,
        recompute_scale_factor=True,
    )
    return _gaussian_blur(grown)

def _build_laplacian_pyramid(tensor: torch.Tensor, levels: int):
    """Decompose `tensor` into a `levels`-entry Laplacian pyramid.

    Entries 0..levels-2 are detail bands (level minus its blurred,
    re-upsampled successor); the final entry is the low-resolution
    residual. Each expanded level is resized back to the exact shape of
    its source so odd resolutions round-trip losslessly.
    """
    bands = []
    level = tensor
    for _ in range(levels - 1):
        reduced = _pyr_down(level)
        expanded = _pyr_up(reduced)
        expanded = F.interpolate(
            expanded, size=level.shape[-2:], mode='bilinear', align_corners=False
        )
        bands.append(level - expanded)
        level = reduced
    bands.append(level)
    return bands

def _build_image_from_pyramid(pyramid: list):
    """Collapse a Laplacian pyramid back into a single image tensor.

    Starts from the low-resolution residual and successively expands,
    resizes to the next band's shape, and adds the detail band.
    """
    image = pyramid[-1]
    for band in reversed(pyramid[:-1]):
        expanded = _pyr_up(image)
        expanded = F.interpolate(
            expanded, size=band.shape[-2:], mode='bilinear', align_corners=False
        )
        image = expanded + band
    return image

class FDGPatcher:
    """
    An implementation of Frequency-Decoupled Guidance (FDG) from the paper
    "Guidance in the Frequency Domain Enables High-Fidelity Sampling at Low CFG Scales"
    (arXiv:2506.19713). This node patches the model's CFG function to allow for
    separate guidance strengths on different image frequencies.

    This version uses a pure PyTorch implementation for Laplacian pyramid
    construction to avoid external dependencies and handle non-ideal image
    resolutions gracefully.
    """

    @classmethod
    def INPUT_TYPES(cls):
        # ComfyUI node interface: widget definitions shown in the UI.
        return {
            "required": {
                "model": ("MODEL",),
                "enabled": ("BOOLEAN", {"default": True}),
                "levels": ("INT", {"default": 4, "min": 1, "max": 8, "step": 1, "display": "slider"}),
                "freq_guidance_high": ("FLOAT", {"default": 8.0, "min": -20.0, "max": 20.0, "step": 0.1, "display": "slider", "label": "High-Freq Guidance (w_high)"}),
                "freq_guidance_low": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.1, "display": "slider", "label": "Low-Freq Guidance (w_low)"}),
                "apply_apg_projection": ("BOOLEAN", {"default": False, "label_on": "Enabled", "label_off": "Disabled", "tooltip": "Apply Adaptive Projected Guidance (APG) to reduce oversaturation."}),
                "parallel_weight": ("FLOAT", {"default": 1.0, "min": -4.0, "max": 4.0, "step": 0.05, "display": "slider", "tooltip": "Weight for the APG parallel component. Only used if APG is enabled."}),
            },
        }

    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"
    CATEGORY = "Guidance/Patchers"

    def _interpolate(self, start, end, num_steps):
        """Return `num_steps` evenly spaced values from `start` to `end`.

        Edge cases: returns [] when num_steps <= 0 and [start] when
        num_steps == 1.
        """
        if num_steps <= 1:
            return [start] if num_steps == 1 else []
        return np.linspace(start, end, num_steps).tolist()

    def _project(self, v0: torch.Tensor, v1: torch.Tensor):
        """Split v0 into components parallel/orthogonal to v1.

        NOTE: computes in float32, unlike the module-level `project()`,
        which uses float64.
        """
        dtype = v0.dtype
        v0, v1 = v0.float(), v1.float()
        v1_norm = torch.nn.functional.normalize(v1.flatten(1), dim=-1).view_as(v1)
        v0_parallel = (v0 * v1_norm).sum(dim=[1, 2, 3], keepdim=True) * v1_norm
        v0_orthogonal = v0 - v0_parallel
        return v0_parallel.to(dtype), v0_orthogonal.to(dtype)

    def patch(self, model, enabled, levels, freq_guidance_high, freq_guidance_low, apply_apg_projection, parallel_weight):
        """Clone `model` and install the frequency-decoupled CFG function.

        Returns the model unchanged when disabled or when `levels` < 1.
        Guidance scales are interpolated from `freq_guidance_high` (band 0,
        finest detail) down to `freq_guidance_low` (coarse residual).
        """
        if not enabled or levels < 1:
            return (model,)

        guidance_scales = self._interpolate(freq_guidance_high, freq_guidance_low, levels)
        model_clone = model.clone()

        def frequency_domain_guidance(args):
            """Replacement CFG: guide each Laplacian band with its own scale."""
            pred_cond_x0 = args["cond_denoised"]
            pred_uncond_x0 = args["uncond_denoised"]

            pred_cond_pyramid = _build_laplacian_pyramid(pred_cond_x0, levels)
            pred_uncond_pyramid = _build_laplacian_pyramid(pred_uncond_x0, levels)

            pred_guided_pyramid = []
            for i, (p_cond, p_uncond) in enumerate(zip(pred_cond_pyramid, pred_uncond_pyramid)):
                diff = p_cond - p_uncond
                if apply_apg_projection:
                    # APG: re-weight the component of the guidance that is
                    # parallel to the conditional band to curb oversaturation.
                    diff_parallel, diff_orthogonal = self._project(diff, p_cond)
                    diff = parallel_weight * diff_parallel + diff_orthogonal

                scale = guidance_scales[i]
                p_guided = p_uncond + scale * diff
                pred_guided_pyramid.append(p_guided)

            guided_x0 = _build_image_from_pyramid(pred_guided_pyramid)

            # Map the guidance from x0 space back to model-output space by
            # scaling with the elementwise cond/uncond ratio.
            x0_diff = pred_cond_x0 - pred_uncond_x0
            pred_diff = args["cond"] - args["uncond"]
            # Sign-preserving clamp of the denominator: the previous
            # `x0_diff + 1e-9` could still divide by ~0 when
            # x0_diff ≈ -1e-9; clamp magnitude away from zero instead.
            safe_x0_diff = torch.where(
                x0_diff >= 0,
                x0_diff.clamp(min=1e-9),
                x0_diff.clamp(max=-1e-9),
            )
            scaling_factor = pred_diff / safe_x0_diff
            new_x0_diff = guided_x0 - pred_uncond_x0
            new_pred_diff = new_x0_diff * scaling_factor
            final_pred = args["uncond"] + new_pred_diff
            return final_pred.to(args["cond"].dtype)

        model_clone.set_model_sampler_cfg_function(frequency_domain_guidance)
        return (model_clone,)

class FDG_APG_Combined_Function:
    """Stateful sampler-CFG callable combining frequency-decoupled guidance
    (one scale per Laplacian band) with APG-style refinement: momentum
    accumulation, norm thresholding, and projection of the guidance
    relative to the conditional prediction.

    An instance is installed via `set_model_sampler_cfg_function` and keeps
    per-band momentum state between calls.
    """

    def __init__(self, params):
        # params keys: "levels", "guidance_scales" (one per band),
        # "momentum", "eta", "norm_threshold", "adaptive_momentum"
        # (see FDG_APG_Patcher.patch).
        self.p = params
        self.levels = self.p["levels"]
        # One momentum accumulator per pyramid band (0.0 until first use).
        self.running_averages = [0.0] * self.levels
        # Sentinel above any expected timestep so the first call is not
        # mistaken for the start of a new run.
        self.current_step = 999.0

    def __call__(self, args):
        cond_eps = args["cond"]
        uncond_eps = args["uncond"]
        # Timestep for the current sigma, taken from the first batch element.
        step = args["model"].model_sampling.timestep(args["sigma"])[0].item()

        # NOTE(review): assumes timesteps decrease over a sampling run, so
        # an *increase* signals a fresh run — reset the momentum state.
        if self.current_step < step:
            self.current_step = 999.0
            self.running_averages = [0.0] * self.levels
        self.current_step = step

        cond_eps_pyramid = _build_laplacian_pyramid(cond_eps, self.levels)
        uncond_eps_pyramid = _build_laplacian_pyramid(uncond_eps, self.levels)

        guided_eps_pyramid = []
        for i, (p_cond, p_uncond) in enumerate(zip(cond_eps_pyramid, uncond_eps_pyramid)):
            diff = p_cond - p_uncond

            current_momentum = self.p["momentum"]
            if self.p["adaptive_momentum"] > 0:
                # Shrink the effective momentum toward zero when `step` is
                # large (early in sampling), clamping at zero rather than
                # letting its sign flip.
                scale = self.p["momentum"]
                scale -= scale * (self.p["adaptive_momentum"]**4) * (1000 - step)
                if self.p["momentum"] < 0 and scale > 0: scale = 0
                elif self.p["momentum"] > 0 and scale < 0: scale = 0
                current_momentum = scale

            # Leaky accumulation of this band's guidance direction.
            self.running_averages[i] = diff + current_momentum * self.running_averages[i]
            refined_diff = self.running_averages[i]

            if self.p["norm_threshold"] > 0.0:
                # Rescale over-long guidance vectors (norm over the last
                # three dims) down to the threshold; shorter vectors pass
                # through unchanged (factor clipped at 1).
                diff_norm = refined_diff.norm(p=2, dim=[-1, -2, -3], keepdim=True)
                scale_factor = torch.minimum(torch.ones_like(refined_diff), self.p["norm_threshold"] / diff_norm)
                refined_diff = refined_diff * scale_factor

            # APG: keep the orthogonal component, weight the component
            # parallel to the conditional band by eta.
            diff_parallel, diff_orthogonal = project(refined_diff, p_cond)
            refined_diff = diff_orthogonal + self.p["eta"] * diff_parallel

            scale = self.p["guidance_scales"][i]
            p_guided = p_uncond + scale * refined_diff
            guided_eps_pyramid.append(p_guided)

        final_guided_eps = _build_image_from_pyramid(guided_eps_pyramid)
        return final_guided_eps.to(args["cond"].dtype)

class FDG_APG_Patcher:
    """ComfyUI node that patches a model's CFG function with combined
    Frequency-Decoupled Guidance (per-band scales) and APG refinement
    (momentum, norm threshold, projection), implemented by
    FDG_APG_Combined_Function.
    """

    @classmethod
    def INPUT_TYPES(cls):
        # ComfyUI node interface: widget definitions shown in the UI.
        return { "required": {
                "model": ("MODEL",), "enabled": ("BOOLEAN", {"default": True}),
                "levels": ("INT", {"default": 4, "min": 1, "max": 8, "step": 1}),
                "freq_guidance_high": ("FLOAT", {"default": 8.0, "min": -20.0, "max": 20.0, "step": 0.1}),
                "freq_guidance_low": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.1}),
                "momentum": ("FLOAT", {"default": 0.5, "min": -1.0, "max": 1.0, "step": 0.01}),
                "eta": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                "norm_threshold": ("FLOAT", {"default": 15.0, "min": 0.0, "max": 50.0, "step": 0.1}),
                "adaptive_momentum": ("FLOAT", {"default": 0.18, "min": 0, "max": 1.0, "step": 0.01}),
            }, }
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"
    CATEGORY = "Guidance/Patchers"

    def patch(self, model, enabled, levels, freq_guidance_high, freq_guidance_low,
              momentum, eta, norm_threshold, adaptive_momentum):
        """Clone `model` and install the combined FDG+APG CFG function.

        Returns the model unchanged when disabled or when `levels` < 1
        (the UI minimum is 1, but defend against programmatic calls, the
        same way FDGPatcher.patch does).
        """
        if not enabled or levels < 1:
            return (model,)
        params = {
            "levels": levels,
            # High -> low frequency guidance scales, one per pyramid band.
            "guidance_scales": np.linspace(freq_guidance_high, freq_guidance_low, levels).tolist(),
            "momentum": momentum,
            "eta": eta,
            "norm_threshold": norm_threshold,
            "adaptive_momentum": adaptive_momentum,
        }
        guidance_calculator = FDG_APG_Combined_Function(params)
        model_clone = model.clone()
        model_clone.set_model_sampler_cfg_function(guidance_calculator)
        return (model_clone,)

# ComfyUI registration tables: internal node name -> node class, and
# internal node name -> display name shown in the node picker.
NODE_CLASS_MAPPINGS = {
    "FrequencyDecoupledGuidance": FDGPatcher,
    "FDG_APG_Patcher": FDG_APG_Patcher
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "FrequencyDecoupledGuidance": "FrequencyDecoupledGuidance",
    "FDG_APG_Patcher": "FrequencyDecoupledGuidance & True APG"
}