import torch
import torch.nn.functional as F
from torchvision.transforms.functional import gaussian_blur, adjust_brightness, adjust_contrast, adjust_saturation, adjust_hue

try:
    from PIL import Image
    import io
    import numpy as np
    PIL_AVAILABLE = True
except ImportError:
    PIL_AVAILABLE = False

class ComfyUI_MaskAIFingerprint:
    """Suppress periodic frequency-domain artifacts ("AI fingerprints") in an
    image and layer camera-style noise and optical imperfections on top.

    Pipeline per image: optional downscale -> per-channel FFT peak suppression
    with magnitude/phase noise -> upscale back -> sensor noise (PRNU, Poisson,
    read noise) -> optics (lens distortion, chromatic aberration, vignette) ->
    additive noise / grain / salt & pepper -> blur -> color adjustments ->
    sharpen -> posterize -> optional repeated JPEG round-trips.

    Outputs (all batched):
        cleaned_image: processed images, B x H x W x C float in [0, 1].
        anomaly_mask:  average of the per-channel FFT residual and a
                       low-local-variance mask, in [0, 1].
        fft_heatmap:   channel-averaged, min-max-normalized log-magnitude
                       spectrum replicated to 3 channels for display.
    """

    @classmethod
    def INPUT_TYPES(s):
        required = {
            "image": ("IMAGE",),
            "peak_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 10.0, "step": 0.1}),
            "suppression_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.1}),
            "phase_noise_strength": ("FLOAT", {"default": 0.01, "min": 0.0, "max": 0.1, "step": 0.001}),
            "freq_noise_strength": ("FLOAT", {"default": 0.005, "min": 0.0, "max": 0.05, "step": 0.001}),
            "noise_strength": ("FLOAT", {"default": 0.015, "min": 0.0, "max": 0.1, "step": 0.001}),
            "grain_strength": ("FLOAT", {"default": 0.02, "min": 0.0, "max": 0.1, "step": 0.001}),
            "salt_pepper_prob": ("FLOAT", {"default": 0.0001, "min": 0.0, "max": 0.05, "step": 0.0001}),
            "poisson_scale": ("FLOAT", {"default": 300.0, "min": 1.0, "max": 10000.0, "step": 10.0}),
            "read_noise_strength": ("FLOAT", {"default": 0.005, "min": 0.0, "max": 0.05, "step": 0.001}),
            "prnu_strength": ("FLOAT", {"default": 0.01, "min": 0.0, "max": 0.1, "step": 0.001}),
            "blur_sigma": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 5.0, "step": 0.1}),
            "motion_blur_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.1}),
            "chromatic_aberration_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.1}),
            "vignette_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.05}),
            "lens_distortion_k": ("FLOAT", {"default": 0.07, "min": -1.0, "max": 1.0, "step": 0.01}),
            "uniformity_threshold": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
            "brightness": ("FLOAT", {"default": 0.0, "min": -1.0, "max": 1.0, "step": 0.01}),
            "contrast": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.01}),
            "saturation": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.01}),
            "hue": ("FLOAT", {"default": 0.0, "min": -0.5, "max": 0.5, "step": 0.01}),
            "sharpen_amount": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.1}),
            "posterize_levels": ("INT", {"default": 50, "min": 2, "max": 256, "step": 1}),
            "resize_factor": ("FLOAT", {"default": 0.5, "min": 0.5, "max": 1.5, "step": 0.05}),
        }
        # BUGFIX: the previous version stored a literal None under these keys
        # when Pillow was unavailable; ComfyUI cannot build a widget from None.
        # Only expose the JPEG controls when they are actually usable.
        if PIL_AVAILABLE:
            required["jpeg_quality"] = ("INT", {"default": 60, "min": 50, "max": 100, "step": 5})
            required["jpeg_iterations"] = ("INT", {"default": 5, "min": 1, "max": 5, "step": 1})
        return {"required": required}

    RETURN_TYPES = ("IMAGE", "MASK", "IMAGE")
    RETURN_NAMES = ("cleaned_image", "anomaly_mask", "fft_heatmap")
    FUNCTION = "remove_fingerprint"
    CATEGORY = "image/postprocessing"

    def apply_lens_distortion(self, img, k):
        """Apply one-term radial distortion to an H x W x C image in [0, 1].

        k > 0 samples outward from center (barrel-style look), k < 0 inward.
        The sampling grid is re-normalized to [-1, 1] so grid_sample keeps the
        whole frame inside its valid coordinate range.
        """
        h, w = img.shape[:2]
        device = img.device

        x = torch.linspace(-1, 1, w, device=device)
        y = torch.linspace(-1, 1, h, device=device)
        xx, yy = torch.meshgrid(x, y, indexing='xy')
        r2 = xx**2 + yy**2  # squared radius from the image center

        factor = 1 + k * r2  # classic single-coefficient radial model

        ux = xx * factor
        uy = yy * factor

        # Rescale each axis so the grid stays within grid_sample's [-1, 1].
        ux = ux / torch.max(torch.abs(ux))
        uy = uy / torch.max(torch.abs(uy))

        grid = torch.stack([ux, uy], dim=-1).unsqueeze(0)  # 1 x H x W x 2

        img_t = img.permute(2, 0, 1).unsqueeze(0)  # 1 x C x H x W

        distorted = F.grid_sample(img_t, grid, mode='bilinear', padding_mode='zeros', align_corners=False)
        distorted = distorted.squeeze(0).permute(1, 2, 0)
        return torch.clamp(distorted, 0, 1)

    def apply_motion_blur(self, img, strength):
        """Horizontal motion blur on an H x W x C image.

        strength controls the kernel half-width (kernel_size = 2*int(strength)+1);
        0 is a no-op. Zero padding darkens the left/right edges slightly.
        """
        if strength == 0:
            return img
        kernel_size = int(strength) * 2 + 1
        kernel = torch.zeros((1, 1, kernel_size, kernel_size))
        kernel[0, 0, kernel_size // 2, :] = 1.0 / kernel_size  # horizontal line PSF
        # BUGFIX: conv2d with groups=3 requires a (3, 1, kH, kW) weight; the
        # previous (1, 1, kH, kW) kernel raised a RuntimeError whenever motion
        # blur was enabled. Also match the image's device/dtype so GPU inputs work.
        kernel = kernel.repeat(3, 1, 1, 1).to(device=img.device, dtype=img.dtype)
        img = img.permute(2, 0, 1).unsqueeze(0)  # 1 x C x H x W
        blurred = F.conv2d(img, kernel, groups=3, padding=(kernel_size // 2, kernel_size // 2))
        return blurred.squeeze(0).permute(1, 2, 0)

    def remove_fingerprint(self, image, peak_threshold, suppression_factor,
                           phase_noise_strength, freq_noise_strength,
                           noise_strength, grain_strength, salt_pepper_prob,
                           poisson_scale, read_noise_strength, prnu_strength,
                           blur_sigma, motion_blur_strength,
                           chromatic_aberration_strength, vignette_strength,
                           lens_distortion_k, uniformity_threshold,
                           brightness, contrast, saturation, hue,
                           sharpen_amount, posterize_levels, resize_factor,
                           jpeg_quality=None, jpeg_iterations=1):
        """Run the full pipeline over a batch of B x H x W x C images.

        Returns (cleaned_images, anomaly_masks, fft_heatmaps), each stacked
        along the batch dimension. See the class docstring for stage order.
        """
        cleaned_images = []
        anomaly_masks = []
        fft_heatmaps = []

        device = image.device

        for img in image:  # img shape: H x W x C
            # Optional resize down before FFT processing (faster, and the
            # resample itself disturbs pixel-level fingerprints).
            if resize_factor != 1.0:
                new_h = int(img.shape[0] * resize_factor)
                new_w = int(img.shape[1] * resize_factor)
                img_resized = F.interpolate(img.permute(2, 0, 1).unsqueeze(0), size=(new_h, new_w), mode='bicubic', align_corners=False).squeeze(0).permute(1, 2, 0)
            else:
                img_resized = img

            # Per-channel FFT peak suppression.
            channels = []
            channel_diffs = []
            channel_mags = []

            for ch in range(3):
                ch_img = img_resized[:, :, ch]  # H x W

                fft = torch.fft.fftn(ch_img)
                fshift = torch.fft.fftshift(fft)
                magnitude = torch.abs(fshift)
                phase = torch.angle(fshift)

                log_magnitude = torch.log(magnitude + 1)

                # Peaks = log-magnitude bins more than peak_threshold sigmas
                # above the mean; the DC bin is always preserved.
                flat_log_mag = log_magnitude.flatten()
                mean_log = torch.mean(flat_log_mag)
                std_log = torch.std(flat_log_mag)
                thresh = mean_log + peak_threshold * std_log

                h, w = log_magnitude.shape
                center_h, center_w = h // 2, w // 2

                peak_mask = (log_magnitude > thresh)
                peak_mask[center_h, center_w] = False

                # Scale detected peaks by suppression_factor (1.0 = keep as-is).
                suppressed_magnitude = magnitude.clone()
                suppressed_magnitude[peak_mask] *= suppression_factor

                # Dither the magnitude spectrum.
                if freq_noise_strength > 0:
                    freq_noise = torch.randn_like(suppressed_magnitude) * freq_noise_strength * suppressed_magnitude.mean()
                    suppressed_magnitude += freq_noise
                    suppressed_magnitude = torch.clamp(suppressed_magnitude, min=0)

                # Dither the phase spectrum.
                if phase_noise_strength > 0:
                    phase_noise = torch.randn_like(phase) * phase_noise_strength
                    phase += phase_noise

                # Recombine magnitude and phase, invert the FFT.
                fshift_suppressed = suppressed_magnitude * torch.exp(1j * phase)
                f_ishift = torch.fft.ifftshift(fshift_suppressed)
                cleaned_ch = torch.fft.ifftn(f_ishift)
                cleaned_ch = torch.real(cleaned_ch)
                cleaned_ch = torch.clamp(cleaned_ch, 0, 1)

                channels.append(cleaned_ch)

                # Residual (what the FFT step changed) feeds the anomaly mask.
                diff = torch.abs(ch_img - cleaned_ch)
                channel_diffs.append(diff)

                mag_vis = torch.log(magnitude + 1)
                channel_mags.append(mag_vis)

            cleaned_img = torch.stack(channels, dim=2)  # H x W x C

            # Resize back to the original resolution if we downscaled.
            if resize_factor != 1.0:
                cleaned_img = F.interpolate(cleaned_img.permute(2, 0, 1).unsqueeze(0), size=(img.shape[0], img.shape[1]), mode='bicubic', align_corners=False).squeeze(0).permute(1, 2, 0)
                cleaned_img = torch.clamp(cleaned_img, 0, 1)

            # --- Sensor noise simulation ---
            # PRNU: fixed-pattern multiplicative gain noise.
            if prnu_strength > 0:
                prnu_map = 1 + torch.randn_like(cleaned_img) * prnu_strength
                cleaned_img *= prnu_map
                cleaned_img = torch.clamp(cleaned_img, 0, 1)

            # Shot noise: Poisson-sample at a scaled photon count.
            if poisson_scale > 0:
                scaled = cleaned_img * poisson_scale
                cleaned_img = torch.poisson(scaled) / poisson_scale
                cleaned_img = torch.clamp(cleaned_img, 0, 1)

            # Read noise: additive Gaussian.
            if read_noise_strength > 0:
                read_noise = torch.randn_like(cleaned_img) * read_noise_strength
                cleaned_img += read_noise
                cleaned_img = torch.clamp(cleaned_img, 0, 1)

            # --- Optics simulation ---
            if lens_distortion_k != 0.0:
                cleaned_img = self.apply_lens_distortion(cleaned_img, lens_distortion_k)

            # Chromatic aberration: shift R and B horizontally in opposite
            # directions. Strengths below 1.0 truncate to a zero shift (no-op).
            if chromatic_aberration_strength > 0:
                r = cleaned_img[:, :, 0]
                g = cleaned_img[:, :, 1]
                b = cleaned_img[:, :, 2]

                shift = int(chromatic_aberration_strength)
                r_shifted = torch.roll(r, shifts=shift, dims=1)
                b_shifted = torch.roll(b, shifts=-shift, dims=1)

                cleaned_img = torch.stack([r_shifted, g, b_shifted], dim=2)
                cleaned_img = torch.clamp(cleaned_img, 0, 1)

            # Vignette: radial quadratic falloff from the center.
            if vignette_strength > 0:
                h, w = cleaned_img.shape[:2]
                x = torch.linspace(-1, 1, w).to(device)
                y = torch.linspace(-1, 1, h).to(device)
                X, Y = torch.meshgrid(x, y, indexing='xy')
                vignette = 1 - vignette_strength * (X**2 + Y**2)
                vignette = vignette.clamp(0, 1).unsqueeze(2)
                cleaned_img *= vignette
                cleaned_img = torch.clamp(cleaned_img, 0, 1)

            # --- Generic noise layers ---
            if noise_strength > 0:
                noise = torch.randn_like(cleaned_img) * noise_strength
                cleaned_img += noise
                cleaned_img = torch.clamp(cleaned_img, 0, 1)

            # Film grain (multiplicative).
            if grain_strength > 0:
                grain = 1 + torch.randn_like(cleaned_img) * grain_strength
                cleaned_img *= grain
                cleaned_img = torch.clamp(cleaned_img, 0, 1)

            # Salt-and-pepper: clip random pixels to pure black/white.
            if salt_pepper_prob > 0:
                rand = torch.rand_like(cleaned_img)
                cleaned_img[rand < salt_pepper_prob / 2] = 0
                cleaned_img[rand > 1 - salt_pepper_prob / 2] = 1

            # --- Blur ---
            if blur_sigma > 0:
                cleaned_img = cleaned_img.permute(2, 0, 1).unsqueeze(0)  # 1 x C x H x W
                cleaned_img = gaussian_blur(cleaned_img, kernel_size=3, sigma=blur_sigma)
                cleaned_img = cleaned_img.squeeze(0).permute(1, 2, 0)

            if motion_blur_strength > 0:
                cleaned_img = self.apply_motion_blur(cleaned_img, motion_blur_strength)
                cleaned_img = torch.clamp(cleaned_img, 0, 1)

            # --- Color adjustments (torchvision expects C x H x W) ---
            cleaned_img = cleaned_img.permute(2, 0, 1)  # C x H x W
            if brightness != 0.0:
                cleaned_img = adjust_brightness(cleaned_img, 1 + brightness)
            if contrast != 1.0:
                cleaned_img = adjust_contrast(cleaned_img, contrast)
            if saturation != 1.0:
                cleaned_img = adjust_saturation(cleaned_img, saturation)
            if hue != 0.0:
                cleaned_img = adjust_hue(cleaned_img, hue)
            cleaned_img = cleaned_img.permute(1, 2, 0)  # H x W x C

            # Unsharp mask: original + amount * (original - blurred).
            if sharpen_amount > 0:
                blurred = gaussian_blur(cleaned_img.permute(2, 0, 1).unsqueeze(0), kernel_size=5, sigma=1.0).squeeze(0).permute(1, 2, 0)
                sharpened = cleaned_img + (cleaned_img - blurred) * sharpen_amount
                cleaned_img = torch.clamp(sharpened, 0, 1)

            # Posterize: round to posterize_levels evenly spaced values.
            if posterize_levels < 256:
                cleaned_img = torch.floor(cleaned_img * (posterize_levels - 1) + 0.5) / (posterize_levels - 1)
                cleaned_img = torch.clamp(cleaned_img, 0, 1)

            # Repeated JPEG round-trips via PIL to bake in compression artifacts.
            if PIL_AVAILABLE and jpeg_quality is not None and jpeg_quality < 100:
                for _ in range(jpeg_iterations):
                    # BUGFIX: round instead of truncating when quantizing to uint8.
                    img_np = (cleaned_img.cpu().clamp(0, 1) * 255).round().numpy().astype(np.uint8)
                    pil_img = Image.fromarray(img_np)
                    buffer = io.BytesIO()
                    pil_img.save(buffer, format="JPEG", quality=jpeg_quality)
                    buffer.seek(0)
                    pil_back = Image.open(buffer)
                    img_back = np.array(pil_back) / 255.0
                    # BUGFIX: numpy division yields float64; cast back to
                    # float32 so the output dtype matches ComfyUI images.
                    cleaned_img = torch.from_numpy(img_back).float().to(device)
                    cleaned_img = torch.clamp(cleaned_img, 0, 1)

            # Uniformity mask: flag pixels whose 5x5 local std is below the
            # threshold (local variance via E[x^2] - E[x]^2).
            gray = torch.mean(cleaned_img, dim=2)
            local_mean = F.avg_pool2d(gray.unsqueeze(0).unsqueeze(0), kernel_size=5, stride=1, padding=2).squeeze()
            local_mean_sq = F.avg_pool2d((gray ** 2).unsqueeze(0).unsqueeze(0), kernel_size=5, stride=1, padding=2).squeeze()
            local_var = local_mean_sq - local_mean ** 2
            local_std = torch.sqrt(torch.clamp(local_var, min=0))
            uniform_mask = (local_std < uniformity_threshold).float()

            # Anomaly mask = mean of FFT residual (upscaled if needed) and
            # the uniformity mask.
            fft_diff_mask = torch.mean(torch.stack(channel_diffs), dim=0)
            fft_diff_mask = F.interpolate(fft_diff_mask.unsqueeze(0).unsqueeze(0), size=(img.shape[0], img.shape[1]), mode='bilinear', align_corners=False).squeeze() if resize_factor != 1.0 else fft_diff_mask
            combined_mask = (fft_diff_mask + uniform_mask) / 2.0
            combined_mask = torch.clamp(combined_mask, 0, 1)

            # Heatmap: channel-averaged log-magnitude, min-max normalized,
            # replicated to 3 channels so it renders as an IMAGE.
            avg_mag = torch.mean(torch.stack(channel_mags), dim=0)
            avg_mag = F.interpolate(avg_mag.unsqueeze(0).unsqueeze(0), size=(img.shape[0], img.shape[1]), mode='bilinear', align_corners=False).squeeze() if resize_factor != 1.0 else avg_mag
            avg_mag = (avg_mag - avg_mag.min()) / (avg_mag.max() - avg_mag.min() + 1e-8)
            fft_heatmap = avg_mag.unsqueeze(2).repeat(1, 1, 3)

            cleaned_images.append(cleaned_img)
            anomaly_masks.append(combined_mask)
            fft_heatmaps.append(fft_heatmap)

        return (torch.stack(cleaned_images), torch.stack(anomaly_masks), torch.stack(fft_heatmaps))

# Register the node
# Node-key -> implementation class, consumed by ComfyUI's loader.
NODE_CLASS_MAPPINGS = {"MaskAIFingerprint": ComfyUI_MaskAIFingerprint}

# Node-key -> human-readable title shown in the UI.
NODE_DISPLAY_NAME_MAPPINGS = {"MaskAIFingerprint": "Mask AI Fingerprint"}