import torch
import numpy as np
from PIL import Image
from tqdm.auto import tqdm
from skimage.color import rgb2lab, lab2rgb
import cv2

device_cpu = torch.device("cpu")  # Global CPU device (NOTE(review): not referenced anywhere in this chunk — confirm before removing)

@torch.no_grad()
def compute_gradient_magnitude(img_gray):
    """Return the per-pixel Sobel gradient magnitude for a batch of grayscale images.

    img_gray: float tensor of shape (B, H, W). The result has the same shape;
    the convolution is zero-padded, so border pixels see implicit zeros.
    """
    sobel_x = torch.tensor(
        [[[[-1.0, 0.0, 1.0],
           [-2.0, 0.0, 2.0],
           [-1.0, 0.0, 1.0]]]]
    )
    # The vertical Sobel kernel is exactly the transpose of the horizontal one.
    sobel_y = sobel_x.transpose(-1, -2)
    batched = img_gray.unsqueeze(1)  # add a channel dim for conv2d
    gx = torch.nn.functional.conv2d(batched, sobel_x, padding=1)
    gy = torch.nn.functional.conv2d(batched, sobel_y, padding=1)
    return (gx.pow(2) + gy.pow(2)).sqrt().squeeze(1)

@torch.no_grad()
def slic_batch_gslic_torch_strict_blocks(imgs, region_size=16, color_weight=10.0, iter_num=10):
    """Batched SLIC-style superpixel averaging on CPU tensors.

    Args:
        imgs: tensor of shape (B, H, W, 3), values in [0, 255] (uint8 or float).
        region_size: superpixel grid spacing S in pixels.
        color_weight: weight of the spatial term relative to the color term.
        iter_num: number of assignment/update iterations. With 0, the initial
            strict-block assignment is returned (previously this crashed
            because `labels` stayed None).

    Returns:
        uint8 numpy array of shape (B, H, W, 3) where every pixel is replaced
        by its superpixel's mean color.
    """
    imgs = imgs.float() / 255.0
    B, H, W, C = imgs.shape
    assert C == 3
    S = region_size

    # Edge-awareness: pixels on strong gradients get a larger spatial penalty,
    # which discourages superpixels from crossing image edges.
    imgs_gray = imgs.mean(dim=-1)
    grad_mag = compute_gradient_magnitude(imgs_gray)

    # Regular grid of initial cluster centers, one per S x S cell.
    cy = torch.arange(S // 2, H, S)
    cx = torch.arange(S // 2, W, S)
    grid_y, grid_x = torch.meshgrid(cy, cx, indexing='ij')
    centers_yx = torch.stack([grid_y.flatten(), grid_x.flatten()], dim=1)
    K = centers_yx.shape[0]
    grid_h, grid_w = cy.numel(), cx.numel()

    # centers: (B, K, 5) rows are (y, x, r, g, b); colors sampled at grid points.
    centers_color = imgs[:, centers_yx[:, 0], centers_yx[:, 1]]
    centers = torch.cat([centers_yx.unsqueeze(0).repeat(B, 1, 1).float(), centers_color], dim=2)

    # Per-pixel integer coordinates, reused every iteration.
    yy = torch.arange(H).view(1, H, 1).expand(B, H, W)
    xx = torch.arange(W).view(1, 1, W).expand(B, H, W)

    # Each pixel competes only among the 4 clusters of its own cell and the
    # cells below / to the right ("strict block" neighborhood), clamped at
    # image borders.
    cell_y = (yy / S).long()
    cell_x = (xx / S).long()
    neigh_offsets = torch.tensor([[0, 0], [1, 0], [0, 1], [1, 1]])
    neigh_ids = []
    for dy, dx in neigh_offsets:
        ny = (cell_y + dy).clamp(0, grid_h - 1)
        nx = (cell_x + dx).clamp(0, grid_w - 1)
        neigh_ids.append(ny * grid_w + nx)
    neigh_ids = torch.stack(neigh_ids, dim=-1)  # (B, H, W, 4)

    # FIX: seed labels with each pixel's own (clamped) cell id so that
    # iter_num == 0 no longer indexes `centers` with labels == None below.
    labels = neigh_ids[..., 0]

    # Loop-invariant tensors hoisted out of the iteration loop.
    B_idx = torch.arange(B).view(B, 1, 1, 1).expand(B, H, W, 4)
    yy_flat = yy.reshape(B, -1).float()
    xx_flat = xx.reshape(B, -1).float()
    flat_imgs = imgs.reshape(B, -1, 3)
    edge_weight = 1.0 + grad_mag.unsqueeze(-1) * 5.0

    for _ in tqdm(range(iter_num), desc="GSLICR CPU", unit="轮"):
        # --- assignment step: pick the best of the 4 candidate clusters ---
        centers_sel = centers[B_idx, neigh_ids]  # (B, H, W, 4, 5)

        c_rgb = centers_sel[..., 2:]
        d_color = torch.norm(imgs.unsqueeze(3) - c_rgb, dim=-1)

        dy = yy.unsqueeze(3).float() - centers_sel[..., 0]
        dx = xx.unsqueeze(3).float() - centers_sel[..., 1]
        d_space = torch.sqrt(dy ** 2 + dx ** 2) / S

        d_total = d_color + color_weight * d_space * edge_weight
        _, min_idx4 = torch.min(d_total, dim=-1)
        labels = neigh_ids.gather(-1, min_idx4.unsqueeze(-1)).squeeze(-1)

        # --- update step: recompute each cluster's mean position and color ---
        flat_labels = labels.reshape(B, -1)

        counts = torch.zeros(B, K).scatter_add_(
            1, flat_labels, torch.ones_like(flat_labels, dtype=torch.float)
        ).clamp_min_(1.0)  # clamp avoids division by zero for empty clusters
        sum_y = torch.zeros(B, K).scatter_add_(1, flat_labels, yy_flat)
        sum_x = torch.zeros(B, K).scatter_add_(1, flat_labels, xx_flat)
        sum_rgb = torch.zeros(B, K, 3).scatter_add_(
            1, flat_labels.unsqueeze(-1).expand(-1, -1, 3), flat_imgs
        )

        centers = torch.cat([
            (sum_y / counts).unsqueeze(-1),
            (sum_x / counts).unsqueeze(-1),
            sum_rgb / counts.unsqueeze(-1)
        ], dim=2)

    # Paint every pixel with its cluster's mean color.
    B_idx3 = torch.arange(B).view(B, 1, 1).expand(B, H, W)
    pixel_colors = centers[B_idx3, labels][..., 2:]
    return (pixel_colors * 255).clamp(0, 255).byte().numpy()

def pixel_standardize_mode(img_np, target_w=64):
    """Downscale `img_np` to `target_w` pixels wide, preserving aspect ratio,
    using nearest-neighbor sampling (keeps hard pixel edges)."""
    h, w, _ = img_np.shape
    new_h = int(round(target_w * (h / w)))
    shrunk = Image.fromarray(img_np).resize((target_w, new_h), Image.NEAREST)
    return np.array(shrunk)

@torch.no_grad()
def apply_color_quantization(img_np, max_color_number=16, max_iter=20):
    """Reduce the image palette to at most `max_color_number` colors with
    k-means clustering in CIELAB space.

    Args:
        img_np: uint8 RGB array of shape (H, W, 3).
        max_color_number: palette size K; None or <= 0 returns the input unchanged.
        max_iter: maximum k-means iterations. With <= 0, a single
            nearest-center assignment is performed (previously this raised
            NameError because `labels` was never assigned).

    Returns:
        uint8 RGB array of shape (H, W, 3) drawn from the K-color palette.
    """
    if max_color_number is None or max_color_number <= 0:
        return img_np
    H, W, _ = img_np.shape
    # Cluster in Lab so Euclidean distance better matches perceived color difference.
    img_lab = rgb2lab(img_np / 255.0).astype(np.float32)
    img_lab_t = torch.from_numpy(img_lab.reshape(-1, 3))

    N = img_lab_t.shape[0]
    K = min(max_color_number, N)
    # NOTE(review): random init makes the output non-deterministic across calls.
    indices = torch.randperm(N)[:K]
    centers = img_lab_t[indices]

    # FIX: when the loop below never runs, still assign labels once so the
    # palette lookup at the end does not raise NameError.
    if max_iter <= 0:
        labels = torch.argmin(torch.cdist(img_lab_t, centers), dim=1)

    for _ in range(max_iter):
        labels = torch.argmin(torch.cdist(img_lab_t, centers), dim=1)

        # Vectorized per-cluster mean (replaces the per-cluster Python loop).
        sums = torch.zeros(K, 3).index_add_(0, labels, img_lab_t)
        cnts = torch.zeros(K).index_add_(0, labels, torch.ones(N))
        means = sums / cnts.clamp_min(1.0).unsqueeze(1)
        # Empty clusters keep their previous center, as in the original code.
        new_centers = torch.where(cnts.unsqueeze(1) > 0, means, centers)

        if torch.allclose(new_centers, centers, atol=1e-4):
            break
        centers = new_centers

    quant_lab = centers[labels].numpy().reshape(H, W, 3)
    return (lab2rgb(quant_lab) * 255).clip(0, 255).astype(np.uint8)

def apply_noise_filter(img_np, strength=1):
    """Apply a box blur whose kernel grows with `strength`.

    A non-positive `strength` disables filtering and returns the input as-is.
    """
    if strength > 0:
        kernel = max(3, strength * 2 + 1)
        return cv2.blur(img_np, (kernel, kernel))
    return img_np

def simple_compress(img_np, target_w, target_h):
    """Resize `img_np` to exactly (target_w, target_h) with bicubic resampling."""
    pil_img = Image.fromarray(img_np)
    resized = pil_img.resize((target_w, target_h), Image.BICUBIC)
    return np.array(resized)

def run(input_imgs, region_size=16, color_weight=10.0, iter_num=10,
        enable_slic=True, enable_pixelization=False,
        enable_color_quant=False, quant_colors=16, enable_compression=False,
        target_width=None, target_height=None,
        enable_domain_interp=False, enable_noise_filter=0):
    """Pipeline driver: optional SLIC, blur, resize/pixelize, then quantize.

    Accepts a single PIL image, a single (H, W, 3) / batch (N, H, W, 3)
    numpy array, or an iterable of PIL images, and returns a list of
    processed PIL images. `enable_noise_filter` doubles as the blur strength.
    NOTE(review): `enable_domain_interp` is accepted but never used here.
    """
    # Normalize every accepted input form into a list of PIL images.
    if isinstance(input_imgs, Image.Image):
        pil_list = [input_imgs]
    elif isinstance(input_imgs, np.ndarray):
        if input_imgs.ndim == 3:
            pil_list = [Image.fromarray(input_imgs)]
        else:
            pil_list = [Image.fromarray(frame) for frame in input_imgs]
    else:
        pil_list = input_imgs

    arrays = [np.array(im.convert("RGB")) for im in pil_list]
    outputs = []

    print(f"[run] CPU模式 | region_size={region_size}, color_weight={color_weight}, iter_num={iter_num}")

    for idx, img_np in enumerate(arrays):
        print(f"\n[run] 处理第 {idx+1} 张图片, 原始尺寸: {img_np.shape[:2]}")

        current = img_np.copy()

        if enable_slic:
            print("[run] 执行 SLIC 超像素...")
            batch = torch.from_numpy(current).unsqueeze(0)
            current = slic_batch_gslic_torch_strict_blocks(batch, region_size, color_weight, iter_num)[0]

        if enable_noise_filter > 0:
            current = apply_noise_filter(current, enable_noise_filter)

        # Exact-size compression wins over aspect-preserving pixelization.
        if enable_compression and target_width and target_height:
            current = simple_compress(current, target_width, target_height)
        elif enable_pixelization and target_width:
            current = pixel_standardize_mode(current, target_w=target_width)

        if enable_color_quant:
            current = apply_color_quantization(current, max_color_number=quant_colors)

        outputs.append(Image.fromarray(current))

    return outputs
