import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


def gen_filters(size_x: int, size_y: int, dct_or_idct_fun: callable) -> np.ndarray:
    """Build a bank of ``size_x * size_y`` separable 2-D (I)DCT basis filters.

    Filter ``k_y * size_x + k_x`` is the outer product of the 1-D basis
    functions for frequency pair ``(k_y, k_x)``: entry ``(n_y, n_x)`` equals
    ``dct_or_idct_fun(n_y, k_y, size_y) * dct_or_idct_fun(n_x, k_x, size_x)``.

    Args:
        size_x: filter width (spatial x / column count).
        size_y: filter height (spatial y / row count).
        dct_or_idct_fun: 1-D basis function ``f(n, k, N)`` (e.g. ``dct_coeff``).

    Returns:
        Array of shape ``(size_x * size_y, size_y, size_x)``.
    """
    # NOTE: the original hard-coded a stride of 8 (``k_y * 8 + k_x``) and
    # swapped the spatial dims, which broke every size other than 8x8.
    # Using ``size_x`` as the stride and (size_y, size_x) spatial dims is
    # identical for the 8x8 case and correct in general.
    filters = np.zeros((size_x * size_y, size_y, size_x))
    for k_y in range(size_y):
        for k_x in range(size_x):
            for n_y in range(size_y):
                row = dct_or_idct_fun(n_y, k_y, size_y)  # invariant over n_x
                for n_x in range(size_x):
                    filters[k_y * size_x + k_x, n_y, n_x] = row * dct_or_idct_fun(n_x, k_x, size_x)
    return filters


def get_jpeg_yuv_filter_mask(image_shape: tuple, window_size: int, keep_count: int):
    """Tile a zig-zag low-frequency keep-mask over an image-sized grid.

    Within each ``window_size x window_size`` tile, the first ``keep_count``
    positions in JPEG zig-zag order are set to 1; the rest stay 0. The tile is
    repeated to cover ``image_shape`` and cropped to its exact size.

    Returns:
        ``np.uint8`` array of shape ``image_shape``.
    """
    # Walk the anti-diagonals explicitly: diagonal s holds all (x, y) with
    # x + y == s. Even diagonals are traversed with y ascending, odd ones
    # with y descending — the standard JPEG zig-zag.
    zigzag = []
    for s in range(2 * window_size - 1):
        diagonal = [(s - y, y) for y in range(window_size) if 0 <= s - y < window_size]
        zigzag.extend(diagonal if s % 2 == 0 else reversed(diagonal))

    tile = np.zeros((window_size, window_size), dtype=np.uint8)
    for i, j in zigzag[:keep_count]:
        tile[i, j] = 1

    rows, cols = image_shape[0], image_shape[1]
    reps_r = -(-rows // window_size)   # ceiling division
    reps_c = -(-cols // window_size)
    return np.tile(tile, (reps_r, reps_c))[:rows, :cols]


def dct_coeff(n, k, N):
    """Type-II DCT basis value: cos(pi/N * (n + 1/2) * k)."""
    angle = np.pi / N * (n + 0.5) * k
    return np.cos(angle)


def idct_coeff(n, k, N):
    """Inverse-DCT basis value, scaled by sqrt(1/(2N)).

    The n == 0 term is halved (the DC coefficient's weight in the
    orthonormal inverse transform).
    """
    scale = np.sqrt(1 / (2. * N))
    dc_correction = -0.5 if n == 0 else 0.0
    return (dc_correction + np.cos(np.pi / N * (k + 0.5) * n)) * scale


class JpegCompression(nn.Module):
    """Differentiable JPEG-style compression noise layer.

    Converts the image to YUV, applies a per-8x8-block DCT via strided
    convolution, zeroes high-frequency coefficients with a per-channel
    zig-zag mask, then applies the inverse DCT and converts back to RGB.

    Args:
        device: torch device the filter banks and mask cache live on.
        yuv_keep_weights: coefficients kept per 8x8 block for (Y, U, V).
    """

    def __init__(self, device, yuv_keep_weights=(25, 9, 9)):
        super(JpegCompression, self).__init__()
        self.device = device

        # 8x8 (I)DCT bases as conv kernels: [64, 1, 8, 8].
        self.dct_conv_weights = torch.tensor(
            gen_filters(8, 8, dct_coeff), dtype=torch.float32).unsqueeze(1).to(self.device)
        self.idct_conv_weights = torch.tensor(
            gen_filters(8, 8, idct_coeff), dtype=torch.float32).unsqueeze(1).to(self.device)

        self.yuv_keep_weights = yuv_keep_weights
        # Backward-compatible alias for the original (misspelled) attribute name.
        self.yuv_keep_weighs = yuv_keep_weights
        self.jpeg_mask = None
        # Pre-cache a large mask so typical forward passes never regenerate it.
        self.create_mask((1000, 1000))

    def create_mask(self, requested_shape):
        """Build and cache per-channel (Y, U, V) keep masks of shape requested_shape."""
        self.jpeg_mask = torch.empty((3,) + requested_shape, device=self.device)
        for channel, weights_to_keep in enumerate(self.yuv_keep_weights):
            mask = torch.from_numpy(
                get_jpeg_yuv_filter_mask(requested_shape, 8, weights_to_keep)).to(self.device)
            self.jpeg_mask[channel] = mask

    def get_mask(self, hw_shape):
        """Return the cached mask cropped to (H, W), growing the cache if needed."""
        H, W = hw_shape
        _, cached_h, cached_w = self.jpeg_mask.shape
        # Compare each dimension separately: the original tuple comparison
        # (shape < (H, W)) is lexicographic and could skip a needed regrow
        # when only the second dimension was too small.
        if H > cached_h or W > cached_w:
            # Grow-only: never shrink the cache below its current size.
            self.create_mask((max(H, cached_h), max(W, cached_w)))
        return self.jpeg_mask[:, :H, :W]  # [3, H, W]

    def apply_conv(self, image, filter_type: str):
        """Apply the DCT or IDCT filter bank to each channel of a [B, 3, H, W] image.

        H and W must be multiples of 8. Returns a [B, 3, H, W] tensor where
        each 8x8 tile holds the 64 transform coefficients of that block,
        coefficient (i, j) at tile offset (i, j) — the layout the zig-zag
        mask is defined in.
        """
        filters = self.dct_conv_weights if filter_type == 'dct' else self.idct_conv_weights
        conv_channels = []

        for c in range(image.shape[1]):
            ch = image[:, c:c + 1, :, :]                # [B, 1, H, W]
            conv = F.conv2d(ch, filters, stride=8)      # [B, 64, H//8, W//8]
            # pixel_shuffle scatters channel i*8+j to spatial offset (i, j)
            # inside each tile. (The original used nearest interpolation,
            # which merely replicated all 64 channels spatially and produced
            # a [B, 3, 64, H, W] stack that broke the masking step.)
            conv = F.pixel_shuffle(conv, 8)             # [B, 1, H, W]
            conv_channels.append(conv)

        return torch.cat(conv_channels, dim=1)          # [B, 3, H, W]

    def forward(self, noised_and_cover):
        """Compress noised_and_cover[0]; pass noised_and_cover[1] through.

        Args:
            noised_and_cover: [noised_image, cover_image], each [B, 3, H, W].
        Returns:
            [compressed_image, cover_image].
        """
        noised_image = noised_and_cover[0]
        cover_image = noised_and_cover[1]

        B, C, H, W = noised_image.shape
        # Pad right/bottom so both spatial dims are multiples of the block size.
        pad_h = (8 - H % 8) % 8
        pad_w = (8 - W % 8) % 8
        padded = F.pad(noised_image, (0, pad_w, 0, pad_h), mode='constant', value=0)

        # RGB -> YUV (BT.601 analog coefficients).
        r, g, b = padded[:, 0:1], padded[:, 1:2], padded[:, 2:3]
        Y = 0.299 * r + 0.587 * g + 0.114 * b
        U = -0.14713 * r - 0.28886 * g + 0.436 * b
        V = 0.615 * r - 0.51499 * g - 0.10001 * b
        yuv = torch.cat([Y, U, V], dim=1)

        # Per-block DCT, coefficients laid out spatially.
        yuv_dct = self.apply_conv(yuv, 'dct')  # [B, 3, H', W']

        # Zero high-frequency coefficients; broadcast the [3, H', W'] mask
        # over the batch dimension (no need to materialize a repeat).
        mask = self.get_mask(yuv_dct.shape[2:])
        masked = yuv_dct * mask.unsqueeze(0)

        # Inverse DCT back to the pixel domain.
        yuv_idct = self.apply_conv(masked, 'idct')

        # YUV -> RGB. Note: channels are named y_c/u_c/v_c and blue so the
        # batch size B unpacked above is not clobbered (the original reused B).
        y_c, u_c, v_c = yuv_idct[:, 0:1], yuv_idct[:, 1:2], yuv_idct[:, 2:3]
        red = y_c + 1.13983 * v_c
        green = y_c - 0.39465 * u_c - 0.58060 * v_c
        blue = y_c + 2.03211 * u_c
        rgb = torch.cat([red, green, blue], dim=1)

        # Drop the padding added above.
        rgb = rgb[:, :, :H, :W]

        # Safety net: force the output spatial size to match the cover image.
        if rgb.shape[2:] != cover_image.shape[2:]:
            rgb = F.interpolate(rgb, size=cover_image.shape[2:], mode='bilinear', align_corners=False)

        return [rgb, cover_image]
