import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
import math
import matplotlib.pyplot as plt


def custom_function(x, y, sigma=1):
    """Evaluate an isotropic 2-D Gaussian centred on the kernel midpoint.

    The implied kernel spans ``sigma * 6 + 1`` samples per axis, so the
    centre coordinate is ``(sigma * 6 + 1) // 2`` on both axes.

    Args:
        x, y: Coordinate tensors of identical shape.
        sigma: Standard deviation of the Gaussian (also fixes the implied
            kernel size).

    Returns:
        torch.Tensor of the same shape as ``x`` with the (unnormalised
        in sum, but correctly scaled) Gaussian values.
    """
    center = (sigma * 6 + 1) // 2
    two_var = 2 * sigma ** 2
    dist_sq = (x - center) ** 2 + (y - center) ** 2
    return torch.exp(-dist_sq / two_var) / (math.pi * two_var)


def gen_kernel(sigma):
    """Build a normalised 2-D Gaussian kernel of edge length ``sigma * 6 + 1``.

    Args:
        sigma: Standard deviation of the Gaussian.

    Returns:
        torch.Tensor: ``(k, k)`` kernel with ``k = sigma * 6 + 1`` whose
        entries sum to 1.
    """
    kernel_size = sigma * 6 + 1
    x = torch.arange(kernel_size, dtype=torch.float32)
    y = torch.arange(kernel_size, dtype=torch.float32)
    # indexing="ij" is the historical torch.meshgrid default; passing it
    # explicitly silences the deprecation warning without changing output
    # (the kernel is symmetric in x/y anyway).
    X, Y = torch.meshgrid(x, y, indexing="ij")
    kernel = custom_function(X, Y, sigma=sigma)
    kernel /= torch.sum(kernel)
    return kernel


def gaussian_filter_torch(image, sigma):
    """Smooth a batched single-channel image with a Gaussian kernel.

    Parameters:
    - image: Input tensor. NOTE(review): reflect padding + conv2d require a
      batched 4-D ``(B, 1, H, W)`` tensor here, not a 2-D array — confirm
      against callers (``gen_cow_mask`` passes ``(B, 1, H, W)``).
    - sigma: Standard deviation of the Gaussian kernel.

    Returns:
    - Filtered tensor of shape ``(B, H, W)`` (channel dim squeezed out).
    """
    half = int(sigma * 6 + 1) // 2
    weight = gen_kernel(sigma).unsqueeze(0).unsqueeze(0).to(image.device)

    # Reflect-pad so the convolution preserves spatial size without
    # darkening the borders, then convolve with the normalised kernel.
    padded = F.pad(image, [half, half, half, half], mode='reflect')
    smoothed = F.conv2d(padded.float(), weight.float(), stride=1, padding=0)
    return smoothed.squeeze(1)


def gen_cow_mask(image, sigma=8):
    """Generate a smooth cow-mask-style *soft* mask from uniform noise.

    Args:
        image: Reference tensor of shape ``(B, H, W)`` or ``(B, C, H, W)``;
            only its batch size, spatial size and device are used.
        sigma: Gaussian smoothing strength.

    Returns:
        torch.Tensor: Soft mask of shape ``(B, H, W)`` with values in
        ``[0, 1]`` (continuous, NOT binarised).
    """
    assert isinstance(image, torch.Tensor), f"{type(image)}"
    assert image.dim() in [3, 4]
    if image.dim() == 3:
        image = image.unsqueeze(1)
    noise = torch.rand([image.shape[0], 1, *image.shape[-2:]]).to(image.device)
    mask = gaussian_filter_torch(noise, sigma=sigma).to(image.device)
    # clamp_ is the in-place equivalent of the original torch.clip_ call.
    mask = mask.clamp_(0., 1.)
    return mask.squeeze(1)


def gen_patch_mask(image, block_size=16):
    """Create a random block-wise binary mask.

    The spatial area is split into ``block_size``-sized cells; each cell is
    independently assigned 0 or 1, then the cell grid is upsampled
    (nearest-neighbour) to the full image resolution.

    Args:
        image (torch.Tensor): ``(B, C, H, W)`` or ``(B, H, W)`` tensor; only
            its shape and device are used.
        block_size (int): Edge length of each mask cell in pixels.

    Returns:
        torch.Tensor: Binary mask of shape ``(B, H, W)``.
    """
    assert isinstance(image, torch.Tensor), f"{type(image)}"
    assert image.dim() in [3, 4]  # batch, channels, height, width
    if image.dim() == 3:
        image = image.unsqueeze(1)

    batch, _, height, width = image.shape
    grid_h = height // block_size
    grid_w = width // block_size

    # One independent 0/1 draw per cell.
    cells = torch.randint(0, 2, (batch, grid_h, grid_w), device=image.device).float()

    # Blow the cell grid up to full resolution; 'nearest' keeps it binary.
    return F.interpolate(cells.unsqueeze(1), size=(height, width), mode='nearest').squeeze(1)


def upsample_mask(img: torch.Tensor, mask: torch.Tensor):
    """Nearest-neighbour upsample ``mask`` to ``img``'s spatial size and
    broadcast it across ``img``'s channels.

    Args:
        img: Reference tensor of shape ``(B, C, H, W)``.
        mask: Mask of shape ``(B, h, w)`` (batch must match ``img``).

    Returns:
        Float mask of shape ``(B, C, H, W)``.
    """
    batch, channels, out_h, out_w = img.shape
    _, mask_h, mask_w = mask.shape
    # F.interpolate(mode='nearest') is what nn.UpsamplingNearest2d wraps.
    resized = F.interpolate(mask.view((batch, 1, mask_h, mask_w)).float(),
                            size=(out_h, out_w), mode='nearest')
    return resized.expand((batch, channels, -1, -1))


def is_binary(tensor):
    """Return a scalar bool tensor: True iff every element is 0 or 1."""
    zero_or_one = (tensor == 0) | (tensor == 1)
    return zero_or_one.all()


def gen_cutout(img_src: torch.Tensor,
               gt_src: torch.Tensor,
               img_tgt: torch.Tensor,
               gt_tgt: torch.Tensor,
               mask=None):
    """Cut-out style mixing of two batches with a binary (0/1) mask.

    Regions where the mask is 1 take the *target* content; regions where it
    is 0 keep the *source* image.  The source ground truth is replaced
    wholesale by the value 255 (presumably an ignore index — TODO confirm),
    so only target labels survive in the mixed ground truth.

    Args:
        img_src, img_tgt: Image batches of identical 4-D shape.
        gt_src, gt_tgt: Label batches of identical 4-D shape.
        mask: Optional binary mask, 2-D or 3-D.  When omitted, a cow-mask is
            thresholded at a random level in (0.1, 0.9).

    Returns:
        ``(img_mix1, gt_mix1, img_mix2, gt_mix2)`` — mix1 blends target into
        source; mix2 is the complementary blend.  Labels are returned as long.
    """
    assert img_src.shape == img_tgt.shape, "shape {}should equal to {}".format(img_src.shape, img_tgt.shape)
    assert gt_src.shape == gt_tgt.shape, "shape {}should equal to {}".format(gt_src.shape, gt_tgt.shape)
    assert img_src.dim() == 4 and gt_src.dim() == 4
    if mask is None:
        threshold = np.random.random() * 0.8 + 0.1
        mask = (gen_cow_mask(img_src, 8) > threshold).float().to(img_src.device)

    assert mask.dim() == 3 or mask.dim() == 2, "the mask's dim should equal to 3 or 2, but now is {} ".format(
        mask.dim())
    img_src = img_src.detach()
    # Source labels are deliberately discarded: everything becomes 255.
    gt_src = torch.ones_like(gt_src).detach() * 255.
    img_tgt = img_tgt.detach()
    gt_tgt = gt_tgt.detach()
    mask = mask.detach()

    if mask.dim() == 2:
        mask = mask.view((1, *mask.shape)).expand((img_src.shape[0], -1, -1))
    assert is_binary(mask), f"mask tensor should binary {torch.unique(mask)}"

    img_mask = upsample_mask(img_src, mask)
    gt_mask = upsample_mask(gt_src, mask)

    mix1_img = (1 - img_mask) * img_src + img_mask * img_tgt
    mix1_gt = (1 - gt_mask) * gt_src + gt_mask * gt_tgt
    mix2_img = img_mask * img_src + (1 - img_mask) * img_tgt
    mix2_gt = gt_mask * gt_src + (1 - gt_mask) * gt_tgt

    return mix1_img.detach(), mix1_gt.long().detach(), mix2_img.detach(), mix2_gt.long().detach()


def gen_cutmix(img_src: torch.Tensor,
               gt_src: torch.Tensor,
               img_tgt: torch.Tensor,
               gt_tgt: torch.Tensor,
               mask=None):
    """CutMix-style mixing of two batches with a binary (0/1) mask.

    Regions where the mask is 1 take the *target* content; regions where it
    is 0 keep the *source* content.  Unlike ``gen_cutout``, both images AND
    labels are mixed (source labels are kept, not replaced by 255).

    Args:
        img_src, img_tgt: Image batches of identical 4-D shape.
        gt_src, gt_tgt: Label batches of identical 4-D shape.
        mask: Optional binary mask, 2-D or 3-D.  When omitted, a cow-mask is
            generated and thresholded at a random level in (0.1, 0.9).

    Returns:
        ``(img_mix1, gt_mix1, img_mix2, gt_mix2)`` — mix1 blends target into
        source; mix2 is the complementary blend.  Labels are returned as long.
    """
    assert img_src.shape == img_tgt.shape, "shape {}should equal to {}".format(img_src.shape, img_tgt.shape)
    assert gt_src.shape == gt_tgt.shape, "shape {}should equal to {}".format(gt_src.shape, gt_tgt.shape)
    assert img_src.dim() == 4 and gt_src.dim() == 4
    if mask is None:
        # BUGFIX: gen_cow_mask returns a *soft* mask with continuous values
        # in [0, 1]; passing it on unthresholded always tripped the
        # is_binary assertion below.  Binarise it with a random threshold,
        # mirroring gen_cutout.
        threshold = np.random.random() * 0.8 + 0.1
        mask = (gen_cow_mask(img_src, 8) > threshold).float().to(img_src.device)

    assert mask.dim() == 3 or mask.dim() == 2, "the mask's dim should equal to 3 or 2, but now is {} ".format(
        mask.dim())
    img_src = img_src.detach()
    gt_src = gt_src.detach()
    img_tgt = img_tgt.detach()
    gt_tgt = gt_tgt.detach()
    mask = mask.detach()

    if mask.dim() == 2:
        mask = mask.view((1, mask.shape[0], mask.shape[1])).expand((img_src.shape[0], -1, -1))
    assert is_binary(mask), f"mask tensor should binary {torch.unique(mask)}"

    img_mask = upsample_mask(img_src, mask)
    # For a binary mask the > 0.5 test is a no-op; kept for safety so the
    # label mask stays hard even if the binary precondition is relaxed.
    gt_mask = upsample_mask(gt_src, (mask > 0.5).float())
    img_mixup1 = (1 - img_mask) * img_src + img_mask * img_tgt
    gt_mixup1 = (1 - gt_mask) * gt_src + gt_mask * gt_tgt

    img_mixup2 = img_mask * img_src + (1 - img_mask) * img_tgt
    gt_mixup2 = gt_mask * gt_src + (1 - gt_mask) * gt_tgt

    return img_mixup1.detach(), gt_mixup1.long().detach(), img_mixup2.detach(), gt_mixup2.long().detach()


def gen_mix_data(img_src: torch.Tensor,
                 gt_src: torch.Tensor,
                 img_tgt: torch.Tensor,
                 gt_tgt: torch.Tensor,
                 mode='cowout', mask=None):
    """Dispatch to cut-out or cut-mix blending depending on ``mode``.

    Args:
        img_src, gt_src, img_tgt, gt_tgt: Batches forwarded unchanged.
        mode: One of ``'cowout'``, ``'cowmix'``, ``'patchmix'``, ``'patchout'``.
        mask: Optional pre-computed binary mask forwarded to the mixer.

    Returns:
        The 4-tuple produced by ``gen_cutout`` / ``gen_cutmix``.
    """
    assert mode in ['cowout', 'cowmix', 'patchmix', 'patchout'], 'not supported mode : {}'.format(mode)
    if mode in ('cowout', 'patchout'):
        return gen_cutout(img_src, gt_src, img_tgt, gt_tgt, mask)
    if mode in ('cowmix', 'patchmix'):
        return gen_cutmix(img_src, gt_src, img_tgt, gt_tgt, mask)
    # Unreachable after the assert above, but kept as a defensive guard.
    raise ValueError(f"unsupported mode set : {mode}")


def visualize_patchmask(mask):
    """
    Visualize the generated patch mask: 0 renders black, 1 renders white.
    Expects a mask with a leading squeezable dim (e.g. ``(1, H, W)``).
    """
    plt.imshow(mask.squeeze(0).cpu().numpy(), cmap='gray')
    plt.axis('off')  # hide the axes
    plt.show()


if __name__ == '__main__':
    # Smoke test: build a random image batch [B, C, H, W], generate a
    # block-wise mask from it, and display the result.
    demo_image = torch.randn(1, 3, 256, 256)

    patch_mask = gen_patch_mask(demo_image, block_size=16)

    print("Visualizing Patch Mask...")
    visualize_patchmask(patch_mask)
