import numpy as np
import cv2
from PIL import Image
import random
import torch
from torchvision import datasets, transforms
import torch.nn as nn
# from skimage.measure import compare_ssim, compare_psnr
from skimage.metrics import structural_similarity as compare_ssim
from skimage.metrics import peak_signal_noise_ratio as compare_psnr


# Convert a tensor to a cv2-style image (HWC uint8 numpy array)
def image_tensor2cv2(input_tensor: torch.Tensor):
    """Convert a normalized (1, C, H, W) tensor into an HWC uint8 cv2 image.

    :param input_tensor: normalized image tensor of shape (1, C, H, W)
                         (mean=0.5/std=0.5 normalization — see unnormalize)
    :return: numpy uint8 array of shape (H, W, C)
    """
    assert (len(input_tensor.shape) == 4 and input_tensor.shape[0] == 1)
    # work on a detached copy on the cpu
    rgb_tensor = input_tensor.clone().detach().to(torch.device('cpu'))
    # undo the mean/std normalization back to [0, 1]
    rgb_tensor = unnormalize(rgb_tensor)
    # drop only the batch dimension; a bare squeeze() would also remove a
    # singleton channel (or H/W of size 1) and break the permute below
    rgb_tensor = rgb_tensor.squeeze(0)
    # [0,1] -> [0,255] with rounding (+0.5), CHW -> HWC, then to numpy for cv2
    rgb_tensor = rgb_tensor.mul_(255).add_(0.5).clamp_(
        0, 255).permute(1, 2, 0).type(torch.uint8).numpy()
    return rgb_tensor

# Undo channelwise mean/std normalization
def unnormalize(tensor: torch.Tensor, inplace: bool = False,
                mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) -> torch.Tensor:
    """Undo a channelwise Normalize: ``tensor * std + mean``.

    Inverse of ``torchvision.transforms.Normalize``. The mean/std now default
    to the (0.5, 0.5, 0.5) values this file uses everywhere, but can be
    overridden for other normalization schemes.

    Args:
        tensor (Tensor): Tensor image of size (C, H, W) or (B, C, H, W).
        inplace (bool, optional): Perform the operation in place on ``tensor``.
        mean (sequence): Per-channel means used for the original normalization.
        std (sequence): Per-channel standard deviations used for the original
            normalization.

    Returns:
        Tensor: Un-normalized tensor image.

    Raises:
        TypeError: if ``tensor`` is not a torch.Tensor.
        ValueError: if ``tensor`` has fewer than 3 dims, or any std is zero.
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError(
            'Input tensor should be a torch tensor. Got {}.'.format(type(tensor)))

    if tensor.ndim < 3:
        raise ValueError('Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = '
                         '{}.'.format(tensor.size()))

    if not inplace:
        tensor = tensor.clone()

    dtype = tensor.dtype
    mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
    std = torch.as_tensor(std, dtype=dtype, device=tensor.device)

    # Check kept from torchvision's normalize (where it guards a division);
    # harmless here and keeps the two functions' contracts symmetric.
    if (std == 0).any():
        raise ValueError(
            'std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))
    # reshape to (C, 1, 1) so they broadcast over H and W
    if mean.ndim == 1:
        mean = mean.view(-1, 1, 1)
    if std.ndim == 1:
        std = std.view(-1, 1, 1)

    tensor.mul_(std).add_(mean)
    return tensor

# Compute PSNR and SSIM between two images
def psnr_and_ssim(image1, image2):
    """Return (PSNR, mean per-channel SSIM) for two HWC images of the same shape."""
    psnr = compare_psnr(image1, image2)
    # SSIM is computed channel by channel and averaged over the 3 channels.
    channel_ssims = [compare_ssim(image1[:, :, ch], image2[:, :, ch]) for ch in range(3)]
    ssim = sum(channel_ssims) / 3
    return psnr, ssim

# In-memory JPEG compression followed by decompression
def encode_jpeg_to_decode_jpeg(image, ratio=85):
    """JPEG-compress an image in memory and decode it back.

    :param image: HWC uint8 image (cv2 convention)
    :param ratio: JPEG quality, 0-100; lower values compress harder and
                  degrade the image more
    :return: the decoded (lossy) image as an HWC uint8 array
    """
    encode_params = [cv2.IMWRITE_JPEG_QUALITY, ratio]
    # encode to an in-memory .jpg byte stream
    buffer = cv2.imencode('.jpg', image, encode_params)[1]
    raw_bytes = (np.array(buffer)).tobytes()
    # decode the byte stream back into a BGR image
    decoded = cv2.imdecode(np.frombuffer(raw_bytes, np.uint8), cv2.IMREAD_COLOR)
    return decoded

# Convert a Pillow image to a normalized tensor
def pillow_image_to_tensor_normalize(image):
    """Convert a PIL image to a mean=0.5/std=0.5 normalized tensor in [-1, 1].

    Bug fix: ToTensor already rescales pixel values from [0, 255] to [0, 1],
    so the original extra ``/ 255`` double-scaled the result into roughly
    [-1/255, 1/255] instead of [-1, 1], breaking the round trip with
    image_tensor2cv2/unnormalize (which assume mean=std=0.5 normalization).

    :param image: PIL image (uint8, HWC)
    :return: normalized tensor of shape (C, H, W)
    """
    data_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    return data_transforms(image)

# Apply real JPEG compression to a batch of images
def image_add_jpeg(image):
    """Run each image in a normalized batch through real JPEG compression.

    :param image: normalized batch tensor of shape (B, C, H, W)
    :return: (jpeg-compressed batch tensor on cuda, mean PSNR, mean SSIM),
             where PSNR/SSIM compare each image before vs. after JPEG
    """
    jpeg_tensors = []
    psnr = 0.0
    ssim = 0.0

    for i in range(image.shape[0]):
        cv_image = image_tensor2cv2(image[i].unsqueeze(0))
        cv_image_jpeg = encode_jpeg_to_decode_jpeg(cv_image)
        # accumulate per-image quality metrics
        psnr_, ssim_ = psnr_and_ssim(cv_image, cv_image_jpeg)
        psnr += psnr_
        ssim += ssim_
        # back to a normalized tensor via pillow
        im = pillow_image_to_tensor_normalize(Image.fromarray(cv_image_jpeg))
        jpeg_tensors.append(im.unsqueeze(0))
    # single cat instead of re-concatenating inside the loop (avoids O(B^2) copying)
    jpeg_image = torch.cat(jpeg_tensors, dim=0).cuda()
    return jpeg_image, psnr / image.shape[0], ssim / image.shape[0]

# Compute average PSNR and SSIM for a batch of test images
def val_psnr_ssim(image, encode_image):
    """Average PSNR/SSIM between a batch of images and their encoded versions.

    :param image: normalized batch tensor (B, C, H, W)
    :param encode_image: normalized batch tensor of the same shape
    :return: (mean PSNR, mean SSIM) over the batch
    """
    batch_size = image.shape[0]
    total_psnr = 0.0
    total_ssim = 0.0
    for idx in range(batch_size):
        original = image_tensor2cv2(image[idx].unsqueeze(0))
        encoded = image_tensor2cv2(encode_image[idx].unsqueeze(0))
        psnr_val, ssim_val = psnr_and_ssim(original, encoded)
        total_psnr += psnr_val
        total_ssim += ssim_val
    return total_psnr / batch_size, total_ssim / batch_size

"""
    jpeg压缩
"""
class JpegBasic(nn.Module):
    """Building blocks for a torch-based JPEG pipeline.

    Provides RGB<->YUV conversion, blockwise 8x8 DCT/IDCT, and standard
    JPEG (de)quantization. Fixes vs. the original:
    - dct/idct used ``shape[2] // 8`` for BOTH chunk steps when reassembling
      8x8 blocks, which only works for square inputs; the column step must
      use ``shape[3] // 8``.
    - the quantization tables and the DCT coefficient matrix were duplicated
      verbatim; they are now built by shared private helpers.
    """

    # Standard JPEG base quantization tables (ITU-T T.81, Annex K).
    _LUMA_TBL = [
        [16, 11, 10, 16, 24, 40, 51, 61],
        [12, 12, 14, 19, 26, 58, 60, 55],
        [14, 13, 16, 24, 40, 57, 69, 56],
        [14, 17, 22, 29, 51, 87, 80, 62],
        [18, 22, 37, 56, 68, 109, 103, 77],
        [24, 35, 55, 64, 81, 104, 113, 92],
        [49, 64, 78, 87, 103, 121, 120, 101],
        [72, 92, 95, 98, 112, 100, 103, 99]
    ]
    _CHROMA_TBL = [
        [17, 18, 24, 47, 99, 99, 99, 99],
        [18, 21, 26, 66, 99, 99, 99, 99],
        [24, 26, 56, 99, 99, 99, 99, 99],
        [47, 66, 99, 99, 99, 99, 99, 99],
        [99, 99, 99, 99, 99, 99, 99, 99],
        [99, 99, 99, 99, 99, 99, 99, 99],
        [99, 99, 99, 99, 99, 99, 99, 99],
        [99, 99, 99, 99, 99, 99, 99, 99]
    ]

    def __init__(self):
        super(JpegBasic, self).__init__()

    def _quant_tables(self, ref, scale_factor):
        """Build (luma, chroma) tables scaled by scale_factor and tiled to ref's HxW.

        ``ref`` supplies the device and the (already 8-aligned) spatial size.
        Entries are rounded and clamped to >= 1 so quantization never divides by 0.
        """
        reps = (ref.shape[2] // 8, ref.shape[3] // 8)

        def build(base):
            tbl = torch.tensor(base, dtype=torch.float) * scale_factor
            return tbl.round().to(ref.device).clamp(min=1).repeat(*reps)

        return build(self._LUMA_TBL), build(self._CHROMA_TBL)

    # Quantization
    def std_quantization(self, image_yuv_dct, scale_factor, round_func=torch.round):
        """Divide DCT coefficients by the scaled quantization tables and round.

        Channel 0 (Y) uses the luminance table; channels 1.. (U, V) the
        chrominance table. ``round_func`` can be swapped for a differentiable
        rounding approximation.
        """
        luma_tbl, chroma_tbl = self._quant_tables(image_yuv_dct, scale_factor)
        q_image_yuv_dct = image_yuv_dct.clone()
        q_image_yuv_dct[:, :1, :, :] = image_yuv_dct[:, :1, :, :] / luma_tbl
        q_image_yuv_dct[:, 1:, :, :] = image_yuv_dct[:, 1:, :, :] / chroma_tbl
        return round_func(q_image_yuv_dct)

    # De-quantization
    def std_reverse_quantization(self, q_image_yuv_dct, scale_factor):
        """Multiply quantized coefficients back by the scaled quantization tables."""
        luma_tbl, chroma_tbl = self._quant_tables(q_image_yuv_dct, scale_factor)
        image_yuv_dct = q_image_yuv_dct.clone()
        image_yuv_dct[:, :1, :, :] = q_image_yuv_dct[:, :1, :, :] * luma_tbl
        image_yuv_dct[:, 1:, :, :] = q_image_yuv_dct[:, 1:, :, :] * chroma_tbl
        return image_yuv_dct

    def _dct_matrix(self, device):
        """Return the 8x8 orthonormal DCT-II basis matrix on ``device``."""
        coff = torch.zeros((8, 8), dtype=torch.float).to(device)
        coff[0, :] = 1 * np.sqrt(1 / 8)
        for i in range(1, 8):
            for j in range(8):
                coff[i, j] = np.cos(np.pi * i * (2 * j + 1) / (2 * 8)) * np.sqrt(2 / 8)
        return coff

    def _to_blocks(self, image):
        """(B, C, H, W) -> (B*(H/8)*(W/8), C, 8, 8) stack of 8x8 tiles."""
        return torch.cat(torch.cat(image.split(8, 2), 0).split(8, 3), 0)

    def _from_blocks(self, blocks, h_blocks, w_blocks):
        """Inverse of _to_blocks: reassemble tiles into (B, C, H, W).

        The first chunk undoes the column split (w_blocks pieces), the second
        undoes the row split (h_blocks pieces) — the original used h_blocks
        for both, which broke non-square inputs.
        """
        return torch.cat(torch.cat(blocks.chunk(w_blocks, 0), 3).chunk(h_blocks, 0), 2)

    # Blockwise DCT
    def dct(self, image):
        """Apply the 2-D DCT independently to every 8x8 block of ``image``."""
        coff = self._dct_matrix(image.device)
        h_blocks = image.shape[2] // 8
        w_blocks = image.shape[3] // 8
        blocks = self._to_blocks(image)
        # 2-D DCT of each tile: C * X * C^T (matmul broadcasts over the stack)
        blocks = torch.matmul(torch.matmul(coff, blocks), coff.permute(1, 0))
        return self._from_blocks(blocks, h_blocks, w_blocks)

    # Blockwise IDCT
    def idct(self, image_dct):
        """Apply the inverse 2-D DCT independently to every 8x8 block."""
        coff = self._dct_matrix(image_dct.device)
        h_blocks = image_dct.shape[2] // 8
        w_blocks = image_dct.shape[3] // 8
        blocks = self._to_blocks(image_dct)
        # inverse transform: C^T * X * C
        blocks = torch.matmul(torch.matmul(coff.permute(1, 0), blocks), coff)
        return self._from_blocks(blocks, h_blocks, w_blocks)

    # RGB -> YUV (JFIF coefficients)
    def rgb2yuv(self, image_rgb):
        """Convert a (B, 3, H, W) RGB tensor to YUV."""
        image_yuv = torch.empty_like(image_rgb)
        image_yuv[:, 0:1, :, :] = 0.299 * image_rgb[:, 0:1, :, :] \
                                  + 0.587 * image_rgb[:, 1:2, :, :] + 0.114 * image_rgb[:, 2:3, :, :]
        image_yuv[:, 1:2, :, :] = -0.1687 * image_rgb[:, 0:1, :, :] \
                                  - 0.3313 * image_rgb[:, 1:2, :, :] + 0.5 * image_rgb[:, 2:3, :, :]
        image_yuv[:, 2:3, :, :] = 0.5 * image_rgb[:, 0:1, :, :] \
                                  - 0.4187 * image_rgb[:, 1:2, :, :] - 0.0813 * image_rgb[:, 2:3, :, :]
        return image_yuv

    # YUV -> RGB (approximate inverse of rgb2yuv)
    def yuv2rgb(self, image_yuv):
        """Convert a (B, 3, H, W) YUV tensor back to RGB."""
        image_rgb = torch.empty_like(image_yuv)
        image_rgb[:, 0:1, :, :] = image_yuv[:, 0:1, :, :] + 1.40198758 * image_yuv[:, 2:3, :, :]
        image_rgb[:, 1:2, :, :] = image_yuv[:, 0:1, :, :] - 0.344113281 * image_yuv[:, 1:2, :, :] \
                                  - 0.714103821 * image_yuv[:, 2:3, :, :]
        image_rgb[:, 2:3, :, :] = image_yuv[:, 0:1, :, :] + 1.77197812 * image_yuv[:, 1:2, :, :]
        return image_rgb

    def yuv_dct(self, image):
        """[0,1] RGB image -> zero-padded YUV blockwise DCT coefficients.

        Returns (dct_coefficients, pad_width, pad_height) so the padding can
        be undone by idct_rgb.
        """
        # scale from [0,1] to [0,255]
        # (an older revision mapped [-1,1] -> [0,255] instead)
        image = image * 255

        # zero-pad bottom/right so H and W become multiples of 8
        pad_height = (8 - image.shape[2] % 8) % 8
        pad_width = (8 - image.shape[3] % 8) % 8
        image = nn.ZeroPad2d((0, pad_width, 0, pad_height))(image)

        # convert to yuv
        image_yuv = self.rgb2yuv(image)

        assert image_yuv.shape[2] % 8 == 0
        assert image_yuv.shape[3] % 8 == 0

        # apply blockwise dct
        image_dct = self.dct(image_yuv)

        return image_dct, pad_width, pad_height

    def idct_rgb(self, image_quantization, pad_width, pad_height):
        """Inverse of yuv_dct: DCT coefficients -> un-padded [0,1] RGB image."""
        # apply inverse dct (idct)
        image_idct = self.idct(image_quantization)

        # transform from yuv to rgb
        image_ret_padded = self.yuv2rgb(image_idct)

        # strip the zero padding added by yuv_dct
        image_rgb = image_ret_padded[:, :, :image_ret_padded.shape[2] - pad_height,
                    :image_ret_padded.shape[3] - pad_width].clone()

        # back to [0,1]
        return image_rgb / 255


class Jpeg(JpegBasic):
    """Simulated JPEG compression as a noise layer with quality factor Q."""

    def __init__(self, Q=50):
        super(Jpeg, self).__init__()

        self.Q = Q
        # Standard JPEG quality-to-scale mapping: Q >= 50 -> 2 - Q/50, else 50/Q.
        if self.Q >= 50:
            self.scale_factor = 2 - self.Q * 0.02
        else:
            self.scale_factor = 50 / self.Q

    def forward(self, image):
        """Compress-and-reconstruct ``image`` through the JPEG pipeline."""
        # [0,1] image -> [0,255], rgb2yuv, blockwise dct
        image_dct, pad_width, pad_height = self.yuv_dct(image)

        # the lossy step: quantize, then de-quantize the coefficients
        quantized = self.std_quantization(image_dct, self.scale_factor)
        dequantized = self.std_reverse_quantization(quantized, self.scale_factor)

        # idct, yuv2rgb, back to [0,1]
        noised_image = self.idct_rgb(dequantized, pad_width, pad_height)
        # NOTE(review): output of idct_rgb is ~[0,1], yet this clamps to
        # [-1,1] (a clamp(0, 1) variant was left commented out) — kept as-is.
        return noised_image.clamp(-1, 1)


class Jpeg_dct(JpegBasic):
    """De-quantize then re-quantize coefficients directly in the DCT domain."""

    def __init__(self, Q=50):
        super(Jpeg_dct, self).__init__()
        self.Q = Q
        # standard JPEG quality-to-scale mapping
        self.scale_factor = (2 - self.Q * 0.02) if self.Q >= 50 else (50 / self.Q)

    def forward(self, image_dct):
        """Round-trip ``image_dct`` through de-quantization and quantization.

        NOTE(review): the order is reverse-quantize first, then quantize —
        presumably the input already holds quantized coefficients; confirm
        against callers.
        """
        dequantized = self.std_reverse_quantization(image_dct, self.scale_factor)
        return self.std_quantization(dequantized, self.scale_factor)

class Jpeg_dct2rgb(JpegBasic):
    """Decode quantized DCT coefficients back to an RGB image."""

    def __init__(self, Q=50, subsample=0):
        # ``subsample`` is currently unused; kept for interface compatibility.
        super(Jpeg_dct2rgb, self).__init__()
        self.Q = Q
        # standard JPEG quality-to-scale mapping
        self.scale_factor = (2 - self.Q * 0.02) if self.Q >= 50 else (50 / self.Q)

    def forward(self, image_dct):
        """De-quantize ``image_dct`` and reconstruct the RGB image."""
        # DCT inputs are normally already 8-aligned, so these are usually 0.
        pad_height = (8 - image_dct.shape[2] % 8) % 8
        pad_width = (8 - image_dct.shape[3] % 8) % 8

        # reverse quantization
        dequantized = self.std_reverse_quantization(image_dct, self.scale_factor)

        # idct, yuv2rgb, back to image range
        noised_image = self.idct_rgb(dequantized, pad_width, pad_height)
        return noised_image.clamp(-1, 1)

"""
    DCT频率提取与还原
"""
# 频率提取
def DCT2frequency(image, c, w):
    """Regroup blockwise 8x8 DCT coefficients into per-frequency planes.

    The 64 coefficients of every 8x8 block are gathered so that each of the
    64 frequencies forms a contiguous run in the output. Uses integer ``//``
    instead of the original ``int(... / 64)`` float division (exact for any
    size, no float-precision risk for large ``w``).

    :param image: tensor with c*3*w*w elements holding blockwise DCT
                  coefficients (e.g. shape (c, 3, w, w))
    :param c: batch size
    :param w: spatial size; must be a multiple of 8
    :return: regrouped tensor of shape (c, 3, w, w)
    """
    blocks = w * w // 64  # number of 8x8 blocks per channel
    image = image.reshape(c, 3 * blocks, 8, 8)
    image = image.reshape(c, 3, blocks, 64)
    # swap the block axis and the 64-frequency axis
    image = image.permute(0, 1, 3, 2)
    image = image.reshape(c, 3, w, w)
    return image

# Inverse operation: per-frequency planes back to blockwise layout
def frequency2DCT(image, c, w):
    """Inverse of DCT2frequency: per-frequency planes back to blockwise layout.

    Uses integer ``//`` instead of the original ``int(... / 64)`` float
    division (exact for any size, no float-precision risk for large ``w``).

    :param image: regrouped tensor of shape (c, 3, w, w)
    :param c: batch size
    :param w: spatial size; must be a multiple of 8
    :return: blockwise-ordered DCT coefficient tensor of shape (c, 3, w, w)
    """
    blocks = w * w // 64  # number of 8x8 blocks per channel
    ima = image.reshape(c, 3, 64, blocks)
    # swap the 64-frequency axis back behind the block axis
    ima = ima.permute(0, 1, 3, 2)
    ima = ima.reshape(c, 3 * blocks, 8, 8)
    ima = ima.reshape(c, 3, w, w)
    return ima
