import numpy as np
import os
import re
import csv
import time
import pickle
import logging
import cv2
from PIL import Image

import torch
from torchvision import datasets, transforms
import torchvision.utils
from torch.utils import data
import torch.nn.functional as F
import torch.nn as nn

from options import HiDDenConfiguration, TrainingOptions
from model.hidden import Hidden

# from skimage.measure import compare_ssim, compare_psnr
from skimage.metrics import structural_similarity as compare_ssim
from skimage.metrics import peak_signal_noise_ratio as compare_psnr


# Image -> tensor
def image_to_tensor(image):
    """
    Convert a numpy image into a torch tensor.
    :param image: (height x width x channels) uint8 array
    :return: (1 x channels x height x width) torch tensor in range [-1.0, 1.0]
    """
    # add a batch dimension, move channels first, then rescale [0, 255] -> [-1, 1]
    tensor = torch.Tensor(image).unsqueeze(0).permute(0, 3, 1, 2)
    return tensor / 127.5 - 1

# Tensor -> image
def tensor_to_image(tensor):
    """
    Convert a torch tensor back into a numpy uint8 image.
    :param tensor: (batch x channels x height x width) torch tensor in range [-1.0, 1.0]
    :return: (batch x height x width x channels) uint8 array
    """
    array = tensor.permute(0, 2, 3, 1).cpu().numpy()
    # rescale [-1, 1] -> [0, 255] and clamp before the uint8 cast
    scaled = (array + 1) * 127.5
    return np.clip(scaled, 0, 255).astype(np.uint8)

# Save a comparison grid of images
def save_images(original_images, watermarked_images, epoch, folder, resize_to=None):
    """Save originals, watermarked images and (amplified) residual maps as one PNG grid."""
    originals = original_images[:original_images.shape[0], :, :, :].cpu()
    watermarked = watermarked_images[:watermarked_images.shape[0], :, :, :].cpu()

    # scale values to range [0, 1] from original range of [-1, 1]
    originals = (originals + 1) / 2
    watermarked = (watermarked + 1) / 2
    # absolute residual between cover and watermarked image, plus a 2x amplified copy
    residual = abs(originals - watermarked)
    residual_amplified = residual * 2.0

    if resize_to is not None:
        originals = F.interpolate(originals, size=resize_to)
        watermarked = F.interpolate(watermarked, size=resize_to)
        residual = F.interpolate(residual, size=resize_to)
        residual_amplified = F.interpolate(residual_amplified, size=resize_to)

    grid = torch.cat([originals, watermarked, residual, residual_amplified], dim=0)
    filename = os.path.join(folder, 'epoch-{}.png'.format(epoch))
    torchvision.utils.save_image(grid, filename, normalize=False)

# Natural ("human") sort
def sorted_nicely(l):
    """ Sort the given iterable in the way that humans expect."""
    def natural_key(key):
        # split into digit / non-digit runs so numeric parts compare as integers
        return [int(part) if part.isdigit() else part for part in re.split('([0-9]+)', key)]

    return sorted(l, key=natural_key)

# Most recent checkpoint file in a folder
def last_checkpoint_from_folder(folder: str):
    """Return the path of the naturally-last (i.e. highest-numbered) file in *folder*."""
    newest = sorted_nicely(os.listdir(folder))[-1]
    return os.path.join(folder, newest)

# Save a model checkpoint
def save_checkpoint(model: Hidden, experiment_name: str, epoch: int, checkpoint_folder: str):
    """ Saves a checkpoint at the end of an epoch. """
    os.makedirs(checkpoint_folder, exist_ok=True)

    checkpoint_filename = os.path.join(checkpoint_folder, f'{experiment_name}--epoch-{epoch}.pyt')
    logging.info('Saving checkpoint to {}'.format(checkpoint_filename))
    # persist both networks and both optimizers so training can resume exactly
    torch.save({
        'enc-dec-model': model.encoder_decoder.state_dict(),
        'enc-dec-optim': model.optimizer_enc_dec.state_dict(),
        'discrim-model': model.discriminator.state_dict(),
        'discrim-optim': model.optimizer_discrim.state_dict(),
        'epoch': epoch,
    }, checkpoint_filename)
    logging.info('Saving checkpoint done.')


# Load the most recent checkpoint from a folder
def load_last_checkpoint(checkpoint_folder):
    """ Load the last checkpoint from the given folder """
    checkpoint_path = last_checkpoint_from_folder(checkpoint_folder)
    return torch.load(checkpoint_path), checkpoint_path

# Restore a model from a checkpoint
def model_from_checkpoint(hidden_net, checkpoint):
    """ Restores the hidden_net object from a checkpoint object """
    targets = [
        (hidden_net.encoder_decoder, 'enc-dec-model'),
        (hidden_net.optimizer_enc_dec, 'enc-dec-optim'),
        (hidden_net.discriminator, 'discrim-model'),
        (hidden_net.optimizer_discrim, 'discrim-optim'),
    ]
    for target, key in targets:
        target.load_state_dict(checkpoint[key])

# Load pickled run options; the "->" annotation documents the return types.
# NOTE: the annotation is a string forward-reference -- the old tuple-literal
# annotation was not a valid type hint and was evaluated eagerly at def time.
def load_options(options_file_name) -> 'tuple[TrainingOptions, HiDDenConfiguration, dict]':
    """ Loads the training, model, and noise configurations from the given file.

    :param options_file_name: path to the pickled options file
    :return: (train_options, hidden_config, noise_config)
    """
    # NOTE(review): pickle.load executes arbitrary code -- only load trusted files
    with open(options_file_name, 'rb') as f:
        train_options = pickle.load(f)
        noise_config = pickle.load(f)
        hidden_config = pickle.load(f)
        # backward compatibility: some models were trained and saved before
        # .enable_fp16 was added to the configuration object
        if not hasattr(hidden_config, 'enable_fp16'):
            setattr(hidden_config, 'enable_fp16', False)

    return train_options, hidden_config, noise_config

# Build train / validation data loaders
def get_data_loaders(hidden_config: HiDDenConfiguration, train_options: TrainingOptions):
    """ Get torch data loaders for training and validation. The data loaders take a crop of the image,
    transform it into tensor, and normalize it."""
    crop_size = (hidden_config.H, hidden_config.W)
    # Normalize maps [0, 1] tensors into [-1, 1] (mean/std 0.5 per channel)
    normalize = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    # training uses a random crop (padded if the image is too small);
    # validation uses a deterministic center crop
    train_transform = transforms.Compose([
        transforms.RandomCrop(crop_size, pad_if_needed=True),
        transforms.ToTensor(),
        normalize,
    ])
    test_transform = transforms.Compose([
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        normalize,
    ])

    # training data: shuffled every epoch, loaded by 4 worker processes
    train_images = datasets.ImageFolder(train_options.train_folder, train_transform)
    train_loader = torch.utils.data.DataLoader(train_images, batch_size=train_options.batch_size,
                                               shuffle=True, num_workers=4)
    # validation data: fixed order
    validation_images = datasets.ImageFolder(train_options.validation_folder, test_transform)
    validation_loader = torch.utils.data.DataLoader(validation_images, batch_size=train_options.batch_size,
                                                    shuffle=False, num_workers=4)
    return train_loader, validation_loader

# Log loss averages through the logging module
def log_progress(losses_accu):
    """Write the accumulated losses to the log."""
    log_print_helper(losses_accu, logging.info)

# Print loss averages to stdout
def print_progress(losses_accu):
    """Print the accumulated losses to standard output."""
    log_print_helper(losses_accu, print)

# Shared formatting for logging / printing losses
def log_print_helper(losses_accu, log_or_print_func):
    """Emit one aligned '<name>    <avg>' line per loss via *log_or_print_func*."""
    max_len = max(len(name) for name in losses_accu)
    for name, accumulator in losses_accu.items():
        # pad names so the numeric column lines up
        log_or_print_func('{}{:.4f}'.format(name.ljust(max_len + 4), accumulator.avg))

# Create the folder structure for one training run
def create_folder_for_run(runs_folder, experiment_name):
    """Create '<runs_folder>/<experiment_name> <timestamp>' with checkpoints/ and images/ subfolders."""
    if not os.path.exists(runs_folder):
        os.makedirs(runs_folder)

    # timestamp makes each run folder unique
    timestamp = time.strftime("%Y.%m.%d--%H-%M-%S")
    this_run_folder = os.path.join(runs_folder, f'{experiment_name} {timestamp}')

    os.makedirs(this_run_folder)
    os.makedirs(os.path.join(this_run_folder, 'checkpoints'))
    os.makedirs(os.path.join(this_run_folder, 'images'))

    return this_run_folder

# Append one row of training losses to a CSV file
def write_losses(file_name, losses_accu, epoch, duration):
    """Append epoch losses and the epoch duration (seconds) to *file_name*; writes a header on epoch 1."""
    with open(file_name, 'a', newline='') as csvfile:
        writer = csv.writer(csvfile)
        if epoch == 1:
            # first epoch: emit the column header once
            writer.writerow(['epoch'] + [name.strip() for name in losses_accu.keys()] + ['duration'])
        values = ['{:.4f}'.format(accumulator.avg) for accumulator in losses_accu.values()]
        writer.writerow([epoch] + values + ['{:.0f}'.format(duration)])
# Append validation PSNR / SSIM to a CSV file
def write_psnr_ssim(file_name, epoch, psnr, ssim):
    """Append one (epoch, PSNR, SSIM) row to *file_name*; writes a header on epoch 1."""
    with open(file_name, 'a', newline='') as csvfile:
        writer = csv.writer(csvfile)
        if epoch == 1:
            writer.writerow(['epoch', 'PSNR', 'SSIM'])
        writer.writerow([epoch, psnr, ssim])

# Tensor -> cv2-style numpy image
def image_tensor2cv2(input_tensor: torch.Tensor):
    """
    Convert a normalized 1xCxHxW tensor into a cv2-style HxWxC uint8 array.
    :param input_tensor: tensor to convert (batch size must be 1)
    """
    assert (len(input_tensor.shape) == 4 and input_tensor.shape[0] == 1)
    # work on a detached CPU copy so the caller's tensor is untouched
    rgb_tensor = input_tensor.clone().detach().to(torch.device('cpu'))
    # undo the mean/std 0.5 normalization back to [0, 1], drop the batch dim
    rgb_tensor = unnormalize(rgb_tensor).squeeze()
    # [0, 1] -> [0, 255] with rounding, CHW -> HWC, then to a numpy uint8 array
    return rgb_tensor.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).type(torch.uint8).numpy()

# Tensor -> PIL image
def image_tensor2pillow(input_tensor: torch.Tensor):
    """
    Convert a normalized 1xCxHxW tensor into a PIL image.
    :param input_tensor: tensor to convert (batch size must be 1)
    """
    assert (len(input_tensor.shape) == 4 and input_tensor.shape[0] == 1)
    # work on a detached CPU copy so the caller's tensor is untouched
    pixels = input_tensor.clone().detach().to(torch.device('cpu'))
    # undo the mean/std 0.5 normalization back to [0, 1], drop the batch dim
    pixels = unnormalize(pixels).squeeze()
    # [0, 1] -> [0, 255] with rounding, CHW -> HWC, then to a numpy uint8 array
    pixels = pixels.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).type(torch.uint8).numpy()
    # wrap the array as a PIL image
    return Image.fromarray(pixels)

# Undo transforms.Normalize
def unnormalize(tensor: torch.Tensor, inplace: bool = False,
                mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) -> torch.Tensor:
    """Unnormalize a tensor image with mean and standard deviation.

    Reverses ``transforms.Normalize``: computes ``tensor * std + mean`` per channel.
    The defaults match the ``Normalize([0.5]*3, [0.5]*3)`` used throughout this module;
    they are exposed as parameters so other normalizations can be reversed too.

    Args:
        tensor (Tensor): Tensor image of size (C, H, W) or (B, C, H, W) to be unnormalized.
        inplace (bool, optional): Bool to make this operation inplace.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.

    Returns:
        Tensor: Unnormalized Tensor image.

    Raises:
        TypeError: if *tensor* is not a torch tensor.
        ValueError: if *tensor* has fewer than 3 dims or any std entry is zero.
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError(
            'Input tensor should be a torch tensor. Got {}.'.format(type(tensor)))

    if tensor.ndim < 3:
        raise ValueError('Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = '
                         '{}.'.format(tensor.size()))

    if not inplace:
        # copy so the caller's tensor is left untouched
        tensor = tensor.clone()

    dtype = tensor.dtype
    mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
    std = torch.as_tensor(std, dtype=dtype, device=tensor.device)

    if (std == 0).any():
        raise ValueError(
            'std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))
    # reshape per-channel stats to (C, 1, 1) so they broadcast over H and W
    if mean.ndim == 1:
        mean = mean.view(-1, 1, 1)
    if std.ndim == 1:
        std = std.view(-1, 1, 1)

    tensor.mul_(std).add_(mean)
    return tensor


# Compute PSNR and channel-averaged SSIM between two HxWx3 images
def psnr_and_ssim(image1, image2):
    """Return (psnr, ssim); ssim is averaged over the three color channels."""
    psnr = compare_psnr(image1, image2)
    channel_ssims = [compare_ssim(image1[:, :, c], image2[:, :, c]) for c in range(3)]
    ssim = sum(channel_ssims) / 3
    return psnr, ssim


# JPEG compress then decompress in memory (simulates JPEG noise)
def encode_jpeg_to_decode_jpeg(image, ratio=95):
    """
    Round-trip *image* through in-memory JPEG encoding at the given quality.
    :param ratio: JPEG quality 0~100; lower means stronger compression / more loss
    """
    # encode to an in-memory JPEG buffer
    encode_params = [cv2.IMWRITE_JPEG_QUALITY, ratio]
    buffer = cv2.imencode('.jpg', image, encode_params)[1]
    raw = (np.array(buffer)).tobytes()
    # decode straight back to an image array
    return cv2.imdecode(np.frombuffer(raw, np.uint8), cv2.IMREAD_COLOR)

# PIL image -> normalized tensor
def pillow_image_to_tensor_normalize(image):
    """Convert a PIL image to a tensor normalized with mean/std 0.5 per channel."""
    to_normalized_tensor = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    return to_normalized_tensor(image)

# Apply JPEG compression noise to a whole batch
def image_add_jpeg(image):
    """
    JPEG-compress every image in the batch and return the re-normalized batch
    together with the average PSNR / SSIM between original and compressed images.

    :param image: (batch x C x H x W) normalized tensor
    :return: (jpeg batch moved to GPU, mean psnr, mean ssim)
    """
    jpeg_tensors = []
    psnr = 0
    ssim = 0
    for i in range(image.shape[0]):
        cv_image = image_tensor2cv2(image[i].unsqueeze(0))
        cv_image_jpeg = encode_jpeg_to_decode_jpeg(cv_image)
        # accumulate distortion metrics against the uncompressed image
        psnr_, ssim_ = psnr_and_ssim(cv_image, cv_image_jpeg)
        psnr += psnr_
        ssim += ssim_
        # back to a normalized tensor via PIL
        im = pillow_image_to_tensor_normalize(Image.fromarray(cv_image_jpeg))
        jpeg_tensors.append(im.unsqueeze(0))
    # collect in a list and concatenate once: the old per-iteration torch.cat
    # re-copied the accumulated batch every step (accidental O(n^2))
    jpeg_image = torch.cat(jpeg_tensors, dim=0).cuda()
    return jpeg_image, psnr / image.shape[0], ssim / image.shape[0]

# Average PSNR / SSIM over a batch of (cover, encoded) image pairs
def val_psnr_ssim(image, encode_image):
    """Return the mean PSNR and SSIM between corresponding images of the two batches."""
    total_psnr = 0
    total_ssim = 0
    batch = image.shape[0]
    for i in range(batch):
        cover = image_tensor2cv2(image[i].unsqueeze(0))
        encoded = image_tensor2cv2(encode_image[i].unsqueeze(0))
        psnr_i, ssim_i = psnr_and_ssim(cover, encoded)
        total_psnr += psnr_i
        total_ssim += ssim_i
    return total_psnr / batch, total_ssim / batch


"""
    图像DCT操作和IDCT操作
"""
# JPEG-style quantization of blockwise DCT coefficients
def std_quantization(image_yuv_dct, scale_factor, round_func=torch.round):
    """Divide DCT coefficients by the (scaled) standard JPEG quantization tables and round.

    Channel 0 (Y) uses the luminance table; channels 1+ (U/V) use the chrominance table.
    """
    def tiled_table(base_rows):
        # scale, round, clamp to >= 1, then tile over every 8x8 block of the image
        table = torch.tensor(base_rows, dtype=torch.float) * scale_factor
        table = table.round().to(image_yuv_dct.device).clamp(min=1)
        return table.repeat(image_yuv_dct.shape[2] // 8, image_yuv_dct.shape[3] // 8)

    luminance_quant_tbl = tiled_table([
        [16, 11, 10, 16, 24, 40, 51, 61],
        [12, 12, 14, 19, 26, 58, 60, 55],
        [14, 13, 16, 24, 40, 57, 69, 56],
        [14, 17, 22, 29, 51, 87, 80, 62],
        [18, 22, 37, 56, 68, 109, 103, 77],
        [24, 35, 55, 64, 81, 104, 113, 92],
        [49, 64, 78, 87, 103, 121, 120, 101],
        [72, 92, 95, 98, 112, 100, 103, 99]
    ])
    chrominance_quant_tbl = tiled_table([
        [17, 18, 24, 47, 99, 99, 99, 99],
        [18, 21, 26, 66, 99, 99, 99, 99],
        [24, 26, 56, 99, 99, 99, 99, 99],
        [47, 66, 99, 99, 99, 99, 99, 99],
        [99, 99, 99, 99, 99, 99, 99, 99],
        [99, 99, 99, 99, 99, 99, 99, 99],
        [99, 99, 99, 99, 99, 99, 99, 99],
        [99, 99, 99, 99, 99, 99, 99, 99]
    ])

    quantized = image_yuv_dct.clone()
    quantized[:, :1, :, :] = image_yuv_dct[:, :1, :, :] / luminance_quant_tbl
    quantized[:, 1:, :, :] = image_yuv_dct[:, 1:, :, :] / chrominance_quant_tbl
    return round_func(quantized)


# Inverse JPEG-style quantization
def std_reverse_quantization(q_image_yuv_dct, scale_factor):
    """Multiply quantized DCT coefficients back by the (scaled) standard JPEG tables.

    Channel 0 (Y) uses the luminance table; channels 1+ (U/V) use the chrominance table.
    """
    def tiled_table(base_rows):
        # scale, round, clamp to >= 1, then tile over every 8x8 block of the image
        table = torch.tensor(base_rows, dtype=torch.float) * scale_factor
        table = table.round().to(q_image_yuv_dct.device).clamp(min=1)
        return table.repeat(q_image_yuv_dct.shape[2] // 8, q_image_yuv_dct.shape[3] // 8)

    luminance_quant_tbl = tiled_table([
        [16, 11, 10, 16, 24, 40, 51, 61],
        [12, 12, 14, 19, 26, 58, 60, 55],
        [14, 13, 16, 24, 40, 57, 69, 56],
        [14, 17, 22, 29, 51, 87, 80, 62],
        [18, 22, 37, 56, 68, 109, 103, 77],
        [24, 35, 55, 64, 81, 104, 113, 92],
        [49, 64, 78, 87, 103, 121, 120, 101],
        [72, 92, 95, 98, 112, 100, 103, 99]
    ])
    chrominance_quant_tbl = tiled_table([
        [17, 18, 24, 47, 99, 99, 99, 99],
        [18, 21, 26, 66, 99, 99, 99, 99],
        [24, 26, 56, 99, 99, 99, 99, 99],
        [47, 66, 99, 99, 99, 99, 99, 99],
        [99, 99, 99, 99, 99, 99, 99, 99],
        [99, 99, 99, 99, 99, 99, 99, 99],
        [99, 99, 99, 99, 99, 99, 99, 99],
        [99, 99, 99, 99, 99, 99, 99, 99]
    ])

    image_yuv_dct = q_image_yuv_dct.clone()
    image_yuv_dct[:, :1, :, :] = q_image_yuv_dct[:, :1, :, :] * luminance_quant_tbl
    image_yuv_dct[:, 1:, :, :] = q_image_yuv_dct[:, 1:, :, :] * chrominance_quant_tbl
    return image_yuv_dct

# Blockwise 8x8 DCT
def dct(image):
    """Apply an orthonormal 8x8 block DCT to every 8x8 tile of *image* (B x C x H x W).

    Assumes H and W are multiples of 8 (callers pad beforehand).
    """
    # DCT-II basis matrix; row 0 is the constant (DC) basis vector
    coff = torch.zeros((8, 8), dtype=torch.float).to(image.device)
    coff[0, :] = 1 * np.sqrt(1 / 8)
    for row in range(1, 8):
        for col in range(8):
            coff[row, col] = np.cos(np.pi * row * (2 * col + 1) / (2 * 8)) * np.sqrt(2 / 8)

    num_blocks = image.shape[2] // 8
    # fold every 8x8 tile into the batch dimension, transform, then unfold
    tiles = torch.cat(torch.cat(image.split(8, 2), 0).split(8, 3), 0)
    transformed = torch.matmul(torch.matmul(coff, tiles), coff.permute(1, 0))
    return torch.cat(torch.cat(transformed.chunk(num_blocks, 0), 3).chunk(num_blocks, 0), 2)

# Blockwise 8x8 inverse DCT
def idct(image_dct):
    """Apply the inverse orthonormal 8x8 block DCT to every 8x8 tile of *image_dct*.

    Assumes H and W are multiples of 8 (callers pad beforehand).
    """
    # same DCT-II basis matrix as in dct(); the inverse uses its transpose
    coff = torch.zeros((8, 8), dtype=torch.float).to(image_dct.device)
    coff[0, :] = 1 * np.sqrt(1 / 8)
    for row in range(1, 8):
        for col in range(8):
            coff[row, col] = np.cos(np.pi * row * (2 * col + 1) / (2 * 8)) * np.sqrt(2 / 8)

    num_blocks = image_dct.shape[2] // 8
    # fold every 8x8 tile into the batch dimension, transform back, then unfold
    tiles = torch.cat(torch.cat(image_dct.split(8, 2), 0).split(8, 3), 0)
    restored = torch.matmul(torch.matmul(coff.permute(1, 0), tiles), coff)
    return torch.cat(torch.cat(restored.chunk(num_blocks, 0), 3).chunk(num_blocks, 0), 2)

# RGB -> YUV
def rgb2yuv(image_rgb):
    """Convert a (B x 3 x H x W) RGB tensor to YUV using JPEG conversion weights."""
    r = image_rgb[:, 0:1, :, :]
    g = image_rgb[:, 1:2, :, :]
    b = image_rgb[:, 2:3, :, :]
    image_yuv = torch.empty_like(image_rgb)
    image_yuv[:, 0:1, :, :] = 0.299 * r + 0.587 * g + 0.114 * b
    image_yuv[:, 1:2, :, :] = -0.1687 * r - 0.3313 * g + 0.5 * b
    image_yuv[:, 2:3, :, :] = 0.5 * r - 0.4187 * g - 0.0813 * b
    return image_yuv

# YUV -> RGB
def yuv2rgb(image_yuv):
    """Convert a (B x 3 x H x W) YUV tensor back to RGB (inverse of rgb2yuv)."""
    y = image_yuv[:, 0:1, :, :]
    u = image_yuv[:, 1:2, :, :]
    v = image_yuv[:, 2:3, :, :]
    image_rgb = torch.empty_like(image_yuv)
    image_rgb[:, 0:1, :, :] = y + 1.40198758 * v
    image_rgb[:, 1:2, :, :] = y - 0.344113281 * u - 0.714103821 * v
    image_rgb[:, 2:3, :, :] = y + 1.77197812 * u
    return image_rgb

def yuv_dct(image):
    """
    Convert an RGB image tensor into blockwise-DCT YUV coefficients.

    NOTE(review): the x255 scaling assumes the input is in [0, 1]; the commented-out
    variant in the original handled [-1, 1] -- confirm against callers.
    :return: (dct coefficients, pad_width, pad_height)
    """
    scaled = image * 255

    # zero-pad bottom/right so height and width are multiples of 8 for the block DCT
    pad_height = (8 - scaled.shape[2] % 8) % 8
    pad_width = (8 - scaled.shape[3] % 8) % 8
    padded = nn.ZeroPad2d((0, pad_width, 0, pad_height))(scaled)

    # convert to yuv
    image_yuv = rgb2yuv(padded)

    assert image_yuv.shape[2] % 8 == 0
    assert image_yuv.shape[3] % 8 == 0

    # apply the blockwise dct
    image_dct = dct(image_yuv)

    return image_dct, pad_width, pad_height

def idct_rgb(image_dct, pad_width, pad_height):
    """
    Inverse of yuv_dct: blockwise-DCT YUV coefficients -> RGB image in [0, 1].
    :param pad_width, pad_height: padding amounts returned by yuv_dct, removed here
    """
    # inverse blockwise dct, then back to rgb
    image_yuv = idct(image_dct)
    padded_rgb = yuv2rgb(image_yuv)

    # strip the bottom/right padding added before the forward transform
    height = padded_rgb.shape[2] - pad_height
    width = padded_rgb.shape[3] - pad_width
    image_rgb = padded_rgb[:, :, :height, :width].clone()

    return image_rgb / 255


def forward1(image, Q=95):
    """
    JPEG analysis half: RGB image -> quantized blockwise-DCT coefficients.

    :param image: (B x 3 x H x W) RGB tensor
    :param Q: JPEG quality factor; previously hard-coded to 95, now a
        backward-compatible parameter (must match the Q passed to forward2)
    :return: (quantized dct coefficients, pad_width, pad_height)
    """
    # standard JPEG mapping from quality factor to quantization-table scale
    scale_factor = 2 - Q * 0.02 if Q >= 50 else 50 / Q
    image_dct, pad_width, pad_height = yuv_dct(image)
    image_quantization = std_quantization(image_dct, scale_factor)

    return image_quantization, pad_width, pad_height


def forward2(image_dct, pad_width, pad_height, Q=95):
    """
    JPEG synthesis half: quantized blockwise-DCT coefficients -> RGB image.

    :param Q: JPEG quality factor; previously hard-coded to 95, now a
        backward-compatible parameter (must match the Q passed to forward1)
    :return: reconstructed RGB image tensor
    """
    # standard JPEG mapping from quality factor to quantization-table scale
    scale_factor = 2 - Q * 0.02 if Q >= 50 else 50 / Q
    image_dequantized = std_reverse_quantization(image_dct, scale_factor)
    image = idct_rgb(image_dequantized, pad_width, pad_height)

    return image

"""
    DCT频率提取与还原(包含拼接)
"""
# Regroup blockwise DCT coefficients into frequency-major layout
def DCT2frequency(image, c, w):
    """Rearrange (c x 3 x w x w) DCT data so same-frequency coefficients are contiguous."""
    blocks = image.reshape(c, int(3 * w * w / 64), 8, 8)
    blocks = blocks.reshape(c, 3, int(w * w / 64), 64)
    # swap the block index and the frequency index
    blocks = blocks.permute(0, 1, 3, 2)
    return blocks.reshape(c, 3, w, w)

# Inverse of DCT2frequency
def frequency2DCT(image, c, w):
    """Rearrange frequency-major data back into (c x 3 x w x w) blockwise DCT layout."""
    blocks = image.reshape(c, 3, 64, int(w * w / 64))
    # swap the frequency index and the block index back
    blocks = blocks.permute(0, 1, 3, 2)
    blocks = blocks.reshape(c, int(3 * w * w / 64), 8, 8)
    return blocks.reshape(c, 3, w, w)

"""
    DCT频率提取与还原（拆分64通道）
"""
# Regroup blockwise DCT into 3*64 per-frequency channels
def DCT2frequency2(image, c, w):
    """Rearrange (c x 3 x w x w) DCT data into (c x 192 x w/8 x w/8): one channel per frequency."""
    freq = image.reshape(c, 3, int(w * w / 64), 64)
    # swap the block index and the frequency index
    freq = freq.permute(0, 1, 3, 2)
    return freq.reshape(c, 3 * 64, int(w / 8), int(w / 8))

# Inverse of DCT2frequency2
def frequency2DCT2(image, c, w):
    """Rearrange (c x 192 x w/8 x w/8) frequency channels back into (c x 3 x w x w) DCT layout."""
    blocks = image.reshape(c, 3, 64, int(w * w / 64))
    # swap the frequency index and the block index back
    blocks = blocks.permute(0, 1, 3, 2)
    return blocks.reshape(c, 3, w, w)