# -*- coding:utf-8 -*-
import csv
import os

from options import HiDDenConfiguration
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import utils
from model.discriminator import Discriminator
import numpy as np
from model.encoder_decoder import EncoderDecoder
from noise_layers.colorjitter import ColorJitter
from noise_layers.gaussian_noise import Gaussian_Noise
from noise_layers.salt_pepper_noise import SP
from noise_layers.jpeg import Jpeg, JpegSS, JpegMask
import HiNet.modules.Unet_common as common
import HiNet.config as c
from collections import OrderedDict
def guide_loss(output, bicubic_image):
    """Summed MSE between the stego output and the target (cover) image.

    Args:
        output: predicted image tensor.
        bicubic_image: target tensor of the same shape.

    Returns:
        Scalar CUDA tensor holding the sum of squared errors.
    """
    # `reduce=True, size_average=False` is the long-deprecated (PyTorch 0.4)
    # spelling of reduction='sum'; use the modern argument.
    loss_fn = torch.nn.MSELoss(reduction='sum')
    loss = loss_fn(output, bicubic_image)
    return loss.cuda()


def reconstruction_loss(rev_input, input):
    """Summed MSE between the recovered secret and the original secret.

    Args:
        rev_input: decoded (reversed) tensor.
        input: ground-truth tensor of the same shape.

    Returns:
        Scalar CUDA tensor holding the sum of squared errors.
    """
    # `reduce=True, size_average=False` is the long-deprecated (PyTorch 0.4)
    # spelling of reduction='sum'; use the modern argument.
    loss_fn = torch.nn.MSELoss(reduction='sum')
    loss = loss_fn(rev_input, input)
    return loss.cuda()


def low_frequency_loss(ll_input, gt_input):
    """Summed MSE between the low-frequency (LL) wavelet sub-bands.

    Args:
        ll_input: LL sub-band of the stego image.
        gt_input: LL sub-band of the cover image, same shape.

    Returns:
        Scalar CUDA tensor holding the sum of squared errors.
    """
    # `reduce=True, size_average=False` is the long-deprecated (PyTorch 0.4)
    # spelling of reduction='sum'; use the modern argument.
    loss_fn = torch.nn.MSELoss(reduction='sum')
    loss = loss_fn(ll_input, gt_input)
    return loss.cuda()

def gauss_noise(shape):
    """Return a CUDA tensor of i.i.d. standard-normal noise with the given shape.

    Args:
        shape: desired tensor shape (tuple or torch.Size).

    Returns:
        CUDA float tensor of `shape` filled with N(0, 1) samples.
    """
    # Single vectorized draw directly on the GPU. Identical in distribution to
    # the old per-sample loop, but avoids one CPU draw + host-to-device copy
    # per batch element.
    return torch.randn(shape, device='cuda')

def remove_module_prefix(state_dict):
    """Strip the leading 'module.' prefix (added by nn.DataParallel) from keys.

    Args:
        state_dict: mapping of parameter names to tensors.

    Returns:
        OrderedDict with the same values and the 'module.' prefix removed
        from keys that start with it; other keys are kept unchanged.
    """
    new_state_dict = OrderedDict()
    prefix = "module."
    for k, v in state_dict.items():
        # Slice off only the leading prefix. str.replace would also rewrite
        # any inner 'module.' substrings (e.g. 'module.a.module.b'),
        # corrupting keys of submodules that happen to be named 'module'.
        new_key = k[len(prefix):] if k.startswith(prefix) else k
        new_state_dict[new_key] = v
    return new_state_dict
def load(net, name, map_location=None):
    """Load the 'net' state dict from a checkpoint file into `net`.

    Args:
        net: module that receives the weights.
        name: path of the checkpoint file.
        map_location: optional device remapping forwarded to torch.load
            (e.g. 'cpu'), so CUDA-saved checkpoints can be loaded on
            CPU-only machines. Default None keeps the original behavior.
    """
    state_dicts = torch.load(name, map_location=map_location)
    # Drop temporary buffers ('tmp_var') that were saved alongside the
    # parameters but are not part of the model's state.
    network_state_dict = {k: v for k, v in state_dicts['net'].items() if 'tmp_var' not in k}
    net.load_state_dict(remove_module_prefix(network_state_dict))

def test(train_loader, test_loader, messages_length, num_epochs, model_dir, log_file, save_image_dir):
    """Evaluate the HiNet-based encoder/decoder under one distortion layer.

    Runs a single pass over `test_loader`: embeds random binary messages into
    cover images through the invertible network (in the DWT domain), applies
    the selected distortion, runs the reverse pass to decode, and appends the
    averaged losses / PSNR / SSIM / bit error rate as one CSV row in
    `log_file`. The first batch of images is also saved to `save_image_dir`.

    NOTE(review): `train_loader` and `model_dir` are accepted but never used,
    and `num_epochs` only appears in the progress print — confirm whether the
    signature is kept for compatibility with a training entry point.
    """

    #################
    ###   Model   ###
    #################
    encoder_decoder = EncoderDecoder().cuda()
    # NOTE(review): hard-coded absolute checkpoint path — parameterize
    # (e.g. via model_dir) if this is reused outside the original machine.
    load(encoder_decoder.model, "/home/zzc/simulator_noisy/HiNet_Bit_TRDH/best.pt")

    #################
    ###   noise   ###
    #################
    # Select exactly one distortion layer; the alternatives are kept as
    # commented-out one-line switches.
    distortion = 'Brightness'   # brightness
    # distortion = 'Contrast'   # contrast
    # distortion = 'Saturation'   # saturation
    # distortion = 'Hue'   # hue
    ASL = ColorJitter(distortion).cuda()
    # ASL = Gaussian_Noise().cuda()
    # ASL = SP().cuda()
    # ASL = Jpeg(Q=90).cuda()
    dwt = common.DWT()  # forward discrete wavelet transform (HiNet front-end)
    iwt = common.IWT()  # inverse wavelet transform
            

    # NOTE(review): csv module recommends opening files with newline='' to
    # avoid blank rows on Windows — confirm target platform before changing.
    with open(log_file, "w") as f:
        writer = csv.writer(f)
        writer.writerow(["Epoch", 
                "testLoss", "test_encoder_loss", "test_decoder_loss", 
                "advLoss", "bit_arr", "psnr", "ssim"])

    
        # test phase
        encoder_decoder.eval()

        # Running accumulators, averaged over batches after the loop.
        test_encoder_loss = 0.0
        test_decoder_loss = 0.0
        test_psnr = 0.0
        test_ssim = 0.0
        test_bit_arr = 0.0
        test_running_loss = 0.0
        dis_running_loss = 0.0  # NOTE(review): never updated below, so the advLoss column is always 0
        is_first_save = True
        with torch.no_grad():
            for images, _ in test_loader:
                images = images.cuda()

                # Random binary message: one channel of size messages_length x messages_length per image.
                messages = torch.Tensor(np.random.choice([0, 1], size=[images.size(0), 1, messages_length, messages_length])).cuda()
                # Replicate to 3 channels (to match HiNet's expected input).
                messages = messages.repeat(1, 3, 1, 1)
                cover_input = dwt(images)
                secret_input = dwt(messages)
                input_img = torch.cat((cover_input, secret_input), 1)
                # Forward pass: the first 4*channels_in wavelet channels form
                # the stego image; the remainder is the latent z branch.
                output = encoder_decoder.model(input_img)
                output_steg = output.narrow(1, 0, 4 * c.channels_in)
                output_z = output.narrow(1, 4 * c.channels_in, output.shape[1] - 4 * c.channels_in)
                encoder_image = iwt(output_steg)

                # Apply the attack/distortion layer to the stego image.
                noise_image = ASL(encoder_image) 

                # Reverse pass: the discarded z branch is replaced with fresh
                # Gaussian noise, as in the HiNet recovery procedure.
                output_z_guass = gauss_noise(output_z.shape)
                dwt_steg = dwt(noise_image)                    
                output_rev = torch.cat((dwt_steg, output_z_guass), 1)
                output_image = encoder_decoder.model(output_rev, rev=True)

                secret_rev = output_image.narrow(1, 4 * c.channels_in, output_image.shape[1] - 4 * c.channels_in)
                decoder_message = iwt(secret_rev)
                decoder_message = torch.clamp(decoder_message, 0, 1)  # keep the decoded output within [0, 1]

                g_loss = guide_loss(encoder_image.cuda(), images.cuda())
                r_loss = reconstruction_loss(decoder_message, messages)
                # Low-frequency consistency between stego and cover wavelet bands.
                output_steg = dwt(encoder_image)
                cover_input = dwt(images)
                steg_low = output_steg.narrow(1, 0, c.channels_in)
                cover_low = cover_input.narrow(1, 0, c.channels_in)
                l_loss = low_frequency_loss(steg_low, cover_low)
            
                # Weighted total, using the lambda weights from HiNet's config.
                loss = c.lamda_reconstruction * r_loss + c.lamda_guide * g_loss + c.lamda_low_frequency * l_loss

                psnr, ssim = utils.val_psnr_ssim(images, encoder_image)
                test_psnr += psnr
                test_ssim += ssim

                test_encoder_loss += g_loss.item()
                test_decoder_loss += r_loss.item()
                test_running_loss += loss.item()

                if is_first_save:  # only save images from the first batch
                    is_first_save = False
                    save_images(images.cpu()[:8, :, :, :],
                                encoder_image[:8, :, :, :].cpu(), noise_image[:8, :, :, :].cpu(), 0, save_image_dir)
                # Bit error rate: fraction of message bits decoded incorrectly
                # after rounding the decoder output to {0, 1}.
                decoded_rounded = decoder_message.detach().cpu().numpy().round().clip(0, 1)
                bitwise_err = np.sum(np.abs(decoded_rounded - messages.detach().cpu().numpy())) / (messages.shape[0] * messages.shape[1] * messages.shape[2] * messages.shape[3])
                test_bit_arr += bitwise_err

        # Average all accumulators over the number of batches.
        test_epoch_encoder_loss = test_encoder_loss / len(test_loader)
        test_epoch_decoder_loss = test_decoder_loss / len(test_loader)
        test_epoch_loss = test_running_loss / len(test_loader)
        dis_epoch_loss = dis_running_loss / len(test_loader)
        psnr = test_psnr / len(test_loader)
        ssim = test_ssim / len(test_loader)
        bit_arr = test_bit_arr / len(test_loader)


        writer.writerow([
            0, 
            f"{test_epoch_loss:.4f}", 
            f"{test_epoch_encoder_loss:.4f}", 
            f"{test_epoch_decoder_loss:.4f}", 
            f"{dis_epoch_loss:.4f}", 
            f"{bit_arr:.4f}", 
            f"{psnr:.2f}", 
            f"{ssim:.2f}"
        ])
        f.flush()
        print(f"Epoch [{0}/{num_epochs}], "
                f"Test Loss: {test_epoch_loss:.4f} (Encoder: {test_epoch_encoder_loss:.4f}, Decoder: {test_epoch_decoder_loss:.4f}), "
                f"Adv Loss: {dis_epoch_loss:.4f}, Bit arr: {bit_arr:.4f}, Psnr: {psnr}, Ssim: {ssim}")
        

    print(f"Training log saved to {log_file}.")
    print("Training finished.")

def save_images(cover_images, encoder_images, noise_image, epoch, folder):
    """Save cover / stego / noised images plus x5-amplified residual maps.

    All five groups are stacked along the batch dimension and written as a
    single PNG named 'epoch-{epoch+1}.png' inside `folder`.
    """
    # Move everything to the CPU before any tensor arithmetic.
    cover = cover_images.cpu()
    stego = encoder_images.cpu()
    noised = noise_image.cpu()

    # Amplify the residuals by 5 so small embedding/distortion artifacts
    # become visible, then clamp back into the displayable [0, 1] range.
    residual_stego = torch.clamp((cover - stego).abs() * 5, 0, 1)
    residual_noised = torch.clamp((cover - noised).abs() * 5, 0, 1)

    grid = torch.cat(
        [cover, stego, noised, residual_stego, residual_noised],
        dim=0,
    )

    out_path = os.path.join(folder, 'epoch-{}.png'.format(epoch + 1))
    torchvision.utils.save_image(grid, out_path, normalize=False)