# -*- coding: utf-8 -*-
import csv
import os

from options import HiDDenConfiguration
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import utils
from model.discriminator import Discriminator
import numpy as np
from model.encoder_decoder import EncoderDecoder
from modules.UnetPlusPlus_L6 import UNetPlusPlusL6_Attack
from noise_layers.jpeg import Jpeg
from noise_layers.colorjitter import ColorJitter
from model.attack import Attack
from modules.UNetPPL6_OP import UNetPlusPlusL6_Attack as UNetPP_OP
import config as c
from noise_layers.gaussian_noise import Gaussian_Noise
from noise_layers.salt_pepper_noise import SP
from noise_layers.Brightness import BrightnessNoise
from collections import OrderedDict
import HiNet.modules.Unet_common as common
import HiNet.config as hc
from model.noiseGenerator import UNet
from modules.RandomPixelUNet import DistortionNet
def model_save(model, ep, save_dir):
    """Save a training checkpoint for *model* to *save_dir*.

    Parameters
    ----------
    model : object
        Object exposing an ``encoder_decoder`` sub-module whose
        ``state_dict()`` is persisted (e.g. an ``ED_NR`` instance).
    ep : int
        Current epoch number, stored alongside the weights.
    save_dir : str
        Destination file path handed to ``torch.save``.
    """
    print(f'--- save the model @ epoch {ep} ---')

    # Checkpoint keeps only the epoch counter and the encoder/decoder weights;
    # optimizer and noiser states are intentionally not saved.
    checkpoint = {
        'epoch': ep,
        'encoder_decoder_state_dict': model.encoder_decoder.state_dict(),
    }

    torch.save(checkpoint, save_dir)

def guide_loss(output, bicubic_image):
    """Summed squared-error guide loss between stego output and cover image.

    ``reduction='sum'`` is the modern equivalent of the removed/deprecated
    ``MSELoss(reduce=True, size_average=False)`` arguments used previously.
    Returns the loss moved to CUDA (a no-op when inputs are already on GPU).
    """
    loss_fn = torch.nn.MSELoss(reduction='sum')
    loss = loss_fn(output, bicubic_image)
    return loss.cuda()


def reconstruction_loss(rev_input, input):
    """Summed squared-error loss between the recovered and original secret.

    ``reduction='sum'`` replaces the deprecated
    ``MSELoss(reduce=True, size_average=False)`` argument pair.
    Returns the loss moved to CUDA (a no-op when inputs are already on GPU).
    """
    loss_fn = torch.nn.MSELoss(reduction='sum')
    loss = loss_fn(rev_input, input)
    return loss.cuda()


def low_frequency_loss(ll_input, gt_input):
    """Summed squared-error loss on the low-frequency (LL) wavelet sub-bands.

    ``reduction='sum'`` replaces the deprecated
    ``MSELoss(reduce=True, size_average=False)`` argument pair.
    Returns the loss moved to CUDA (a no-op when inputs are already on GPU).
    """
    loss_fn = torch.nn.MSELoss(reduction='sum')
    loss = loss_fn(ll_input, gt_input)
    return loss.cuda()

def gauss_noise(shape):
    """Return a CUDA tensor of the given *shape* filled with N(0, 1) noise.

    Replaces the original per-sample Python loop with a single ``torch.randn``
    call: the values are identically distributed (i.i.d. standard normal) and
    the tensor is allocated directly on the GPU in one kernel launch.
    """
    return torch.randn(shape, device="cuda")

def remove_module_prefix(state_dict):
    """Strip one leading ``'module.'`` prefix (added by ``nn.DataParallel``)
    from every key of *state_dict*, preserving key order.

    Bug fix: the previous ``k.replace("module.", "")`` removed *every*
    occurrence of the substring, corrupting keys such as
    ``'module.block.module.weight'`` -> ``'block.weight'``. Only the prefix
    at the start of the key must be removed.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module."):
            key = key[len("module."):]
        new_state_dict[key] = value
    return new_state_dict
def load(net, name):
    """Load pretrained HiNet weights from checkpoint file *name* into *net*.

    The checkpoint is expected to store the weights under the ``'net'`` key.
    Temporary buffers (keys containing ``'tmp_var'``) are dropped and any
    DataParallel ``'module.'`` prefix is stripped before loading.

    ``map_location="cpu"`` makes loading robust on CPU-only hosts when the
    checkpoint was saved from GPU; ``load_state_dict`` copies the tensors
    onto the net's own device afterwards.
    """
    state_dicts = torch.load(name, map_location="cpu")
    network_state_dict = {k: v for k, v in state_dicts['net'].items() if 'tmp_var' not in k}
    net.load_state_dict(remove_module_prefix(network_state_dict))

class  ED_NR(nn.Module):
    """Joint trainer for a HiNet-based watermark encoder/decoder.

    Two distortion paths are maintained:
      * ASL -- a fixed, analytical noise layer (currently salt & pepper);
      * SML -- a learned distortion simulator (``DistortionNet``) trained to
        imitate the ASL output.

    The ASL and SML task losses are combined with learned uncertainty weights
    (``log_sigma_asl`` / ``log_sigma_sml``).

    NOTE(review): all sub-modules and parameters are created directly on CUDA,
    so a GPU is required to instantiate this class.
    """
    def __init__(self):
        super(ED_NR, self).__init__()
        #################
        ###   Model   ###
        #################
        # HiNet encoder/decoder, warm-started from a pretrained checkpoint.
        self.encoder_decoder = EncoderDecoder().cuda()
        load(self.encoder_decoder.model, "/home/zzc/simulator_noisy/FRDS-HiNet/best.pt")
        self.epoch = 0

        # Learned noise simulator plus the per-task log-sigma parameters used
        # for uncertainty-based loss balancing.
        self.noise_layer = DistortionNet().cuda()
        self.log_sigma_asl = nn.Parameter(torch.zeros(1, device="cuda"))
        self.log_sigma_sml = nn.Parameter(torch.zeros(1, device="cuda"))
        
        ############setting lr################
        log10_lr = -5.0
        lr = 10 ** log10_lr
        #################
        ###    opt    ###
        #################
        # The log-sigma weights are optimised jointly with the encoder/decoder.
        self.optimizer_ed = optim.Adam(
            list(self.encoder_decoder.parameters()) + [self.log_sigma_asl, self.log_sigma_sml],
            lr=lr
        )
        
        self.optimizer_n = optim.Adam(self.noise_layer.parameters(), lr=0.0001)

        ################
        ###   Loss   ###
        ################
        self.mse_loss = nn.MSELoss().cuda()
        self.L1 = nn.L1Loss().cuda()
        self.bce_with_logits_loss = nn.BCEWithLogitsLoss()

        #################
        ###   noise   ###
        #################
        # Alternative ASL choices, kept for reference:
        # distortion = 'Brightness'   # brightness
        # distortion = 'Contrast'   # contrast
        # distortion = 'Hue'   # hue
        # distortion = 'Saturation'   # saturation
        # self.ASL = ColorJitter(distortion).cuda()
        # self.ASL = Gaussian_Noise().cuda()
        self.ASL = SP().cuda()
        # self.ASL = Jpeg(Q=90).cuda()
        # self.ASL = BrightnessNoise().cuda()
        # Discrete wavelet transform pair used throughout the HiNet pipeline.
        self.dwt = common.DWT()
        self.iwt = common.IWT()


        # Per-epoch running loss accumulators, reset by res_init().
        self.train_noiser_loss = 0.0
        self.train_ED_loss_ASL = 0.0
        self.train_ED_loss_SML = 0.0
        self.train_ED_loss = 0.0

        # Start a fresh CSV log with the metric header row.
        with open(c.log_file, "w") as f:
            writer = csv.writer(f)
            writer.writerow(["Epoch", "trainLoss", "train_loss_ASL", "train_loss_SML", 
                    "testLoss", "test_encoder_loss", "test_decoder_loss", 
                    "bit_arr", "psnr", "ssim", "train_noiser_loss", "test_noiser_loss",
                    "w_asl", "w_sml"])
        self.min_testLoss = float("INF")

    def res_init(self):
        """Reset the per-epoch running loss accumulators."""
        self.train_noiser_loss = 0.0
        self.train_ED_loss_ASL = 0.0
        self.train_ED_loss_SML = 0.0
        self.train_ED_loss = 0.0
        
    def update_encoder_decoder_ASL_And_SML(self, images):
        """Run one encoder/decoder optimisation step over both noise paths.

        Forward once through HiNet, decode (a) through the real ASL noise
        (applied via a detached residual so gradients flow only through the
        encoded image) and (b) through the learned SML simulator, then back-
        propagate the uncertainty-weighted sum
        ``exp(-2*log_sigma)*loss + log_sigma`` for both tasks.

        images: batch of cover images; assumed to be in [0, 1] -- TODO confirm.
        """
        self.encoder_decoder.train()
        self.noise_layer.eval()


        images = images.cuda()
            
        # Random binary message grid, single channel.
        messages = torch.Tensor(np.random.choice([0, 1], size=[images.size(0), 1, c.message_length, c.message_length])).cuda()
        # Replicate to 3 channels (prepares the input for HiNet).
        messages = messages.repeat(1, 3, 1, 1)   

        cover_input = self.dwt(images)
        secret_input = self.dwt(messages)
        input_img = torch.cat((cover_input, secret_input), 1)           

        # Forward ASL path; clamping added for numerical protection.
        with torch.enable_grad():
            output = self.encoder_decoder.model(input_img)
            output_steg = output.narrow(1, 0, 4 * hc.channels_in)
            output_z = output.narrow(1, 4 * hc.channels_in, output.shape[1] - 4 * hc.channels_in)
            encoder_image = self.iwt(output_steg)  
            encoder_image = torch.clamp(encoder_image, 0, 1)  # keep the encoded image in [0, 1]

        with torch.no_grad():
            attack_image = self.ASL(encoder_image)
            attack_image = torch.clamp(attack_image, 0, 1)  # keep the ASL output in a valid range
            # Detached residual: adding it back keeps the noised pixel values
            # but routes all gradient through encoder_image.
            gap_image = attack_image - encoder_image
            gap_image = gap_image.detach()

        with torch.enable_grad():
            noise_image = encoder_image + gap_image
            noise_image = torch.clamp(noise_image, 0, 1)  # keep the noised image in a valid range
            output_z_guass = gauss_noise(output_z.shape)
            dwt_steg = self.dwt(noise_image)                    
            output_rev = torch.cat((dwt_steg, output_z_guass), 1)
            output_image = self.encoder_decoder.model(output_rev, rev=True)

            secret_rev = output_image.narrow(1, 4 * hc.channels_in, output_image.shape[1] - 4 * hc.channels_in)
            decoder_message = self.iwt(secret_rev)
            decoder_message = torch.clamp(decoder_message, 0, 1)  # keep the decoded output in [0, 1]
    
            # Encoder/decoder losses for the ASL path.
            g_loss = guide_loss(encoder_image.cuda(), images.cuda())
            r_loss = reconstruction_loss(decoder_message, messages)
            output_steg = self.dwt(encoder_image)
            cover_input = self.dwt(images)
            steg_low = output_steg.narrow(1, 0, hc.channels_in)
            cover_low = cover_input.narrow(1, 0, hc.channels_in)
            l_loss = low_frequency_loss(steg_low, cover_low)
        
            lossASL = hc.lamda_reconstruction * r_loss + hc.lamda_guide * g_loss + hc.lamda_low_frequency * l_loss
            
            
            # SML training path: decode through the learned noise simulator.
            attack_image = self.noise_layer(encoder_image)

            output_z_guass = gauss_noise(output_z.shape)
            dwt_steg = self.dwt(attack_image)                    
            output_rev = torch.cat((dwt_steg, output_z_guass), 1)
            output_image = self.encoder_decoder.model(output_rev, rev=True)

            secret_rev = output_image.narrow(1, 4 * hc.channels_in, output_image.shape[1] - 4 * hc.channels_in)
            decoder_message = self.iwt(secret_rev)
            decoder_message = torch.clamp(decoder_message, 0, 1)  # keep the decoded output in [0, 1]

            r_loss = reconstruction_loss(decoder_message, messages)        
            lossSML = hc.lamda_reconstruction * r_loss + hc.lamda_guide * g_loss + hc.lamda_low_frequency * l_loss           
    
            # Uncertainty-weighted combination of the two task losses.
            loss = (torch.exp(-2 * self.log_sigma_asl)  * lossASL +
                    torch.exp(-2 * self.log_sigma_sml) * lossSML +
                    (self.log_sigma_asl + self.log_sigma_sml))
           
            self.optimizer_ed.zero_grad()
            loss.backward()
            self.optimizer_ed.step()

            self.train_ED_loss_ASL += lossASL.item()
            self.train_ED_loss_SML += lossSML.item()
            self.train_ED_loss += loss.item()


    def update_noiser(self, images):
        """Run one optimisation step of the learned noise simulator (SML).

        The simulator is trained with an MSE loss to reproduce the ASL
        distortion on the current stego images; the encoder/decoder is frozen.

        NOTE(review): message size here uses ``hc.message_length`` while the
        other methods use ``c.message_length`` -- confirm the two agree.
        """
        self.noise_layer.train()
        self.encoder_decoder.eval()
        images = images.cuda()
            
        messages = torch.Tensor(np.random.choice([0, 1], size=[images.size(0), 1, hc.message_length, hc.message_length])).cuda()
        # Replicate to 3 channels (prepares the input for HiNet).
        messages = messages.repeat(1, 3, 1, 1)   

        cover_input = self.dwt(images)
        secret_input = self.dwt(messages)
        input_img = torch.cat((cover_input, secret_input), 1) 
        output = self.encoder_decoder.model(input_img)
        output_steg = output.narrow(1, 0, 4 * hc.channels_in)
        output_z = output.narrow(1, 4 * hc.channels_in, output.shape[1] - 4 * hc.channels_in)
        stego = self.iwt(output_steg)  
        stego = torch.clamp(stego, 0, 1)
        with torch.enable_grad():
            attack_images = self.noise_layer(stego)
            noise_images = self.ASL(stego)
            loss = self.mse_loss(attack_images, noise_images)

            self.optimizer_n.zero_grad()
            loss.backward()
            self.optimizer_n.step()
            self.train_noiser_loss += loss.item()

    def test_EDN(self, ep_num, iter_num, test_loader, isSave = True):
        """Evaluate on *test_loader*, log metrics to CSV and save checkpoints.

        Averages the running training losses over *iter_num* iterations, then
        computes test losses, PSNR/SSIM and bit error rate. Appends one CSV row
        per call, keeps the best checkpoint by test loss, and optionally saves
        a per-epoch checkpoint when *isSave* is true.
        """
        train_epoch_loss_ASL = self.train_ED_loss_ASL / iter_num
        train_epoch_loss_SML = self.train_ED_loss_SML / iter_num
        train_epoch_loss = self.train_ED_loss / iter_num
        train_epoch_noiser_loss = self.train_noiser_loss / iter_num

        is_first_save = True
        test_noiser_loss = 0.0
        test_encoder_loss = 0.0
        test_decoder_loss = 0.0
        test_psnr = 0.0
        test_ssim = 0.0
        test_bit_arr = 0.0
        test_running_loss = 0.0

        # Evaluation phase.
        self.encoder_decoder.eval()
        self.noise_layer.eval()
        with torch.no_grad():
            with open(c.log_file, "a") as f:
                writer = csv.writer(f)
                for images, _ in test_loader:
                    images = images.cuda()

                    messages = torch.Tensor(np.random.choice([0, 1], size=[images.size(0), 1, c.message_length, c.message_length])).cuda()
                    # Replicate to 3 channels (prepares the input for HiNet).
                    messages = messages.repeat(1, 3, 1, 1)
                    cover_input = self.dwt(images)
                    secret_input = self.dwt(messages)
                    input_img = torch.cat((cover_input, secret_input), 1)
                    output = self.encoder_decoder.model(input_img)
                    output_steg = output.narrow(1, 0, 4 * hc.channels_in)
                    output_z = output.narrow(1, 4 * hc.channels_in, output.shape[1] - 4 * hc.channels_in)
                    encoder_image = self.iwt(output_steg)

                    noise_image = self.ASL(encoder_image) 
                    attack_image = self.noise_layer(encoder_image)

                    output_z_guass = gauss_noise(output_z.shape)
                    dwt_steg = self.dwt(noise_image)                    
                    output_rev = torch.cat((dwt_steg, output_z_guass), 1)
                    output_image = self.encoder_decoder.model(output_rev, rev=True)

                    secret_rev = output_image.narrow(1, 4 * hc.channels_in, output_image.shape[1] - 4 * hc.channels_in)
                    decoder_message = self.iwt(secret_rev)
                    decoder_message = torch.clamp(decoder_message, 0, 1)  # keep the decoded output in [0, 1]

                    noiser_loss = self.mse_loss(attack_image, noise_image)
                    
                    g_loss = guide_loss(encoder_image.cuda(), images.cuda())
                    r_loss = reconstruction_loss(decoder_message, messages)
                    output_steg = self.dwt(encoder_image)
                    cover_input = self.dwt(images)
                    steg_low = output_steg.narrow(1, 0, hc.channels_in)
                    cover_low = cover_input.narrow(1, 0, hc.channels_in)
                    l_loss = low_frequency_loss(steg_low, cover_low)
                
                    loss = hc.lamda_reconstruction * r_loss + hc.lamda_guide * g_loss + hc.lamda_low_frequency * l_loss

                    psnr, ssim = utils.val_psnr_ssim(images, encoder_image)
                    test_psnr += psnr
                    test_ssim += ssim

                    test_encoder_loss += g_loss.item()
                    test_decoder_loss += r_loss.item()
                    test_running_loss += loss.item()
                    test_noiser_loss += noiser_loss.item()


                    if is_first_save:  # intended: save only the first batch's images
                        # NOTE(review): the flag is never cleared (next line is
                        # commented out), so images are saved for every batch.
                        # is_first_save = False
                        save_images(images.cpu()[:8, :, :, :],
                                    encoder_image[:8, :, :, :].cpu(), noise_image[:8, :, :, :].cpu(), ep_num, c.save_image_dir)
                    decoder_message = (decoder_message > 0.5).float()  # stable binarisation in torch
                    bitwise_err = torch.abs((decoder_message > 0.5).float() - messages).mean().item()
                    test_bit_arr += bitwise_err

                test_epoch_encoder_loss = test_encoder_loss / len(test_loader)
                test_epoch_decoder_loss = test_decoder_loss / len(test_loader)
                test_epoch_loss = test_running_loss / len(test_loader)
                test_epoch_noiser_loss = test_noiser_loss / len(test_loader)
                psnr = test_psnr / len(test_loader)
                ssim = test_ssim / len(test_loader)
                bit_arr = test_bit_arr / len(test_loader)

                writer.writerow([
                    ep_num, 
                    f"{train_epoch_loss:.4f}", 
                    f"{train_epoch_loss_ASL:.4f}", 
                    f"{train_epoch_loss_SML:.4f}", 
                    f"{test_epoch_loss:.4f}", 
                    f"{test_epoch_encoder_loss:.4f}", 
                    f"{test_epoch_decoder_loss:.4f}", 
                    f"{bit_arr:.4f}", 
                    f"{psnr:.2f}", 
                    f"{ssim:.2f}",
                    f"{train_epoch_noiser_loss:.4f}",
                    f"{test_epoch_noiser_loss:.4f}",
                    f"{torch.exp(-2*self.log_sigma_asl).item():.4f}",
                    f"{torch.exp(-2*self.log_sigma_sml).item():.4f}"
                ])
                f.flush()
                print(f"Epoch [{ep_num}/{c.epochs}], "
                                f"Train Loss: {train_epoch_loss:.4f} (Train ASL Loss: {train_epoch_loss_ASL:.4f}, Train SML Loss: {train_epoch_loss_SML:.4f}), "
                                f"Test Loss: {test_epoch_loss:.4f} (Encoder: {test_epoch_encoder_loss:.4f}, Decoder: {test_epoch_decoder_loss:.4f}), "
                                f"Bit arr: {bit_arr:.4f}, Psnr: {psnr}, Ssim: {ssim}, "
                                f"Train noiser Loss: {train_epoch_noiser_loss:.4f}, Test noiser Loss:{test_epoch_noiser_loss:.4f}")
                print(f"w_asl={torch.exp(-2*self.log_sigma_asl).item():.4f}, "
                      f"w_sml={torch.exp(-2*self.log_sigma_sml).item():.4f}")
                # Keep the checkpoint with the lowest test loss seen so far.
                if self.min_testLoss > test_epoch_loss:
                    self.min_testLoss = test_epoch_loss
                    model_save(self, ep_num, f'{c.model_dir}/best.pth')
                
                if isSave:
                    model_save(self, ep_num, f'{c.model_dir}/{ep_num}.pth')



def save_images(cover_images, encoder_images, noise_image, epoch, folder):
    """Save a comparison grid of cover, stego and noised images for *epoch*.

    The grid stacks (row-wise): cover images, encoded (stego) images, noised
    images, and the x5-amplified residuals of stego and noised vs. cover.
    The output file is ``<folder>/epoch-<epoch>.png``.
    """
    # Robustness: create the output directory if it does not exist yet.
    os.makedirs(folder, exist_ok=True)

    # Move everything to CPU before composing the grid.
    images = cover_images.cpu()
    encoder_images = encoder_images.cpu()
    noise_image = noise_image.cpu()

    # Residuals amplified x5 for visibility, clamped back into [0, 1].
    revise_image = torch.clamp(torch.abs(images - encoder_images) * 5, 0, 1)
    revise_image_5 = torch.clamp(torch.abs(images - noise_image) * 5, 0, 1)

    # Concatenate along the batch dimension so save_image lays them out as rows.
    stacked_images = torch.cat([images, encoder_images, noise_image, revise_image, revise_image_5], dim=0)

    filename = os.path.join(folder, 'epoch-{}.png'.format(epoch))
    torchvision.utils.save_image(stacked_images, filename, normalize=False)


