# -*- coding:utf-8 -*-
import csv
import os

from options import HiDDenConfiguration
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import utils
from model.discriminator import Discriminator
import numpy as np
from model.encoder_decoder import EncoderDecoder
from modules.UnetPlusPlus_L6 import UNetPlusPlusL6_Attack
from noise_layers.jpeg import Jpeg
from noise_layers.colorjitter import ColorJitter
from model.attack import Attack



def train_ED(train_loader, test_loader, messages_length, num_epochs, model_dir, log_file, save_image_dir, en, de, adv, encoder_decoder, discriminator, noiser, optimizer, optimizer_dis):
    """Adversarially train a watermark encoder/decoder against a discriminator.

    Each epoch alternates a training pass (discriminator step, then
    encoder/decoder step with loss = en*encoder_loss + de*decoder_loss +
    adv*adv_loss) and an evaluation pass under a fixed JPEG(Q=90) noise
    layer, logging per-epoch metrics to ``log_file`` (CSV), saving a sample
    image grid per epoch, and checkpointing encoder/decoder/optimizer
    state dicts into ``model_dir``.

    Args:
        train_loader / test_loader: torch DataLoaders yielding (images, _)
            batches; images are moved to CUDA here.
        messages_length: side length of the square binary message map, so
            each message tensor is (B, 1, messages_length, messages_length).
        num_epochs: number of epochs to run.
        model_dir: directory for per-epoch ``.pth`` checkpoints.
        log_file: CSV path for the training log (overwritten).
        save_image_dir: directory for per-epoch sample image grids.
        en, de, adv: scalar weights for encoder (image distortion), decoder
            (message recovery) and adversarial loss terms.
        encoder_decoder: model exposing ``.encoder(images, messages)`` and
            ``.decoder(noised_images)``.
        discriminator: real/stego classifier producing one logit per image.
        noiser: differentiable noise layer applied between encoder and
            decoder during training (e.g. a JPEG approximation).
        optimizer: optimizer over encoder/decoder parameters.
        optimizer_dis: optimizer over discriminator parameters.

    Side effects: writes CSV log, PNG grids, and checkpoint files; prints a
    per-epoch summary. Requires CUDA.
    """
    # Non-differentiable JPEG(Q=90) used only at evaluation time, so the
    # test metrics reflect real compression rather than the training proxy.
    jpeg = Jpeg(Q=90).cuda()

    ################
    ###   Loss   ###
    ################
    mse_loss = nn.MSELoss().cuda()
    L1 = nn.L1Loss().cuda()
    bce_with_logits_loss = nn.BCEWithLogitsLoss()

    # Discriminator targets: 1 for cover (clean) images, 0 for encoded ones.
    cover_data_label = 1
    encoder_data_label = 0

    # newline="" is required by the csv module so rows are not double-spaced
    # on Windows (csv handles its own line endings).
    with open(log_file, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["Epoch", "trainLoss", "train_encoder_loss", "train_decoder_loss",
                "testLoss", "test_encoder_loss", "test_decoder_loss",
                "advLoss", "bit_arr", "psnr", "ssim"])

        for epoch in range(num_epochs):
            # ---- training phase ----
            encoder_decoder.train()
            discriminator.train()

            is_first_save = True  # save only the first test batch's images each epoch
            train_encoder_loss = 0.0
            train_decoder_loss = 0.0
            train_running_loss = 0.0

            for images, _ in train_loader:
                images = images.cuda()

                target_label_cover = torch.full((images.size(0), 1), cover_data_label).cuda()
                target_label_encoded = torch.full((images.size(0), 1), encoder_data_label).cuda()
                # When training the encoder we want the discriminator to be
                # fooled, i.e. to predict "cover" on encoded images.
                target_label_adv = torch.full((images.size(0), 1), cover_data_label).cuda()
                # Fresh random binary message per batch, shaped (B, 1, L, L).
                messages = torch.Tensor(np.random.choice([0, 1], size=[images.size(0), 1, messages_length, messages_length])).cuda()
                with torch.enable_grad():

                    # --- discriminator step ---
                    optimizer_dis.zero_grad()
                    adv_loss_on_cover = bce_with_logits_loss(discriminator(images), target_label_cover.float())
                    adv_loss_on_cover.backward()

                    encoder_image = encoder_decoder.encoder(images, messages)
                    noise_image = noiser(encoder_image)
                    decoder_message = encoder_decoder.decoder(noise_image)

                    # detach(): the discriminator step must not update encoder weights.
                    adv_loss_on_encoder = bce_with_logits_loss(discriminator(encoder_image.detach()), target_label_encoded.float())
                    adv_loss_on_encoder.backward()
                    optimizer_dis.step()

                    # --- encoder/decoder step ---
                    optimizer.zero_grad()
                    adv_loss = bce_with_logits_loss(discriminator(encoder_image), target_label_adv.float())
                    encoder_loss = mse_loss(encoder_image, images)
                    decoder_loss = mse_loss(decoder_message, messages)

                    loss = en * encoder_loss + de * decoder_loss + adv * adv_loss
                    loss.backward()
                    optimizer.step()

                train_encoder_loss += encoder_loss.item()
                train_decoder_loss += decoder_loss.item()
                train_running_loss += loss.item()
            train_epoch_encoder_loss = train_encoder_loss / len(train_loader)
            train_epoch_decoder_loss = train_decoder_loss / len(train_loader)
            train_epoch_loss = train_running_loss / len(train_loader)

            # ---- evaluation phase (real JPEG noise, no gradients) ----
            encoder_decoder.eval()
            discriminator.eval()

            test_encoder_loss = 0.0
            test_decoder_loss = 0.0
            test_psnr = 0.0
            test_ssim = 0.0
            test_bit_arr = 0.0
            test_running_loss = 0.0
            dis_running_loss = 0.0
            with torch.no_grad():
                for images, _ in test_loader:
                    images = images.cuda()
                    target_label_adv = torch.full((images.size(0), 1), cover_data_label).cuda()

                    messages = torch.Tensor(np.random.choice([0, 1], size=[images.size(0), 1, messages_length, messages_length])).cuda()
                    encoder_image = encoder_decoder.encoder(images, messages)
                    noise_image = jpeg(encoder_image)
                    decoder_message = encoder_decoder.decoder(noise_image)

                    dis_loss = bce_with_logits_loss(discriminator(encoder_image), target_label_adv.float())
                    encoder_loss = mse_loss(images, encoder_image)
                    decoder_loss = mse_loss(messages, decoder_message)
                    loss = en * encoder_loss + de * decoder_loss + adv * dis_loss

                    psnr, ssim = utils.val_psnr_ssim(images, encoder_image)
                    test_psnr += psnr
                    test_ssim += ssim

                    test_encoder_loss += encoder_loss.item()
                    test_decoder_loss += decoder_loss.item()
                    test_running_loss += loss.item()
                    dis_running_loss += dis_loss.item()

                    if is_first_save:  # only save images from the first batch
                        is_first_save = False
                        save_images(images.cpu()[:8, :, :, :],
                                    encoder_image[:8, :, :, :].cpu(), noise_image[:8, :, :, :].cpu(), epoch, save_image_dir)
                    # Bit error rate: round decoded values to {0,1} and compare.
                    decoded_rounded = decoder_message.detach().cpu().numpy().round().clip(0, 1)
                    bitwise_err = np.sum(np.abs(decoded_rounded - messages.detach().cpu().numpy())) / (messages.shape[0] * messages.shape[1] * messages.shape[2] * messages.shape[3])
                    test_bit_arr += bitwise_err

            test_epoch_encoder_loss = test_encoder_loss / len(test_loader)
            test_epoch_decoder_loss = test_decoder_loss / len(test_loader)
            test_epoch_loss = test_running_loss / len(test_loader)
            dis_epoch_loss = dis_running_loss / len(test_loader)
            psnr = test_psnr / len(test_loader)
            ssim = test_ssim / len(test_loader)
            bit_arr = test_bit_arr / len(test_loader)

            writer.writerow([
                epoch+1,
                f"{train_epoch_loss:.4f}",
                f"{train_epoch_encoder_loss:.4f}",
                f"{train_epoch_decoder_loss:.4f}",
                f"{test_epoch_loss:.4f}",
                f"{test_epoch_encoder_loss:.4f}",
                f"{test_epoch_decoder_loss:.4f}",
                f"{dis_epoch_loss:.4f}",
                f"{bit_arr:.4f}",
                f"{psnr:.2f}",
                f"{ssim:.2f}"
            ])
            f.flush()  # make the log readable while training is still running
            print(f"Epoch [{epoch+1}/{num_epochs}], "
                            f"Train Loss: {train_epoch_loss:.4f} (Encoder: {train_epoch_encoder_loss:.4f}, Decoder: {train_epoch_decoder_loss:.4f}), "
                            f"Test Loss: {test_epoch_loss:.4f} (Encoder: {test_epoch_encoder_loss:.4f}, Decoder: {test_epoch_decoder_loss:.4f}), "
                            f"Adv Loss: {dis_epoch_loss:.4f}, Bit arr: {bit_arr:.4f}, Psnr: {psnr}, Ssim: {ssim}")

            # Per-epoch checkpoints. NOTE: the discriminator optimizer used to
            # be written to the same "{}_opt.pth" path, clobbering the main
            # optimizer's checkpoint every epoch — fixed to "_disopt".
            # NOTE(review): discriminator weights themselves are not saved;
            # confirm whether that is intentional.
            enc_save_path = os.path.join(model_dir, "{}_enc.pth".format(epoch + 1))
            dec_save_path = os.path.join(model_dir, "{}_dec.pth".format(epoch + 1))
            opt_save_path = os.path.join(model_dir, "{}_opt.pth".format(epoch + 1))
            disopt_save_path = os.path.join(model_dir, "{}_disopt.pth".format(epoch + 1))
            torch.save(encoder_decoder.encoder.state_dict(), enc_save_path)
            torch.save(encoder_decoder.decoder.state_dict(), dec_save_path)
            torch.save(optimizer.state_dict(), opt_save_path)
            torch.save(optimizer_dis.state_dict(), disopt_save_path)

    print(f"Training log saved to {log_file}.")
    print("Training finished.")

def save_images(cover_images, encoder_images, noise_image, epoch, folder):
    """Write a comparison grid for one batch to ``folder``.

    Rows stacked along the batch dimension: cover images, encoded (stego)
    images, noised images, then two 5x-amplified absolute residuals
    (cover-vs-stego and cover-vs-noised) clamped to [0, 1]. Saved as
    ``epoch-<epoch+1>.png`` with no renormalization.
    """
    # Ensure everything lives on the CPU before concatenation/saving.
    cover = cover_images.cpu()
    stego = encoder_images.cpu()
    noised = noise_image.cpu()

    # Residuals amplified 5x so subtle embedding artifacts become visible.
    stego_residual = torch.clamp((cover - stego).abs() * 5, 0, 1)
    noised_residual = torch.clamp((cover - noised).abs() * 5, 0, 1)

    grid = torch.cat((cover, stego, noised, stego_residual, noised_residual), dim=0)

    out_path = os.path.join(folder, 'epoch-{}.png'.format(epoch + 1))
    torchvision.utils.save_image(grid, out_path, normalize=False)