# -*- coding:utf-8 -*-
import csv
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import utils
from model.discriminator import Discriminator
import numpy as np
from model.encoder_decoder import EncoderDecoder

def train(device, train_loader, test_loader, messages_length, num_epochs, model_dir, log_file, save_image_dir, en, de, adv, normalize):
    """Train the encoder/decoder watermarking network with an adversarial discriminator.

    Per epoch: (1) train the discriminator to separate cover from encoded
    images, (2) train the encoder/decoder against image-reconstruction,
    message-reconstruction and adversarial losses, (3) evaluate on the test
    set (loss, bit error rate, PSNR/SSIM), log one CSV row, save a sample
    image grid and the model weights.

    Args:
        device: torch device to run on.
        train_loader / test_loader: DataLoaders yielding (images, labels);
            labels are ignored.
        messages_length: number of watermark bits embedded per image.
        num_epochs: number of training epochs.
        model_dir: directory where per-epoch weights are written.
        log_file: CSV file receiving one metrics row per epoch.
        save_image_dir: directory for the per-epoch sample image grids.
        en, de, adv: weights of the encoder, decoder and adversarial losses.
        normalize: forwarded to save_images (True if inputs are in [-1, 1]).
    """
    image_length = 128

    mse_loss = nn.MSELoss().to(device)
    bce_with_logits_loss = nn.BCEWithLogitsLoss()
    discriminator = Discriminator().to(device)

    attack = True
    channels = 64
    encoder_decoder = EncoderDecoder(encoder_channels=channels, decoder_channels=channels, H=image_length, W=image_length, M = messages_length, attack=attack)
    encoder_decoder.to(device)

    optimizer = optim.Adam(encoder_decoder.parameters())
    # optimizer = optim.SGD(encoder_decoder.parameters(), lr=0.01)
    optimizer_dis = optim.Adam(discriminator.parameters())

    cover_data_label = 1
    encoder_data_label = 0

    # newline="" is required by csv.writer to avoid blank lines on Windows.
    with open(log_file, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["Epoch", "trainLoss", "testLoss", "advLoss", "bit_arr", "psnr", "ssim"])
        # Load pretrained noise-layer parameters.
        if attack:
            # map_location remaps weights saved on another device onto the
            # current one; otherwise loading fails across devices.
            # attack_state_dict = torch.load('/home/lzc/zzc/HiDDeN_pyj/attack_runs/run-Brightness_2023-12-28_23-17-16/model/best_model.pth', map_location=torch.device(device))
            # attack_state_dict = torch.load('/home/lzc/zzc/HiDDeN_pyj/attack_runs/run-Contrast_2023-12-28_23-20-33/model/best_model.pth', map_location=torch.device(device))
            attack_state_dict = torch.load('/home/lzc/zzc/HiDDeN_pyj/attack_runs/run-Saturation_2023-12-28_23-21-25/model/best_model.pth', map_location=torch.device(device))
            # attack_state_dict = torch.load('/home/lzc/zzc/HiDDeN_pyj/attack_runs/run-Hue_2023-12-28_23-22-56/model/best_model.pth', map_location=torch.device(device))
            # attack_state_dict = torch.load('/home/lzc/zzc/HiDDeN_pyj/attack_runs/run-Gaussian(0.06)_2023-12-28_22-52-49/model/best_model.pth', map_location=torch.device(device))
            # attack_state_dict = torch.load('/home/lzc/zzc/HiDDeN_pyj/attack_runs/run-salt_2023-12-28_22-54-10/model/best_model.pth', map_location=torch.device(device))
            # attack_state_dict = torch.load('/home/lzc/zzc/HiDDeN_pyj/attack_runs/run-JPEG(50)_2023-12-28_22-48-48/model/best_model.pth', map_location=torch.device(device))
            # attack_state_dict = torch.load('/home/zzc/deeplearn_test/HiDDeN/attack_runs/run-Quantization_2023-12-28_23-00-07/model/best_model.pth', map_location=torch.device(device))
            # attack_state_dict = torch.load('/home/zzc/Experience/HiDDeN/attack_runs/run-JPEG(50)_Mask_2024-02-27_17-34-52/model/best_model.pth', map_location=torch.device(device))
            encoder_decoder.noiser.load_state_dict(attack_state_dict)
            # Freeze the noise layer; it must not be updated during training.
            for param in encoder_decoder.noiser.parameters():
                param.requires_grad = False

        for epoch in range(num_epochs):
            is_first_save = True
            train_running_loss = 0.0
            test_running_loss = 0.0
            dis_running_loss = 0.0

            # ---------------- training phase ----------------
            encoder_decoder.train()
            discriminator.train()
            for images, _ in train_loader:
                images = images.to(device)
                target_label_cover = torch.full((images.size(0), 1), cover_data_label, device=device)
                target_label_encoded = torch.full((images.size(0), 1), encoder_data_label, device=device)
                target_label_adv = torch.full((images.size(0), 1), cover_data_label, device=device)

                messages = torch.Tensor(np.random.choice([0, 1], (images.shape[0], messages_length))).to(device)

                with torch.enable_grad():
                    # --- train the discriminator ---
                    optimizer_dis.zero_grad()
                    adv_loss_on_cover = bce_with_logits_loss(discriminator(images), target_label_cover.float())
                    adv_loss_on_cover.backward()

                    encoder_image, noise_image, decoder_message = encoder_decoder(images, messages)

                    # detach() so discriminator gradients do not flow into the
                    # encoder. BCEWithLogitsLoss is used consistently here: the
                    # discriminator outputs raw logits, so plain BCELoss would
                    # be numerically invalid.
                    adv_loss_on_encoder = bce_with_logits_loss(discriminator(encoder_image.detach()), target_label_encoded.float())
                    adv_loss_on_encoder.backward()
                    optimizer_dis.step()

                    # --- train the encoder/decoder ---
                    optimizer.zero_grad()
                    # Generator objective: make encoded images look like covers.
                    adv_loss = bce_with_logits_loss(discriminator(encoder_image), target_label_adv.float())
                    encoder_loss = mse_loss(encoder_image, images)
                    # Mean squared error as the message-recovery loss.
                    decoder_loss = mse_loss(decoder_message, messages)

                    loss = en * encoder_loss + de * decoder_loss + adv * adv_loss
                    loss.backward()
                    optimizer.step()

                train_running_loss += loss.item()

            train_epoch_loss = train_running_loss / len(train_loader)

            # ---------------- evaluation phase ----------------
            # eval() must be set BEFORE the forward pass, and the whole pass
            # runs under no_grad() so no autograd graph is built.
            encoder_decoder.eval()
            discriminator.eval()
            test_psnr = 0.0
            test_ssim = 0.0
            test_bit_arr = 0.0
            with torch.no_grad():
                for images, _ in test_loader:
                    images = images.to(device)
                    target_label_adv = torch.full((images.size(0), 1), cover_data_label, device=device)

                    messages = torch.Tensor(np.random.choice([0, 1], (images.shape[0], messages_length))).to(device)

                    encoder_image, noise_image, decoder_message = encoder_decoder(images, messages)

                    dis_loss = bce_with_logits_loss(discriminator(encoder_image), target_label_adv.float())
                    encoder_loss = mse_loss(images, encoder_image)
                    decoder_loss = mse_loss(decoder_message, messages)

                    loss = en * encoder_loss + de * decoder_loss + adv * dis_loss

                    psnr, ssim = utils.val_psnr_ssim(images, encoder_image)
                    test_psnr += psnr
                    test_ssim += ssim
                    test_running_loss += loss.item()
                    dis_running_loss += dis_loss.item()

                    if is_first_save:  # only save images from the first batch
                        is_first_save = False
                        save_images(images.cpu()[:8, :, :, :],
                                  encoder_image[:8, :, :, :].cpu(), noise_image[:8, :, :, :].cpu(), epoch, save_image_dir, normalize)
                    # Round decoded soft bits to {0,1} and compute the bit error rate.
                    decoded_rounded = decoder_message.detach().cpu().numpy().round().clip(0, 1)
                    bitwise_err = np.sum(np.abs(decoded_rounded - messages.detach().cpu().numpy())) / (messages.shape[0] * messages_length)
                    test_bit_arr += bitwise_err

            test_epoch_loss = test_running_loss / len(test_loader)
            dis_epoch_loss = dis_running_loss / len(test_loader)
            psnr = test_psnr / len(test_loader)
            ssim = test_ssim / len(test_loader)
            bit_arr = test_bit_arr / len(test_loader)

            # Format losses as strings with 4 decimal places for the CSV log.
            train_loss_str = "{:.4f}".format(train_epoch_loss)
            test_loss_str = "{:.4f}".format(test_epoch_loss)
            dis_loss_str = "{:.4f}".format(dis_epoch_loss)
            psnr = "{:.2f}".format(psnr)
            ssim = "{:.2f}".format(ssim)

            writer.writerow([epoch+1, train_loss_str, test_loss_str, dis_loss_str, bit_arr, psnr, ssim])

            print(f"Epoch [{epoch + 1}/{num_epochs}], Train Loss: {train_epoch_loss:.4f}, Test Loss: {test_epoch_loss:.4f}, Adv Loss：{dis_epoch_loss}, Bit arr：{bit_arr}, Psnr：{psnr}, Ssim：{ssim}")

            save_path = os.path.join(model_dir, "{}_model.pth".format(epoch + 1))  # checkpoint path for this epoch
            torch.save(encoder_decoder.state_dict(), save_path)  # save current weights

    print(f"Training log saved to {log_file}.")
    print("Training finished.")

def save_images(cover_images, encoder_images, noise_image, epoch, folder, normalize):
    """Save a vertical comparison grid of images for one epoch.

    The grid stacks, in order: cover images, encoded (watermarked) images,
    noised images, and the 5x-amplified absolute residuals of the encoded and
    noised images against the covers. Written to ``folder/epoch-{epoch+1}.png``.

    Args:
        cover_images: batch of original images.
        encoder_images: batch of watermarked images.
        noise_image: batch of noise-attacked images.
        epoch: zero-based epoch index (filename uses epoch + 1).
        folder: destination directory.
        normalize: if True, inputs are assumed to be in [-1, 1] and are
            rescaled to [0, 1] before saving.
    """
    # Move everything to CPU; the previous no-op slices ([:x.shape[0]]) were removed.
    cover = cover_images.cpu()
    encoded = encoder_images.cpu()
    noised = noise_image.cpu()

    # Rescale from the original [-1, 1] range to [0, 1].
    if normalize:
        cover = (cover + 1) / 2
        encoded = (encoded + 1) / 2
        noised = (noised + 1) / 2

    # Amplify residuals x5 so the embedding perturbation is visible.
    revise_image = torch.abs(cover - encoded) * 5
    revise_image_5 = torch.abs(cover - noised) * 5

    stacked_images = torch.cat([cover, encoded, noised, revise_image, revise_image_5], dim=0)
    filename = os.path.join(folder, 'epoch-{}.png'.format(epoch + 1))
    torchvision.utils.save_image(stacked_images, filename, normalize=False)