# -*- coding:utf-8 -*-
import csv
import os
import torch
import torch.nn as nn
import torchvision
import utils
from model.discriminator import Discriminator
import numpy as np
from model.encoder_decoder import EncoderDecoder
import datetime
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
import time

def validate(device, test_loader, messages_length, num_epochs, log_file, save_image_dir, en, de, adv, normalize):
    """Evaluate a pretrained HiDDeN encoder/decoder on the test set.

    For each epoch, every test batch is pushed through the encoder/decoder;
    the combined loss (en * image MSE + de * message MSE + adv * discriminator
    BCE), PSNR, SSIM and bitwise error rate are accumulated, a sample image
    grid is saved for the first batch, and one CSV row per epoch is written
    to log_file.

    Args:
        device: torch device to run on.
        test_loader: DataLoader yielding (images, labels) batches.
        messages_length: number of watermark bits per image.
        num_epochs: how many evaluation passes to run.
        log_file: path of the CSV metrics log to write.
        save_image_dir: directory for the sample image grids.
        en, de, adv: weights of the encoder / decoder / adversarial loss terms.
        normalize: True if inputs were normalized to [-1, 1] (forwarded to
            save_images so grids are rescaled for display).
    """
    image_length = 128
    start_time = time.time()

    mse_loss = nn.MSELoss().to(device)
    bce_with_logits_loss = nn.BCEWithLogitsLoss().to(device)
    # NOTE(review): this discriminator keeps its random initial weights (no
    # checkpoint is loaded for it), so the adversarial loss reported below is
    # that of an untrained network — confirm this is intended.
    discriminator = Discriminator().to(device)

    attack = False
    channels = 64
    encoder_decoder = EncoderDecoder(encoder_channels=channels, decoder_channels=channels,
                                     H=image_length, W=image_length, M=messages_length, attack=attack)

    #######################
    #   Load Parameter    #
    #######################
    # Hard-coded checkpoint; swap the path to evaluate a different run.
    encoder_decoder_state = torch.load(
        '/home/lzc/zzc/HiDDeN/runs-works/run_HiDDeN-JpegMass-no-normalize_2024-03-18_08-33-23/model/100_model.pth',
        map_location=torch.device(device))
    encoder_decoder.load_state_dict(encoder_decoder_state)
    encoder_decoder.to(device)

    # Switch to eval mode once, before any forward pass, so batch-norm /
    # dropout layers behave deterministically.  (The original code called
    # eval() only after the first forward pass of each batch had already run
    # in training mode, and ran that pass outside no_grad.)
    encoder_decoder.eval()
    discriminator.eval()

    cover_data_label = 1
    print(f"Execution time: {time.time() - start_time} seconds")

    # newline="" keeps csv.writer from emitting blank rows on Windows.
    with open(log_file, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["Epoch", "testLoss", "advLoss", "bit_arr", "psnr", "ssim"])

        for epoch in range(num_epochs):
            is_first_save = True
            test_running_loss = 0.0
            dis_running_loss = 0.0

            test_psnr = 0.0
            test_ssim = 0.0
            test_bit_arr = 0.0
            for images, _ in test_loader:
                batch_start = time.time()
                images = images.to(device)
                # "Real/cover" label used as the target of the adversarial BCE term.
                target_label_adv = torch.full((images.size(0), 1), cover_data_label, device=device)

                # Fresh random watermark bits for this batch.
                messages = torch.Tensor(np.random.choice([0, 1], (images.shape[0], messages_length))).to(device)

                with torch.no_grad():
                    # Forward pass now happens inside no_grad: no autograd
                    # graph is built during validation.
                    encoder_image, noise_image, decoder_message = encoder_decoder(images, messages)

                    dis_loss = bce_with_logits_loss(discriminator(encoder_image), target_label_adv.float())
                    encoder_loss = mse_loss(images, encoder_image)
                    # Mean squared error between decoded and ground-truth bits.
                    decoder_loss = mse_loss(decoder_message, messages)

                    loss = en * encoder_loss + de * decoder_loss + adv * dis_loss

                    psnr, ssim = utils.val_psnr_ssim(images, encoder_image)
                    test_psnr += psnr
                    test_ssim += ssim
                    test_running_loss += loss.item()
                    dis_running_loss += dis_loss.item()

                    if is_first_save:  # only save images from the first batch
                        is_first_save = False
                        save_images(images.cpu()[:8, :, :, :],
                                    encoder_image[:8, :, :, :].cpu(), noise_image[:8, :, :, :].cpu(),
                                    epoch, save_image_dir, normalize)

                    # Bitwise error rate: fraction of decoded bits (rounded to
                    # {0, 1}) that differ from the transmitted message bits.
                    decoded_rounded = decoder_message.detach().cpu().numpy().round().clip(0, 1)
                    bitwise_err = np.sum(np.abs(decoded_rounded - messages.detach().cpu().numpy())) / (messages.shape[0] * messages_length)
                    test_bit_arr += bitwise_err
                print(f"Epoch [{epoch + 1}/{num_epochs}] Execution time a batch: {time.time() - batch_start} seconds")

            # Per-epoch averages over all batches.
            test_epoch_loss = test_running_loss / len(test_loader)
            dis_epoch_loss = dis_running_loss / len(test_loader)
            psnr = test_psnr / len(test_loader)
            ssim = test_ssim / len(test_loader)
            bit_arr = test_bit_arr / len(test_loader)

            # Fixed-precision strings for the CSV row.
            test_loss_str = "{:.4f}".format(test_epoch_loss)
            dis_loss_str = "{:.4f}".format(dis_epoch_loss)
            psnr = "{:.2f}".format(psnr)
            ssim = "{:.2f}".format(ssim)

            writer.writerow([epoch + 1, test_loss_str, dis_loss_str, bit_arr, psnr, ssim])

            print(f"Epoch [{epoch + 1}/{num_epochs}], Test Loss: {test_epoch_loss:.4f}, Adv Loss：{dis_epoch_loss}, Bit arr：{bit_arr}, Psnr：{psnr}, Ssim：{ssim}")

    print(f"Training log saved to {log_file}.")
    print("Training finished.")

def save_images(cover_images, encoder_images, noise_image, epoch, folder, normalize):
    """Save a comparison grid: cover, encoded, noised images plus amplified residuals.

    Writes 'epoch-{epoch+1}.png' into *folder*.  The grid stacks, in order:
    cover images, encoded (watermarked) images, noised images, and two
    residual rows (|cover - encoded| and |cover - noised|, each scaled x5 so
    the embedded perturbation is visible).

    Args:
        cover_images: batch of original images (CPU tensors).
        encoder_images: batch of watermarked images.
        noise_image: batch of noise-attacked watermarked images.
        epoch: zero-based epoch index used in the output filename.
        folder: destination directory (must exist).
        normalize: True if the tensors are in [-1, 1] and must be rescaled
            to [0, 1] for display.
    """
    # The original code sliced each tensor with [:x.shape[0]] — a no-op
    # that has been removed.

    # Scale values to [0, 1] from the original [-1, 1] range.
    if normalize:
        cover_images = (cover_images + 1) / 2
        encoder_images = (encoder_images + 1) / 2
        noise_image = (noise_image + 1) / 2

    # Amplify residuals x5 so small watermark perturbations are visible.
    revise_image = torch.abs(cover_images - encoder_images) * 5
    revise_image_5 = torch.abs(cover_images - noise_image) * 5

    stacked_images = torch.cat([cover_images, encoder_images, noise_image, revise_image, revise_image_5], dim=0)
    filename = os.path.join(folder, 'epoch-{}.png'.format(epoch + 1))
    torchvision.utils.save_image(stacked_images, filename, normalize=False)

def main():
    """Build the test DataLoader and run validation with fixed hyper-parameters."""
    # NOTE(review): GPU index 3 is hard-coded — adjust for the local machine.
    device = torch.device('cuda:3') if torch.cuda.is_available() else torch.device('cpu')
    print(device)

    file_dir = "runs-works-test"
    os.makedirs(file_dir, exist_ok=True)

    # Timestamp makes each run directory unique.
    current_time = datetime.datetime.now()
    time_str = current_time.strftime("%Y-%m-%d_%H-%M-%S")

    # Run name: "run_<model-variant>[_<noise-variant>]_<timestamp>".
    # Other variants (base / Brightness / Contrast / Saturation / Hue / Jpeg /
    # Gaussian / Salt, both plain and HiDDeN-*) were previously selected by
    # editing this single line; keep it in sync with the checkpoint loaded
    # inside validate().
    name = "run" + "_" + "HiDDeN-Jpeg" + "_" + time_str

    train_dir = os.path.join(file_dir, name)
    os.makedirs(train_dir, exist_ok=True)

    test_data_dir = "/home/lzc/DRHiNet/data/val"     # test data

    log_dir = os.path.join(train_dir, "log")
    os.makedirs(log_dir, exist_ok=True)
    log_file = os.path.join(log_dir, "log.csv")

    save_image_dir = os.path.join(train_dir, "image")
    os.makedirs(save_image_dir, exist_ok=True)

    # Hyper-parameters
    image_size = 128
    batch_size = 32
    message_length = 30
    num_epochs = 1

    # Whether inputs are normalized to [-1, 1].
    normalize = False

    # Preprocessing pipelines.  An empty Compose acts as the identity when
    # normalization is disabled; Normalize maps [0, 1] tensors to [-1, 1].
    data_transforms = {
        'train': transforms.Compose([
            # Random crop to image_size, padding the image if it is smaller.
            transforms.RandomCrop((image_size, image_size), pad_if_needed=True),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) if normalize else transforms.Compose([]),
        ]),
        'test': transforms.Compose([
            # Deterministic center crop for the test set.
            transforms.CenterCrop((image_size, image_size)),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) if normalize else transforms.Compose([]),
        ])
    }

    test_dataset = ImageFolder(test_data_dir, transform=data_transforms['test'])
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=4)

    # Loss-term weights: encoder (image MSE) / decoder (message MSE) / adversarial.
    en = 0.7
    de = 1.0
    adv = 0.001

    validate(device, test_loader, message_length, num_epochs, log_file, save_image_dir, en, de, adv, normalize)

if __name__ == '__main__':
    main()