import os  # 导入操作系统模块
import time  # 导入时间模块
import pprint  # 导入美观打印模块
import argparse  # 导入参数解析模块
import torch  # 导入PyTorch深度学习框架
import numpy as np  # 导入NumPy库
import pickle  # 导入pickle模块，用于序列化和反序列化
import utils  # 导入自定义的工具模块
import csv  # 导入CSV文件处理模块

from model.hidden import Hidden  # 从模型文件中导入Hidden类
from noise_layers.noiser import Noiser  # 从噪声层文件中导入Noiser类
from average_meter import AverageMeter  # 导入自定义的平均计量器类


def write_validation_loss(file_name, losses_accu, experiment_name, epoch, write_header=False):
    """Append one row of averaged validation losses to a CSV file.

    Args:
        file_name (str): Path of the CSV file to append to.
        losses_accu (dict): Maps loss names to accumulators exposing an ``.avg`` attribute.
        experiment_name (str): Name of the experiment/run this row belongs to.
        epoch (int): Epoch number recorded in the row.
        write_header (bool): When True, emit a header row before the data row.

    Returns:
        None
    """
    header_row = ['experiment_name', 'epoch'] + [name.strip() for name in losses_accu]
    data_row = [experiment_name, epoch]
    data_row += ['{:.4f}'.format(meter.avg) for meter in losses_accu.values()]
    with open(file_name, 'a', newline='') as out_file:
        csv_writer = csv.writer(out_file)
        if write_header:
            csv_writer.writerow(header_row)
        csv_writer.writerow(data_row)


def main():
    """Validate every completed HiDDeN run found under ``--runs_root``.

    For each run folder (except the baseline 'no-noise-defaults') this loads
    the stored options and the latest checkpoint, rebuilds the model, runs one
    pass over the validation split of ``--data-dir``, and appends the averaged
    losses to ``validation_run.csv`` in the runs root.
    """
    # device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    device = torch.device('cpu')  # CPU is hard-coded; the CUDA selection above is deliberately disabled

    parser = argparse.ArgumentParser(description='HiDDeN网络的训练')
    # Disabled option: image size (images are square, one value for height and width).
    # parser.add_argument('--size', '-s', default=128, type=int)
    parser.add_argument('--data-dir', '-d', required=True, type=str, help='数据存储的目录。')
    parser.add_argument('--runs_root', '-r', default=os.path.join('.', 'experiments'), type=str,
                        help='存储有关实验的数据的根文件夹。')
    parser.add_argument('--batch-size', '-b', default=1, type=int, help='验证批量大小。')

    args = parser.parse_args()
    print_each = 25  # how often (in steps) to print intermediate progress

    # Every sub-directory of the runs root is treated as a completed run,
    # except the baseline folder 'no-noise-defaults'.
    completed_runs = [o for o in os.listdir(args.runs_root)
                      if os.path.isdir(os.path.join(args.runs_root, o)) and o != 'no-noise-defaults']

    print(completed_runs)

    write_csv_header = True  # header row is written only before the first run's row
    for run_name in completed_runs:
        current_run = os.path.join(args.runs_root, run_name)
        print(f'运行文件夹: {current_run}')
        options_file = os.path.join(current_run, 'options-and-config.pickle')
        train_options, hidden_config, noise_config = utils.load_options(options_file)
        # Point both folders at the validation split; training data is not needed here.
        train_options.train_folder = os.path.join(args.data_dir, 'val')
        train_options.validation_folder = os.path.join(args.data_dir, 'val')
        train_options.batch_size = args.batch_size
        checkpoint, chpt_file_name = utils.load_last_checkpoint(os.path.join(current_run, 'checkpoints'))
        print(f'从文件 {chpt_file_name} 加载检查点')

        noiser = Noiser(noise_config)
        model = Hidden(hidden_config, device, noiser, tb_logger=None)
        utils.model_from_checkpoint(model, checkpoint)

        print('模型成功加载。开始验证运行...')
        _, val_data = utils.get_data_loaders(hidden_config, train_options)
        file_count = len(val_data.dataset)
        # Ceiling division: a final partial batch still counts as one step.
        steps_in_epoch = (file_count + train_options.batch_size - 1) // train_options.batch_size

        losses_accu = {}
        step = 0
        for image, _ in val_data:
            step += 1
            image = image.to(device)
            # Fresh random binary message per batch, shaped (batch, message_length).
            message = torch.Tensor(np.random.choice([0, 1], (image.shape[0], hidden_config.message_length))).to(device)
            losses, (encoded_images, noised_images, decoded_messages) = model.validate_on_batch([image, message],
                                                                                                set_eval_mode=True)
            if not losses_accu:  # first batch: create one meter per reported loss name
                for name in losses:
                    losses_accu[name] = AverageMeter()
            for name, loss in losses.items():
                losses_accu[name].update(loss)
            if step % print_each == 0 or step == steps_in_epoch:
                print(f'步骤 {step}/{steps_in_epoch}')
                utils.print_progress(losses_accu)
                print('-' * 40)

        write_validation_loss(os.path.join(args.runs_root, 'validation_run.csv'), losses_accu, run_name,
                              checkpoint['epoch'],
                              write_header=write_csv_header)
        write_csv_header = False


# Script entry point: run the validation pass only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
