import os
import argparse
import torch
import yaml
import torch.nn as nn
from epoch import TrainEpoch, ValEpoch
from metrics import FourMetrics
from discriminator import Discriminator
from funcs import Logger, load_checkpoint, get_train_val_loader, get_learning_rate, format_logs, save_model
from generator import Generator
from loaders import JIDataset

# Pin this process to physical GPU 6. Must be set before any CUDA context is
# created (i.e. before the first CUDA call), hence it sits at import time.
os.environ["CUDA_VISIBLE_DEVICES"] = "6"


def parse_args():
    """Parse command-line arguments, optionally seeding defaults from YAML.

    A first pass consumes only ``-c/--config``. If a config file is given,
    its top-level keys are installed as defaults on the main parser, so
    values given explicitly on the command line still win.

    NOTE: argparse derives the attribute name from the FIRST long option
    (``--si`` -> ``args.si``), so YAML keys must use those short names
    (``si``, ``bs``, ``vbs``, ``ratio``) to actually override the defaults.

    Returns:
        argparse.Namespace with the resolved training options.
    """
    config_parser = argparse.ArgumentParser(description='Training Config', add_help=False)
    config_parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
                               help='YAML config file')

    parser = argparse.ArgumentParser(description='training process')
    parser.add_argument('log', type=str,
                        help='run name; logs/checkpoints go under work_dirs/<log>')
    parser.add_argument('--work_dirs', type=str, default='../work_dirs/GAN')
    parser.add_argument('--iterations', type=int, default=10000)
    parser.add_argument('--si', '--show_interval', type=int, default=1000)
    parser.add_argument('--bs', '--batch_size', type=int, default=16)
    parser.add_argument('--vbs', '--val_batch_size', type=int, default=4)
    parser.add_argument('--ratio', '--labeled_ratio', type=float, default=0.5)
    # BUG FIX: these were `action='store_true', default=True`, which made the
    # flags no-ops (always True, impossible to disable). Defaults remain True
    # for backward compatibility; the new --no_* flags can now turn them off.
    parser.add_argument('--amp', dest='amp', action='store_true', default=True)
    parser.add_argument('--no_amp', dest='amp', action='store_false',
                        help='disable automatic mixed precision')
    parser.add_argument('--data_parallel', dest='data_parallel', action='store_true', default=True)
    parser.add_argument('--no_data_parallel', dest='data_parallel', action='store_false',
                        help='disable nn.DataParallel wrapping')

    args_config, remaining = config_parser.parse_known_args()
    if args_config.config:
        with open(args_config.config, 'r') as f:
            cfg = yaml.safe_load(f)
            # Config values become defaults, so CLI flags still override them.
            parser.set_defaults(**cfg)
    args = parser.parse_args(remaining)
    return args


def train():
    """Run semi-supervised adversarial (GAN) segmentation training.

    Builds the datasets/loaders, the generator/discriminator pair with
    their optimizers and losses, then alternates chunks of ``args.si``
    training iterations with a full validation pass. After every chunk it
    checkpoints the latest models and, when validation IoU improves, the
    best models, under ``<args.work_dirs>/<args.log>/``.
    """
    num_classes = 2
    args = parse_args()

    # Output directory and log file: <work_dirs>/<log>/train.log
    save_dir = os.path.join(args.work_dirs, args.log)
    os.makedirs(save_dir, exist_ok=True)
    logger = Logger(os.path.join(save_dir, "train.log"))
    logger.write(str(args))

    # Datasets and dataloaders.
    train_dataset = JIDataset("train")
    val_dataset = JIDataset("test")

    # get_train_val_loader splits the training data into labeled / unlabeled
    # ("remain") / ground-truth loaders according to args.ratio.
    loaders = get_train_val_loader(train_dataset, val_dataset, args.bs, args.vbs, args.ratio)
    train_loader, train_loader_remain, train_loader_gt, val_loader = loaders

    # Persistent iterators so TrainEpoch can draw batches across chunks
    # without restarting the loaders every args.si iterations.
    train_iter = iter(train_loader)
    train_gt_iter = iter(train_loader_gt)
    train_remain_iter = iter(train_loader_remain)

    # Networks.
    generator = Generator(num_classes, True)
    discriminator = Discriminator(num_classes)

    # Optionally resume from checkpoints.
    # load_checkpoint(generator, "checkpoint_path")
    # load_checkpoint(discriminator, "discriminator")

    # Multi-GPU data parallelism.
    if args.data_parallel:
        generator = nn.DataParallel(generator)
        discriminator = nn.DataParallel(discriminator)

    opt_generator = torch.optim.Adam(generator.parameters(), lr=1E-4)
    # BUG FIX: this optimizer previously received generator.parameters(),
    # so the discriminator's weights were never updated during training.
    opt_discriminator = torch.optim.Adam(discriminator.parameters(), lr=1E-4)

    criterion_generator = nn.CrossEntropyLoss()
    criterion_discriminator = nn.BCEWithLogitsLoss()

    metric = FourMetrics(False, num_classes)

    train_runner = TrainEpoch(num_classes,
                              generator, discriminator,
                              criterion_generator, criterion_discriminator,
                              opt_generator, opt_discriminator,
                              metric)
    val_runner = ValEpoch(num_classes, generator, criterion_generator, metric)

    best_val_metric = 0
    # bms holds the best validation log dict once a best model is found.
    # NOTE(review): it starts as 0, so format_logs(bms) below assumes the
    # first validation pass already beats IoU 0 — confirm format_logs
    # tolerates a non-dict if that ever fails to hold.
    bms = 0
    for i in range(0, args.iterations, args.si):
        print('Iteration: %d - lr: %.5f' % (i, get_learning_rate(opt_generator)))
        train_log = train_runner.run(args.si,
                                     train_loader, train_loader_remain, train_loader_gt,
                                     train_iter, train_gt_iter, train_remain_iter)
        val_log = val_runner.run(val_loader)
        val_metric = val_log['iou']

        # Always checkpoint the most recent generator/discriminator.
        save_model(generator, os.path.join(save_dir, 'g_latest.pth'), i, val_log['loss'], val_metric)
        save_model(discriminator, os.path.join(save_dir, 'd_latest.pth'), i, train_log['loss_D'], val_metric)

        # Checkpoint the best-IoU models separately.
        if val_metric > best_val_metric:
            best_val_metric = val_metric
            bms = val_log
            save_model(generator, os.path.join(save_dir, 'g_best.pth'), i, val_log['loss'], val_metric)
            save_model(discriminator, os.path.join(save_dir, 'd_best.pth'), i, train_log['loss_D'], val_metric)

        logger.write('Epoch:\t' + str(i))
        logger.write('Train:\t' + format_logs(train_log))
        logger.write('Val:\t' + format_logs(val_log))
        logger.write("Best:\t" + format_logs(bms))
        logger.write("\n")

        print("train:", train_log)
        print("val:", val_log)
        print("best_metric:\t" + format_logs(bms))


# Script entry point: run the full training loop when executed directly.
if __name__ == "__main__":
    train()
