import os

import torch
import torch.nn as nn
from torch.backends import cudnn
from torch.cuda import amp
from torch.utils.data.dataloader import DataLoader

from Epoch import TrainEpoch, ValEpoch


class Logger:
    """Minimal append-only text logger that terminates every entry with CRLF."""

    def __init__(self, log_path):
        # Path of the log file; created on first write, appended to afterwards.
        self.log_path = log_path

    def write(self, txt):
        """Append *txt* to the log file, followed by a CRLF line ending."""
        with open(self.log_path, 'a') as log_file:
            log_file.write(txt + "\r\n")


def get_learning_rate_group(optimizer):
    """Return the learning rate of every optimizer param group, in group order."""
    return [group['lr'] for group in optimizer.param_groups]


def get_learning_rate(optimizer):
    """Return the learning rate of the optimizer's first param group."""
    first_group = optimizer.param_groups[0]
    return first_group['lr']


def format_logs(logs):
    """Render a metrics dict as comma-separated 'name - value' pairs.

    Values are formatted with 4 significant digits (general float format).
    """
    return ', '.join(f'{key} - {value:.4}' for key, value in logs.items())


def get_train_val_loader(train_dataset, val_dataset, batch_size, num_workers, vb=-1):
    """Build the (train, val) DataLoader pair.

    The train loader shuffles and drops the last incomplete batch; the val
    loader keeps order, keeps partial batches, and always uses one worker.
    vb == -1 means "use the training batch size for validation".
    """
    val_batch = batch_size if vb == -1 else vb
    train_loader = DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        drop_last=True,
        shuffle=True,
    )
    val_loader = DataLoader(dataset=val_dataset, batch_size=val_batch,
                            num_workers=1, shuffle=False)
    return train_loader, val_loader


def load_checkpoint(model, checkpoint_path):
    """Load the 'net' entry of the checkpoint at *checkpoint_path* into *model*.

    NOTE(review): the fallback path substitution below looks like a
    machine-specific remap between two hosts' directory layouts — confirm
    it is still wanted before reusing this module elsewhere.
    """
    path = checkpoint_path if os.path.exists(checkpoint_path) \
        else checkpoint_path.replace("peter/zze/codes/", "hjr/zze/")
    state = torch.load(path)
    model.load_state_dict(state['net'])
    # Drop the reference promptly so the (possibly large) checkpoint is freed.
    del state


def base_train(args, args_text,
               train_dataset, val_dataset,
               net, checkpoint_path,
               optimizer, criterion, metrics, scheduler=None):
    """Run the full train/validate loop and checkpoint the latest/best models.

    Args:
        args: namespace providing work_dirs, log, bs, vbs, amp, data_parallel
            and epochs (read below).
        args_text: raw YAML text of the run configuration, saved as args.yaml.
        train_dataset, val_dataset: datasets wrapped into DataLoaders here.
        net: model to train; runs on "cuda" via the epoch runners.
        checkpoint_path: optional checkpoint whose 'net' entry initializes net.
        optimizer, criterion, metrics: training components handed to the runners.
        scheduler: optional LR scheduler, stepped once per epoch after validation.

    Side effects: writes args.yaml, train.log, model_latest.pth and model.pth
    under {args.work_dirs}/{args.log}.
    """
    # Persist the run configuration and open the text log.
    save_dir = os.path.join(args.work_dirs, args.log)
    os.makedirs(save_dir, exist_ok=True)
    with open(os.path.join(save_dir, 'args.yaml'), 'w') as f:
        f.write(args_text)
    logger = Logger(save_dir + '/' + 'train.log')
    logger.write(str(args))

    # Dataloaders: 4 train workers; args.vbs == -1 falls back to args.bs.
    train_loader, val_loader = get_train_val_loader(train_dataset, val_dataset, args.bs, 4, args.vbs)

    logger.write('Train num:%d batches:%d ---- Valid num:%d batches:%d' %
                 (len(train_dataset), len(train_loader), len(val_dataset), len(val_loader)))
    # Mixed-precision training: a GradScaler only exists when AMP is enabled.
    scaler = None
    if args.amp:
        scaler = amp.GradScaler()

    # Load pretrained / resume weights, if a checkpoint path was given.
    if checkpoint_path:
        load_checkpoint(net, checkpoint_path)

    # Multi-GPU data parallelism.
    if args.data_parallel:
        net = nn.DataParallel(net)

    train_runner = TrainEpoch(scaler, net, criterion, metrics, optimizer, 'train', "cuda", args.amp)
    val_runner = ValEpoch(scaler, net, criterion, metrics, 'val', "cuda")

    best_metric = 0
    # Best-epoch log dict. Starts empty (format_logs({}) -> '') instead of 0,
    # which would crash format_logs() if no epoch ever beat best_metric == 0.
    bms = {}
    for epoch in range(args.epochs):
        print('Epoch: %d - lr: %.5f' % (epoch, get_learning_rate(optimizer)))
        train_log = train_runner.run(epoch, train_loader)
        val_log = val_runner.run(epoch, val_loader)

        if scheduler is not None:
            scheduler.step()

        # Assumes the metrics produce an 'iou' entry — the model-selection score.
        val_loss, val_metric = val_log['loss'], val_log['iou']

        # Save the latest model every epoch. Unwrap DataParallel so the stored
        # state dict has no 'module.' prefixes.
        state_dict = net.module.state_dict() if hasattr(net, 'module') else net.state_dict()
        torch.save({
            'net': state_dict,
            'epoch': epoch,
            'loss': val_loss,
            'metric': val_metric
        }, os.path.join(save_dir, 'model_latest.pth'))

        # Save the best model whenever the validation metric improves.
        if val_metric > best_metric:
            best_metric = val_metric
            bms = val_log
            torch.save({
                'net': state_dict,
                'epoch': epoch,
                'loss': val_loss,
                'metric': val_metric
            }, os.path.join(save_dir, 'model.pth'))

        logger.write('Epoch:\t' + str(epoch))
        logger.write('Train:\t' + format_logs(train_log))
        logger.write('Val:\t' + format_logs(val_log))
        logger.write("Best:\t" + format_logs(bms))
        logger.write("\n")

        print("train:", train_log)
        print("val:", val_log)
        print("best_metric:\t" + format_logs(bms))