"""
Created by Kostas Triaridis (@kostino)
in August 2023 @ ITI-CERTH
"""
import os
import argparse
import numpy as np
from torch.nn import utils
from tqdm import tqdm

from a2s_utils.a2s.loss import BTMLoss
from a2s_utils.a2s.model import A2S
from a2s_utils.cornet.model import Cornet
from common.utils import AverageMeter
from common.losses import TruForLossPhase2, TruForLoss
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
import logging
import torch
import torchvision.transforms.functional as TF


from data.datasets import MixDataset
from common.metrics import computeDetectionMetrics, computeLocalizationMetrics
from models.cmnext_conf import CMNeXtWithConf
from common.split_params import group_weight
from common.lr_schedule import WarmUpPolyLR
from models.modal_extract import ModalitiesExtractor
from configs.cmnext_init_cfg import _C as config, update_config
from a2s_utils.cornet.loss import Loss as model_loss

# --- CLI arguments --------------------------------------------------------
parser = argparse.ArgumentParser(description='')
parser.add_argument('-gpu', '--gpu', type=int, default=0, help='device, use -1 for cpu')
parser.add_argument('-log', '--log', type=str, default='INFO', help='logging level')
parser.add_argument('-exp', '--exp', type=str, default='experiments/ec_example.yaml', help='Yaml experiment file')
parser.add_argument('-ckpt', '--ckpt', type=str, default="/raid/wc/MMFusion/ckpt/TruFor_ori/best_val_loss.pth", help='Localization checkpoint')
parser.add_argument('opts', help="other options", default=None, nargs=argparse.REMAINDER)
# NOTE(review): hard-coding CUDA_VISIBLE_DEVICES to '0' leaves exactly one
# GPU visible, so '-gpu' values other than 0 (or -1) will fail at runtime —
# confirm whether this override should be removed or derived from args.gpu.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
args = parser.parse_args()

# Merge the experiment YAML on top of the default config node.
config = update_config(config, args.exp)

gpu = args.gpu
# Map the textual level (e.g. 'INFO') to the logging module's constant.
loglvl = getattr(logging, args.log.upper())
logging.basicConfig(level=loglvl, format='%(message)s')

# gpu == -1 selects CPU; otherwise the corresponding CUDA device.
device = 'cuda:%d' % gpu if gpu >= 0 else 'cpu'
np.set_printoptions(formatter={'float': '{: 7.3f}'.format})

if device != 'cpu':
    # cudnn setting: benchmark/determinism trade-off comes from the config.
    import torch.backends.cudnn as cudnn

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED


# Auxiliary-modality extractor. The first entry of MODALS is skipped —
# presumably it denotes the raw RGB stream, which needs no extraction;
# TODO confirm against the ModalitiesExtractor definition.
modal_extractor = ModalitiesExtractor(config.MODEL.MODALS[1:], config.MODEL.NP_WEIGHTS)

model = CMNeXtWithConf(config.MODEL)


# Restore both networks from the localization checkpoint given on the CLI.
ckpt = torch.load(args.ckpt)

# strict=False tolerates missing/unexpected keys in the main model's state
# dict (e.g. heads not present in the checkpoint); the extractor is strict.
model.load_state_dict(ckpt['state_dict'], strict=False)
modal_extractor.load_state_dict(ckpt['extractor_state_dict'])


modal_extractor.to(device)
model = model.to(device)

# cornet = Cornet()
# cornet.to(device)

# ---------------------------------------------------------------------------
# Datasets, loaders, loss, and logging directories.
# ---------------------------------------------------------------------------
train = MixDataset(config.DATASET.TRAIN,
                   config.DATASET.IMG_SIZE,
                   train=True,
                   class_weight=config.DATASET.CLASS_WEIGHTS)

val = MixDataset(config.DATASET.VAL,
                 config.DATASET.IMG_SIZE,
                 train=False)

logging.info(train.get_info())
train_loader = DataLoader(train,
                          batch_size=config.BATCH_SIZE,
                          shuffle=True,
                          num_workers=config.WORKERS,
                          pin_memory=True)

# Validation uses batch_size=1 so each image's prediction can be squeezed to
# a single map for the per-image localization metrics below.
val_loader = DataLoader(val,
                        batch_size=1,
                        shuffle=False,
                        num_workers=config.WORKERS,
                        pin_memory=True)

# Class-weighted loss; pixels labeled -1 are excluded from the loss.
criterion = TruForLoss(weights=train.class_weights.to(device), ignore_index=-1)

# Checkpoint and TensorBoard output directories.
os.makedirs('./ckpt/{}'.format(config.MODEL.NAME), exist_ok=True)
logdir = './{}/{}'.format(config.LOG_DIR, config.MODEL.NAME)
os.makedirs(logdir, exist_ok=True)
# Reuse the already-built logdir instead of formatting the same path twice.
writer = SummaryWriter(logdir)

# ---------------------------------------------------------------------------
# Optimizer, LR schedule, and AMP scaler.
# ---------------------------------------------------------------------------
# group_weight splits the model's parameters into groups (BatchNorm handled
# separately) at the configured base learning rate.
cmnext_params = group_weight([], model, torch.nn.BatchNorm2d, config.LEARNING_RATE)
params = cmnext_params

optimizer = torch.optim.SGD(
    params,
    lr=config.LEARNING_RATE,
    momentum=config.SGD_MOMENTUM,
    weight_decay=config.WD,
)

# Iteration bookkeeping for the polynomial schedule.
iters_per_epoch = len(train_loader)
iters = 0
max_iters = iters_per_epoch * config.EPOCHS
min_loss = 100  # running best validation loss (initialized above any real value)

# Linear warm-up for WARMUP_EPOCHS, then polynomial decay (power 0.9).
lr_schedule = WarmUpPolyLR(
    optimizer,
    start_lr=config.LEARNING_RATE,
    lr_power=0.9,
    total_iters=max_iters,
    warmup_steps=config.WARMUP_EPOCHS * iters_per_epoch,
)

# Gradient scaler for float16 autocast training.
scaler = torch.cuda.amp.GradScaler()

for epoch in range(config.EPOCHS):
    # ---- training -------------------------------------------------------
    train.shuffle()  # re-draw the balanced sampling order each epoch
    model.set_train()
    # NOTE(review): the extractor is kept in val mode during training —
    # presumably it is frozen; confirm this is intended.
    modal_extractor.set_val()
    avg_loss = AverageMeter()
    # BUG FIX: the meter was previously also named `btm_loss` and was
    # clobbered by the loss tensor inside the loop, so `.update()` /
    # `.average()` were then called on a tensor. Distinct name fixes that.
    btm_meter = AverageMeter()
    optimizer.zero_grad(set_to_none=True)
    pbar = tqdm(train_loader, desc='Training Epoch {}/{}'.format(epoch + 1, config.EPOCHS), unit='steps')
    for step, (images, _, masks, lab, edge) in enumerate(pbar):

        images = images.to(device, non_blocking=True)
        masks = masks.to(device, non_blocking=True)

        with torch.autocast(device_type='cuda', dtype=torch.float16):
            modals = modal_extractor(images)

            # ImageNet normalization for the RGB stream only.
            images_norm = TF.normalize(images, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            inp = [images_norm] + modals

            pred = model(inp, epoch, masks)
            # BUG FIX: the original divided the (ce, btm) tuple by a scalar,
            # which raises TypeError. Unpack first, then scale each term for
            # gradient accumulation.
            ce_loss, btm_loss_t = criterion(pred, masks.float())
            ce_loss = ce_loss / config.ACCUMULATE_ITERS
            btm_loss_t = btm_loss_t / config.ACCUMULATE_ITERS

            # NOTE(review): only the CE term is backpropagated; the btm term
            # is logged but not optimized — confirm this is intended.
            loss = ce_loss

        scaler.scale(loss).backward()
        # Step the optimizer every ACCUMULATE_ITERS mini-batches (and at the
        # end of the epoch, so the tail batches are not dropped).
        if ((step + 1) % config.ACCUMULATE_ITERS == 0) or (step + 1 == len(train_loader)):
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad(set_to_none=True)

        avg_loss.update(loss.detach().item())
        btm_meter.update(btm_loss_t.detach().item())

        curr_iters = epoch * iters_per_epoch + step

        lr_schedule.step(cur_iter=curr_iters)

        writer.add_scalar('Learning Rate', optimizer.param_groups[0]['lr'], curr_iters)

        # Once per epoch, log a visual strip: input | ground truth | prediction.
        if step == 0:
            maps = torch.nn.functional.softmax(pred, dim=1)[:, 1:2, :, :]
            writer.add_images('Images-Masks-Preds',
                              torch.cat((
                                  images,
                                  torch.tile(masks, (1, 3, 1, 1)),
                                  torch.tile(maps, (1, 3, 1, 1))), -2)
                              , epoch)
            writer.add_images('masks', masks, epoch)

        pbar.set_postfix({"last_loss": loss.detach().item(),
                          "epoch_loss": avg_loss.average(),
                          "btm_loss": btm_meter.average()})
    writer.add_scalar('Training Loss', avg_loss.average(), epoch)
    writer.add_scalar('btm_loss', btm_meter.average(), epoch)

    # ---- validation -----------------------------------------------------
    f1 = []
    f1th = []
    val_loss_avg = AverageMeter()
    model.set_val()
    modal_extractor.set_val()
    pbar = tqdm(val_loader, desc='Validating Epoch {}/{}'.format(epoch + 1, config.EPOCHS), unit='steps')
    for step, (images, _, masks, lab, edge) in enumerate(pbar):
        with torch.no_grad():
            images = images.to(device, non_blocking=True)
            masks = masks.to(device, non_blocking=True)
            with torch.autocast(device_type='cuda', dtype=torch.float16):
                modals = modal_extractor(images)

                images_norm = TF.normalize(images, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                inp = [images_norm] + modals

                pred = model(inp, epoch, masks)
                # The training loop above unpacks (ce, btm) from criterion,
                # while the original validation code expected one tensor.
                # Accept either shape and keep only the CE term here.
                val_out = criterion(pred, masks.float())
                val_loss = val_out[0] if isinstance(val_out, tuple) else val_out
            val_loss_avg.update(val_loss.detach().item())
            gt = masks.squeeze().cpu().numpy()
            # Forgery-class probability map (renamed from `map`, which
            # shadowed the builtin).
            heatmap = torch.nn.functional.softmax(pred, dim=1)[:, 1:2, :, :].squeeze().cpu().numpy()

            F1_best, F1_th = computeLocalizationMetrics(heatmap, gt)
            f1th.append(F1_th)
            f1.append(F1_best)
    writer.add_scalar('Val Loss', val_loss_avg.average(), epoch)
    writer.add_scalar('Val F1 best', np.nanmean(f1), epoch)
    writer.add_scalar('Val F1 fixed', np.nanmean(f1th), epoch)

    # Keep the checkpoint with the lowest validation loss seen so far.
    if val_loss_avg.average() < min_loss:
        min_loss = val_loss_avg.average()
        result = {'epoch': epoch, 'val_loss': val_loss_avg.average(), 'val_f1_best': np.nanmean(f1),
                  'val_f1_fixed': np.nanmean(f1th), 'state_dict': model.state_dict(),
                  'extractor_state_dict': modal_extractor.state_dict()}
        torch.save(result, './ckpt/{}/best_val_loss.pth'.format(config.MODEL.NAME))

# Final snapshot after the last epoch, regardless of validation performance.
# NOTE(review): if config.EPOCHS == 0 the names below (val_loss_avg, f1,
# f1th) are never bound inside the loop and this raises NameError — confirm
# the config guarantees EPOCHS >= 1.
result = {'epoch': config.EPOCHS - 1, 'val_loss': val_loss_avg.average(), 'val_f1_best': np.nanmean(f1),
          'val_f1_fixed': np.nanmean(f1th), 'state_dict': model.state_dict(),
          'extractor_state_dict': modal_extractor.state_dict()}
# result = {'epoch': config.EPOCHS - 1, 'val_loss': val_loss_avg.average(), 'val_f1': np.nanmean(f1)
#     , 'state_dict': model.state_dict(),'extractor_state_dict': modal_extractor.state_dict()}
torch.save(result, './ckpt/{}/final.pth'.format(config.MODEL.NAME))
