"""
Created by Kostas Triaridis (@kostino)
in August 2023 @ ITI-CERTH
"""
import os
import argparse
import numpy as np
from tqdm import tqdm
from common.utils import AverageMeter
from common.losses import TruForLoss, TruForLoss_sal
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
import logging
import torch
import torchvision.transforms.functional as TF
from torch.nn import functional as F
from data.datasets import MixDataset
from common.metrics import computeLocalizationMetrics
from loss import SSIM, FocalLoss
from models.cmnext_conf_3 import CMNeXtWithConf
from common.split_params import group_weight
from common.lr_schedule import WarmUpPolyLR
from models.modal_extract import ModalitiesExtractor
from configs.cmnext_init_cfg import _C as config, update_config
import utils_iml.iml_transforms, utils_iml.datasets
# --- CLI arguments, experiment config, logging, device selection -----------
available_device = 1  # default GPU index used when -gpu is not given

parser = argparse.ArgumentParser(description='')
parser.add_argument('-gpu', '--gpu', type=int, default=available_device, help='device, use -1 for cpu')
parser.add_argument('-log', '--log', type=str, default='INFO', help='logging level')
parser.add_argument('-train_bayar', '--train_bayar', default=False, action='store_true', help='finetune bayar conv')
parser.add_argument('-exp', '--exp', type=str, default='experiments/ec_example.yaml', help='Yaml experiment file')
parser.add_argument('opts', help="other options", default=None, nargs=argparse.REMAINDER)

args = parser.parse_args()

# Merge the YAML experiment file into the default config.
config = update_config(config, args.exp)

gpu = args.gpu
loglvl = getattr(logging, args.log.upper())
logging.basicConfig(level=loglvl, format='%(message)s')

# Fix: honour the -gpu flag (the old code always masked to device 1 regardless
# of the flag), and address the surviving GPU as 'cuda:0' — once
# CUDA_VISIBLE_DEVICES hides all other devices, the remaining one is
# re-indexed to 0, so the original 'cuda:%d' % gpu crashed for any gpu != 0.
# NOTE: this must run before the first CUDA context is created; no CUDA call
# has been made yet at this point in the script.
if gpu >= 0:
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    device = 'cuda:0'
else:
    device = 'cpu'
np.set_printoptions(formatter={'float': '{: 7.3f}'.format})

torch.set_flush_denormal(True)
if device != 'cpu':
    # cuDNN behaviour (autotuner / determinism) taken from the experiment config.
    import torch.backends.cudnn as cudnn

    cudnn.benchmark = config.CUDNN.BENCHMARK
    cudnn.deterministic = config.CUDNN.DETERMINISTIC
    cudnn.enabled = config.CUDNN.ENABLED


# Auxiliary-modality extractor (noiseprint / bayar, per config) whose outputs
# are fed to CMNeXt alongside the RGB image.
modal_extractor = ModalitiesExtractor(config.MODEL.MODALS[1:], config.MODEL.NP_WEIGHTS)
if 'bayar' in config.MODEL.MODALS:
    # Warm-start the Bayar branch from pretrained weights; strict=False
    # tolerates key mismatches between checkpoint and module.
    bayar_state = torch.load('pretrained/modal_extractor/bayar_mhsa.pth',
                             map_location=torch.device('cpu'))
    modal_extractor.load_state_dict(bayar_state, strict=False)
    if not args.train_bayar:
        # Keep the pretrained Bayar conv frozen unless finetuning was requested.
        modal_extractor.bayar.eval()
        for frozen in modal_extractor.bayar.parameters():
            frozen.requires_grad = False

model = CMNeXtWithConf(config.MODEL)

modal_extractor.to(device)
model = model.to(device)

# print(config.DATASET.TRAIN)

# IML-ViT DataLoader
# train_transform =  utils_iml.iml_transforms.get_albu_transforms('train')
# test_transform =  utils_iml.iml_transforms.get_albu_transforms('test')
# data_path = '/home/wc/disk1/IML-ViL/images/dataset/CASIAV2.txt'
# test_data_path = '/home/wc/disk1/IML-ViL/images/dataset/CASIAV1.txt'
# dataset_train = utils_iml.datasets.json_dataset(data_path,transform=train_transform, edge_width = 7)
# dataset_test = utils_iml.datasets.json_dataset(test_data_path,transform=test_transform, edge_width = 7, if_return_shape=True)
#
# print(dataset_train)
# print(dataset_test)
#
# sampler_train = torch.utils.data.RandomSampler(dataset_train)
# sampler_test = torch.utils.data.RandomSampler(dataset_test)
#
# data_loader_train = torch.utils.data.DataLoader(
#     dataset_train, sampler=sampler_train,
#     batch_size=config.BATCH_SIZE,
#     num_workers=config.WORKERS,
#     pin_memory=True,
#     drop_last=True,
# )
#
# data_loader_test = torch.utils.data.DataLoader(
#     dataset_test, sampler=sampler_test,
#     batch_size=1,
#     num_workers=config.WORKERS,
#     pin_memory=True,
#     drop_last=False,
# )

# --- TruFor data pipeline, losses, and output directories -------------------
# Balanced multi-dataset training set; the per-class weights declared in the
# config are consumed by the localization loss below.
train = MixDataset(config.DATASET.TRAIN,
                   config.DATASET.IMG_SIZE,
                   train=True,
                   class_weight=config.DATASET.CLASS_WEIGHTS)

val = MixDataset(config.DATASET.VAL,
                 config.DATASET.IMG_SIZE,
                 train=False)

logging.info(train.get_info())
train_loader = DataLoader(train,
                          batch_size=config.BATCH_SIZE,
                          shuffle=True,
                          num_workers=config.WORKERS,
                          pin_memory=True)

# Validation uses batch_size=1 so variable-sized images need no collation.
val_loader = DataLoader(val,
                        batch_size=1,
                        shuffle=False,
                        num_workers=config.WORKERS,
                        pin_memory=True)

# Main localization loss (class-weighted) plus the saliency-head loss;
# ignore_index=-1 skips unlabeled pixels.
criterion = TruForLoss(weights=train.class_weights.to(device), ignore_index=-1, device=device)
criterion_sal = TruForLoss_sal(ignore_index=-1, device=device)

# Checkpoint and TensorBoard directories.
os.makedirs('./ckpt/{}'.format(config.MODEL.NAME), exist_ok=True)
logdir = './{}/{}'.format(config.LOG_DIR, config.MODEL.NAME)
os.makedirs(logdir, exist_ok=True)
# Fix: reuse `logdir` instead of rebuilding the identical path a second time.
writer = SummaryWriter(logdir)

# --- Optimizer, LR schedule, AMP scaler -------------------------------------
# group_weight splits each module's parameters into a decay group ([0]) and a
# no-decay group ([1], norms/biases) so weight decay is applied selectively.
params = []
cmnext_params = []
modal_extract_params = []
cmnext_params = group_weight(cmnext_params, model, torch.nn.BatchNorm2d, config.LEARNING_RATE)
modal_extract_params = group_weight(modal_extract_params, modal_extractor, torch.nn.BatchNorm2d, config.LEARNING_RATE)

params.append(dict(params=cmnext_params[0]['params'] + modal_extract_params[0]['params'], lr=config.LEARNING_RATE))
params.append(dict(params=cmnext_params[1]['params'] + modal_extract_params[1]['params'], weight_decay=.0,
                   lr=config.LEARNING_RATE))

optimizer = torch.optim.SGD(params,
                            lr=config.LEARNING_RATE,
                            momentum=config.SGD_MOMENTUM,
                            weight_decay=config.WD
                            )

iters_per_epoch = len(train_loader)
iters = 0
max_iters = config.EPOCHS * iters_per_epoch
# Fix: use +inf instead of the arbitrary sentinel 100 so the first epoch is
# always checkpointed as "best" even when the loss starts above 100.
min_loss = float('inf')
max_val = 0.0

# Linear warmup for WARMUP_EPOCHS, then polynomial decay over max_iters.
lr_schedule = WarmUpPolyLR(optimizer,
                           start_lr=config.LEARNING_RATE,
                           lr_power=config.POLY_POWER,
                           total_iters=max_iters,
                           warmup_steps=iters_per_epoch * config.WARMUP_EPOCHS)

# Gradient scaler paired with the torch.autocast(float16) regions in the loop.
scaler = torch.cuda.amp.GradScaler()


# ---------------------------------------------------------------------------
# Main loop: one training pass + one validation pass per epoch, mixed
# precision (fp16 autocast + GradScaler) with gradient accumulation over
# config.ACCUMULATE_ITERS steps.
# ---------------------------------------------------------------------------
for epoch in range(config.EPOCHS):
    train.shuffle()  # for balanced sampling
    model.set_train()
    if args.train_bayar:
        modal_extractor.set_train()
    avg_loss = AverageMeter()
    # if config.MODEL.ENM:
    # btm_loss_meter = AverageMeter()
    # if config.MODEL.AAM:
    de_loss_meter = AverageMeter()  # ----best---- #
    optimizer.zero_grad(set_to_none=True)
    pbar = tqdm(train_loader, desc='Training Epoch {}/{}'.format(epoch + 1, config.EPOCHS), unit='steps')
    # Loader yields 5-tuples; only image, mask, and edge are unpacked here.
    # NOTE(review): the two ignored fields are presumably a path/label pair —
    # confirm against MixDataset.__getitem__.
    for step, (images, _, masks, _, edge) in enumerate(pbar):
    # for step, (images, masks, edge,ori_img) in enumerate(pbar):

        images = images.to(device, non_blocking=True)
        masks = masks.squeeze(1).to(device, non_blocking=True)
        # edge = edge.to(device, non_blocking=True)
        # Forward pass in fp16 autocast; loss is scaled below by GradScaler.
        with torch.autocast(device_type='cuda', dtype=torch.float16):
            modals = modal_extractor(images)

            # ImageNet normalization applied to the RGB stream only; the
            # extracted modality maps are passed through unnormalized.
            images_norm = TF.normalize(images, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            inp = [images_norm] + modals
            # pred, cdo_loss, _, _ = model(inp, ori_img, masks)
            # NOTE(review): if both config.MODEL.AAM and config.MODEL.ENM are
            # False, `pred` is never assigned and the softmax below raises a
            # NameError — confirm at least one flag is always enabled.
            if config.MODEL.AAM:
                pred, out2 = model(inp, epoch, masks)
            if config.MODEL.ENM and not config.MODEL.AAM:
                pred = model(inp, epoch, masks)

            # p = P(manipulated) per pixel; computed but only used implicitly
            # via `pred` below.
            p = torch.nn.functional.softmax(pred, dim=1)[:, 1, :, :]
            # edge_loss = F.binary_cross_entropy_with_logits(
            #     input=pred[:, 1, :, :],
            #     target=masks.float(),
            #     weight=edge.squeeze(1)
            # ) * 10.0
            # loss = criterion(pred, masks.long(),inp,epoch) / config.ACCUMULATE_ITERS
            ####new_best####

            # Loss assembly: each term is divided by ACCUMULATE_ITERS so the
            # accumulated gradient matches a single large-batch step.
            if config.MODEL.ENM:
                ce_loss, btm_loss = criterion(pred, masks.long(), inp, epoch)
                ce_loss = ce_loss / config.ACCUMULATE_ITERS
                loss = ce_loss
            if config.MODEL.AAM:
                # ce_loss , btm_loss= criterion(pred, masks.long(), inp, epoch)
                ce_loss = criterion(pred, masks.long(), inp, epoch)
                ce_loss = ce_loss / config.ACCUMULATE_ITERS
                # NOTE(review): out2 is duplicated along the channel dim —
                # presumably to give the saliency loss a 2-channel logit map;
                # confirm against TruForLoss_sal's expected input.
                out2 = torch.cat((out2, out2), dim=1)
                de_loss = criterion_sal(out2, masks.long(), epoch)
                de_loss = de_loss / config.ACCUMULATE_ITERS
                loss = ce_loss + de_loss
        scaler.scale(loss).backward()
        # Optimizer step only every ACCUMULATE_ITERS micro-batches (and on the
        # final batch of the epoch so no gradient is left unapplied).
        if ((step + 1) % config.ACCUMULATE_ITERS == 0) or (step + 1 == len(train_loader)):
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad(set_to_none=True)

        avg_loss.update(loss.detach().item())
        # if config.MODEL.ENM:
        # btm_loss_meter.update(btm_loss.detach().item())
        # if config.MODEL.AAM:
        # NOTE(review): de_loss is only assigned inside the AAM branch above —
        # this unconditional update raises NameError when config.MODEL.AAM is
        # False; confirm AAM is always on, or guard this line.
        de_loss_meter.update(de_loss.detach().item())
        # edge_loss_meter.update(edge_loss.detach().item())

        # Per-iteration LR schedule (warmup + poly decay), logged to TensorBoard.
        curr_iters = epoch * iters_per_epoch + step
        lr_schedule.step(cur_iter=curr_iters)
        writer.add_scalar('Learning Rate', optimizer.param_groups[0]['lr'], curr_iters)

        # Log a qualitative panel (input | GT mask | prediction) for the first
        # batch of each epoch.
        if step == 0:
            maps = torch.nn.functional.softmax(pred, dim=1)[:, 1:2, :, :]
            writer.add_images('Images-Masks-Preds',
                              torch.cat((
                                  images,
                                  torch.tile(masks.unsqueeze(1), (1, 3, 1, 1)),
                                  torch.tile(maps, (1, 3, 1, 1))), -2)
                              , epoch)
            writer.add_images('masks', masks.unsqueeze(1), epoch)
            # writer.add_images('edge_mask', edge, epoch)

        # if config.MODEL.ENM:
            # pbar.set_postfix({"last_loss": loss.detach().item(), "epoch_loss": avg_loss.average() , "btm_loss": btm_loss_meter.average()})
        if config.MODEL.AAM:
            pbar.set_postfix({"last_loss": loss.detach().item(), "epoch_loss": avg_loss.average(),"de_loss":de_loss_meter.average()})  # ----best---- #
    writer.add_scalar('Training Loss', avg_loss.average(), epoch)
    # if config.MODEL.ENM:
    # writer.add_scalar('btm_loss', btm_loss_meter.average(), epoch)
    # if config.MODEL.AAM:
    writer.add_scalar('de_loss', de_loss_meter.average(), epoch)     # ----best---- #
    # writer.add_scalar('Edge Loss', edge_loss_meter.average(), epoch)

    # --- Validation: per-image F1 (best-threshold and fixed-threshold) ------
    f1 = []
    f1th = []
    val_loss_avg = AverageMeter()
    model.set_val()
    modal_extractor.set_val()
    pbar = tqdm(val_loader, desc='Validating Epoch {}/{}'.format(epoch + 1, config.EPOCHS), unit='steps')
    for step, (images, _, masks, lab, edge) in enumerate(pbar):
    # for step, (images, masks, edge, ori_img,shape) in enumerate(pbar):
        with torch.no_grad():
            images = images.to(device, non_blocking=True)
            masks = masks.squeeze(1).to(device, non_blocking=True)
            # ori_img = ori_img.to(device, non_blocking=True)
            # foreground = foreground.squeeze(1).to(device, non_blocking=True)
            # background = background.squeeze(1).to(device, non_blocking=True)
            with torch.autocast(device_type='cuda', dtype=torch.float16):
                modals = modal_extractor(images)
                images_norm = TF.normalize(images, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                inp = [images_norm] + modals
                # pred = model(inp)
                if config.MODEL.ENM:
                    pred = model(inp, epoch, masks)
                if config.MODEL.AAM:
                    pred, _ = model(inp, epoch, masks)
                # if config.MODEL.ENM:
                # val_loss, btm_loss = criterion(pred, masks.long(),inp, epoch)  #new
                # NOTE(review): in the training ENM branch `criterion` returned
                # a (ce_loss, btm_loss) pair; if it does the same here,
                # val_loss is a tuple and .detach() below fails — confirm
                # criterion returns a single tensor in eval mode.
                val_loss = criterion(pred, masks.long(), inp, epoch)
                # else:
                #     val_loss = criterion(pred, masks.long(),inp, epoch)
            val_loss_avg.update(val_loss.detach().item())
            # squeeze() relies on the val batch size being 1 (set above).
            gt = masks.squeeze().cpu().numpy()
            map = torch.nn.functional.softmax(pred, dim=1)[:, 1:2, :, :].squeeze().cpu().numpy()
            # gt = masks.unsqueeze(0)
            # map = torch.nn.functional.softmax(pred, dim=1)[:, 1, :, :].unsqueeze(0)
            # shape = [(gt.shape[2], gt.shape[3])]
            F1_best, F1_th = computeLocalizationMetrics(map, gt)
            # local_f1 = computeLocalizationMetrics(map, gt,shape)
            # f1.append(local_f1.cpu().numpy())
            f1th.append(F1_th)
            f1.append(F1_best)

    writer.add_scalar('Val Loss', val_loss_avg.average(), epoch)
    # writer.add_scalar('Val F1', np.nanmean(f1), epoch)
    writer.add_scalar('Val F1 best', np.nanmean(f1), epoch)
    writer.add_scalar('Val F1 fixed', np.nanmean(f1th), epoch)

    # if np.nanmean(f1th) > max_val:
    # max_val = np.nanmean(f1th)
    # result = {'epoch': epoch, 'val_loss': val_loss_avg.average(), 'val_f1_best': np.nanmean(f1),
    #           'val_f1_fixed': np.nanmean(f1th), 'state_dict': model.state_dict(),
    #           'extractor_state_dict': modal_extractor.state_dict()}
    # # result = {'epoch': epoch, 'val_loss': val_loss_avg.average(), 'val_f1': np.nanmean(f1)
    # #     , 'state_dict': model.state_dict(),'extractor_state_dict': modal_extractor.state_dict()}
    # torch.save(result, './ckpt/{}/{}.pth'.format(config.MODEL.NAME, epoch))

    # Checkpoint whenever the mean validation loss improves.
    if val_loss_avg.average() < min_loss:
        min_loss = val_loss_avg.average()
        result = {'epoch': epoch, 'val_loss': val_loss_avg.average(), 'val_f1_best': np.nanmean(f1),
                  'val_f1_fixed': np.nanmean(f1th), 'state_dict': model.state_dict(),
                  'extractor_state_dict': modal_extractor.state_dict()}
        # result = {'epoch': epoch, 'val_loss': val_loss_avg.average(), 'val_f1': np.nanmean(f1)
        #     , 'state_dict': model.state_dict(),'extractor_state_dict': modal_extractor.state_dict()}
        torch.save(result, './ckpt/{}/best_val_loss.pth'.format(config.MODEL.NAME))

# Always persist the last-epoch weights together with the final validation
# metrics, regardless of whether they beat the best-val-loss checkpoint.
final_ckpt = {
    'epoch': config.EPOCHS - 1,
    'val_loss': val_loss_avg.average(),
    'val_f1_best': np.nanmean(f1),
    'val_f1_fixed': np.nanmean(f1th),
    'state_dict': model.state_dict(),
    'extractor_state_dict': modal_extractor.state_dict(),
}
torch.save(final_ckpt, './ckpt/{}/final.pth'.format(config.MODEL.NAME))
