"""
This code runs efficiently in terms of execution time.
Storing per-batch losses in Python lists is probably not ideal, though.
----------------
predefine:

model names
parameters about optimizer & schedulers
save paths
----------------
variables:

best_correct_{split}
best_correct_epoch_{split}
dataset_train, dataset_val, dataset_test
dataloader_train, dataloader_val, dataloader_test
n_epoch_train, n_epoch_val, n_epoch_test
n_img_train, n_img_val, n_img_test

model_1,
optimizer_1
scheduler_1

loss_batch_1 (sum of all losses for model 1, for back prop)
loss_epoch_1 (scalar)
n_corrects_epoch_1
n_corrects_epoch_ens
cm_epoch_1
cm_epoch_ens
lr_1

imgs
labels
path

prediction_1
prediction_ens
"""
from flyai.data_helper import DataHelper
from flyai.framework import FlyAI
from flyai.utils.log_helper import train_log

import torch
from torch import nn
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR
from torch.utils.tensorboard import SummaryWriter

from path import *
from config import cfg, parser_info
from model import get_models, unfreeze_model, get_model_by_name
from confusion_matrix import confusion_matrix
from warm_scheduler import get_scheduler
from timer import Timer, TimeAccumulator
from logger import logger
from losses import mse_loss as criterion_mse, FocalLoss
from dataset import BaldClassificationDataset

torch.backends.cudnn.benchmark = True


def add(base: str):
    """Sum the per-model variables named ``{base}_1 .. {base}_n``.

    The per-model variables (e.g. ``prediction_1``, ``prediction_2``) are
    created via ``exec`` elsewhere in this script and live in module
    globals, so they are looked up there directly.  Equivalent to the
    left-associative sum ``{base}_1 + {base}_2 + ...`` over ``range_models``,
    but without building and ``eval``-ing a source string.

    :param base: common prefix of the variables to sum, e.g. ``'prediction'``.
    :return: the sum of the looked-up values (whatever ``+`` yields for them).
    """
    values = [globals()[f'{base}_{i + 1}'] for i in range_models]
    total = values[0]
    for value in values[1:]:
        total = total + value  # left-associative, matching the original eval'd expression
    return total


# TensorBoard writer for per-epoch loss/accuracy curves (closed at end of file).
writer = SummaryWriter(LOG_PATH)

# Wall-clock timer used to bracket each split of each epoch.
timer = Timer()

# Number of individual models; when >1, one extra "ensemble" slot is tracked
# alongside the per-model slots (hence n_models + 1).
n_models = cfg.n_models
n_ens = 1 if n_models == 1 else n_models + 1

# Loss criteria: criterion_1 is always present (focal or BCE); criterion_2
# (MSE) is added only when requested.
# NOTE(review): `args` is never assigned in this file — `parser_info` is
# imported from config but never called.  Presumably `args = parser_info()`
# (or similar) is missing here; as written these two lines raise NameError.
# Confirm against the config module.
n_criteria = 1
criterion_1 = FocalLoss() if args.use_focal else nn.BCELoss()
if args.use_mse:
    n_criteria += 1
    criterion_2 = criterion_mse

# Pre-built ranges used by every exec/eval loop below.
range_models = range(n_models)
range_ens = range(n_ens)
range_criteria = range(n_criteria)

# Log a batch-progress line every `batch_intrvl` batches (<=0 disables it).
batch_intrvl = cfg.train_info_interval

# On the FlyAI platform there is no test split; locally all three are used.
if cfg.train_flyai:
    splits = ['train', 'val']
else:
    splits = ['train', 'val', 'test']

# --statistics-- #
# For each split, exec() creates split-suffixed module globals:
# dataset_*, dataloader_*, n_batch_*, n_img_*, and the running best-accuracy
# trackers best_correct_* / best_correct_epoch_* (one slot per model, plus
# one ensemble slot when n_models > 1).
for split in splits:
    # dataset & dataloader
    exec(f"dataset_{split} = BaldClassificationDataset(split='{split}')")
    exec(f"dataloader_{split} = DataLoader(dataset_{split}, batch_size=cfg.BATCH_{split}, shuffle=cfg.shuffle_{split})")
    exec(f'n_batch_{split} = len(dataloader_{split})')
    exec(f'n_img_{split} = len(dataset_{split})')
    # best metric
    exec(f'best_correct_{split} = [0 for _ in range(n_ens)]')
    exec(f'best_correct_epoch_{split} = [0 for _ in range(n_ens)]')

# save path
# Checkpoint path layout per split (matches the save logic in the epoch loop):
# indices [0, n_models)           -> per-model best checkpoints   (suffix 2 / 4)
# indices [n_models, 2*n_models) -> ensemble-best checkpoints     (suffix 1 / 3)
# MODEL_FILE_PATH* constants come from `path` (star-imported above).
save_path = {'val': [eval(f'MODEL_FILE_PATH{i + 1}2') for i in range(n_models)] +
                    [eval(f'MODEL_FILE_PATH{i + 1}1') for i in range(n_models)],
             'test': [eval(f'MODEL_FILE_PATH{i + 1}4') for i in range(n_models)] +
                     [eval(f'MODEL_FILE_PATH{i + 1}3') for i in range(n_models)]
             }

# --models-- #
# exec() creates model_1 .. model_n from the names configured in cfg.model1 ..
for i in range_models:
    exec(f'model_{i + 1} = get_model_by_name(cfg.model{i + 1})')

# --optimizers & schedulers-- #
# One Adam optimizer per model, over the currently-trainable parameters only
# (frozen layers are excluded; the param list is refreshed after unfreezing).
# Each LambdaLR wraps the project's warm-up + cosine-restart schedule from
# warm_scheduler.get_scheduler, with per-model hyperparameters (cfg.lr1, ...).
for i in range_models:
    exec(f'optimizer_{i + 1} = torch.optim.Adam(params=filter(lambda p: p.requires_grad, model_{i + 1}.parameters()), '
         f'lr=cfg.base_lr, betas=(cfg.beta1, cfg.beta2), '
         f'eps=cfg.eps, weight_decay=cfg.weight_decay)')
    exec(f'scheduler_{i + 1} = LambdaLR(optimizer_{i + 1}, '
         f'get_scheduler(warm_epoch=cfg.warm_up{i + 1}, max_lr=cfg.lr{i + 1}, '
         f'T_0=cfg.T_0{i + 1}, T_mult=cfg.T_mult{i + 1}, eta_min=cfg.eta_min{i + 1}))')

# Main training loop: for each epoch, train on 'train', evaluate on 'val'
# (saving best checkpoints), and — when not on FlyAI — also on 'test'.
# All per-model state (model_i, optimizer_i, loss_batch_i, cm_epoch_i, ...)
# is manipulated through exec/eval on module globals; statement order matters.
for epoch in range(cfg.EPOCHS):
    # unfreeze
    # At the configured epoch, unfreeze each model's frozen layers and point
    # the optimizer's param group at the new (larger) trainable-param list.
    # NOTE(review): newly-unfrozen params get fresh Adam state from this point.
    if (epoch + 1) == cfg.unfreeze_epoch:
        for i in range_models:
            exec(f'unfreeze_model(model_{i + 1})')
            exec(f"optimizer_{i + 1}.param_groups[0]['params'] = "
                 f"list(filter(lambda p: p.requires_grad, model_{i + 1}.parameters()))")

    # --train-- #
    for i in range_models:
        exec(f'model_{i + 1}.train()')

    # confusion matrix
    # (class_num + 1)-sized square matrices; the usage below sums
    # diagonal()[:-1], i.e. the last row/column is excluded from the
    # correct-count.  NOTE(review): presumably an extra "invalid/unknown"
    # bucket filled by confusion_matrix() — confirm in confusion_matrix.py.
    for i in range_models:
        exec(f'cm_epoch_{i + 1} = torch.zeros((cfg.class_num + 1, cfg.class_num + 1)).int().cuda()')
    if n_models > 1:
        cm_epoch_ens = torch.zeros((cfg.class_num + 1, cfg.class_num + 1)).int().cuda()

    # `if True:` is indentation-only scaffolding so the train section mirrors
    # the val/test sections below.
    if True:
        # loss
        for i in range_models:
            exec(f'loss_epoch_{i + 1} = 0')

        # learning rate
        # NOTE(review): scheduler.get_lr() is deprecated in newer PyTorch in
        # favor of get_last_lr(); it may also emit a warning when called
        # outside of step().  Confirm against the pinned torch version.
        for i in range_models:
            exec(f'lr_{i + 1} = scheduler_{i + 1}.get_lr()[0]')

        split = 'train'
        timer.clock(f'epoch {epoch + 1} {split} begins', verbose=False)
        for batch, sample in enumerate(eval(f'dataloader_{split}')):
            imgs, labels, path = sample
            imgs = imgs.cuda()
            labels = labels.cuda()

            # forward
            # NOTE(review): the model forward takes (imgs, labels) — labels
            # are presumably consumed inside the model; verify in model.py.
            for i in range_models:
                exec(f'prediction_{i + 1} = model_{i + 1}(imgs, labels)')
            if n_models > 1:
                with torch.no_grad():
                    # Ensemble prediction = sum of all model predictions
                    # (kept out of the graph; it is never backpropagated).
                    prediction_ens = add('prediction')

            # confusion matrix
            # confusion_matrix() accumulates into cm_epoch_* in place.
            with torch.no_grad():
                for i in range_models:
                    exec(f'confusion_matrix(labels, prediction_{i + 1}, cm_epoch_{i + 1})')
                if n_models > 1:
                    exec(f'confusion_matrix(labels, prediction_ens, cm_epoch_ens)')

            # loss
            # loss_batch_i = criterion_1 (+ criterion_2 ... when enabled);
            # loss_epoch_i accumulates the detached scalar for logging.
            for i in range_models:
                exec(f'loss_batch_{i + 1} = criterion_1(prediction_{i + 1}, labels)')
            for j in range(1, n_criteria):  # multi-criteria
                for i in range_models:
                    exec(f'loss_batch_{i + 1} += criterion_{j + 1}(prediction_{i + 1}, labels)')
            for i in range_models:
                exec(f'loss_epoch_{i + 1} += float(loss_batch_{i + 1})')

            # back-propagation
            # Each model is stepped independently on its own loss.
            for i in range_models:
                exec(f'optimizer_{i + 1}.zero_grad()')
                exec(f'loss_batch_{i + 1}.backward()')
                exec(f'optimizer_{i + 1}.step()')

            # batch_info
            # Periodic progress line; disabled when batch_intrvl <= 0.
            if (batch_intrvl > 0) and (0 == (1 + batch) % batch_intrvl):
                info = f'[{split}]' \
                       f'[epoch {cfg.prefix}{epoch + 1}/{cfg.EPOCHS}{cfg.suffix}]' \
                       f"[batch: {batch + 1}/{eval(f'n_batch_{split}')}]"
                logger.logger.info(info)

        # scheduler
        # Per-epoch LR schedule step (after the full train pass).
        for i in range_models:
            exec(f'scheduler_{i + 1}.step()')

        # TP + TN
        # Correct count = sum of the confusion-matrix diagonal, excluding the
        # final (extra) class bucket; track per-model bests for this split.
        for i in range_models:
            exec(f'n_corrects_epoch_{i + 1} = float(cm_epoch_{i + 1}.diagonal()[:-1].sum())')
            if eval(f'best_correct_{split}[{i}]') < eval(f'n_corrects_epoch_{i + 1}'):
                exec(f'best_correct_{split}[{i}] = n_corrects_epoch_{i + 1}')
                exec(f'best_correct_epoch_{split}[{i}] = {epoch + 1}')
        if n_models > 1:
            # Ensemble best is tracked in the extra slot at index n_models.
            n_corrects_epoch_ens = float(cm_epoch_ens.diagonal()[:-1].sum())
            if eval(f'best_correct_{split}[{n_models}]') < n_corrects_epoch_ens:
                exec(f'best_correct_{split}[{n_models}] = n_corrects_epoch_ens')
                exec(f'best_correct_epoch_{split}[{n_models}] = {epoch + 1}')

        # epoch info
        # Per-model log line + TensorBoard scalars (loss averaged per batch,
        # accuracy as percent of images in the split).
        if True:
            info = f'[{split}]' \
                   f'[epoch {epoch + 1}/{cfg.EPOCHS} ends]'
            logger.logger.info(info)
            for i in range_models:
                info = f"[net: {i + 1}]" \
                       f"[lr: {eval(f'lr_{i + 1}'):.6f}]" \
                       f"[loss: {eval(f'loss_epoch_{i + 1}') / eval(f'n_batch_{split}'):.2f}]" \
                       f"[acc: {eval(f'n_corrects_epoch_{i + 1}') / eval(f'n_img_{split}') * 100: .3f}%]" \
                       f"[best acc: {eval(f'best_correct_{split}[{i}]') / eval(f'n_img_{split}') * 100: .3f}% " \
                       f"(epoch {eval(f'best_correct_epoch_{split}[{i}]')})]"
                logger.logger.info(info)

                writer.add_scalars(f'loss_{i + 1}',
                                   {split: eval(f'loss_epoch_{i + 1}') / eval(f'n_batch_{split}')}, epoch + 1)
                writer.add_scalars(f'acc_{i + 1}',
                                   {split: eval(f'n_corrects_epoch_{i + 1}') / eval(f'n_img_{split}') * 100}, epoch + 1)

            if n_models > 1:
                info = f"[net: ens]" \
                       f"[acc: {eval(f'n_corrects_epoch_ens') / eval(f'n_img_{split}') * 100: .3f}%]" \
                       f"[best acc: {eval(f'best_correct_{split}[{n_models}]') / eval(f'n_img_{split}') * 100: .3f}% " \
                       f"(epoch {eval(f'best_correct_epoch_{split}[{n_models}]')})]"
                logger.logger.info(info)

                writer.add_scalars('acc_ens',
                                   {split: eval(f'n_corrects_epoch_ens') / eval(f'n_img_{split}') * 100}, epoch + 1)

                # flyai statistics
                # NOTE(review): flyai_train_acc (and flyai_val_acc below) are
                # only assigned when n_models > 1, but train_log() at the end
                # of the epoch reads them whenever cfg.train_flyai is set —
                # with a single model that is a NameError.  Confirm intended
                # n_models for FlyAI runs, or also assign them when n_models == 1.
                flyai_train_acc = eval(f'n_corrects_epoch_ens') / eval(f'n_img_{split}') * 100

        timer.clock(f'epoch {epoch + 1} {split} ends', verbose=True)

    torch.cuda.empty_cache()

    # --validation and test-- #
    # Both evaluation splits run under no_grad with models in eval mode.
    for i in range_models:
        exec(f'model_{i + 1}.eval()')
    with torch.no_grad():
        # validation
        if True:
            # confusion matrix
            # Reuse the train-pass matrices; zero them in place.
            for i in range_models:
                exec(f'cm_epoch_{i + 1}.zero_()')
            if n_models > 1:
                cm_epoch_ens.zero_()

            # loss
            for i in range_models:
                exec(f'loss_epoch_{i + 1} = 0')

            split = 'val'
            timer.clock(f'epoch {epoch + 1} {split} starts', verbose=False)
            for batch, sample in enumerate(eval(f'dataloader_{split}')):
                imgs, labels, path = sample
                imgs = imgs.cuda()
                labels = labels.cuda()

                # forward
                for i in range_models:
                    exec(f'prediction_{i + 1} = model_{i + 1}(imgs, labels)')
                if n_models > 1:
                    prediction_ens = add('prediction')

                # confusion matrix
                for i in range_models:
                    exec(f'confusion_matrix(labels, prediction_{i + 1}, cm_epoch_{i + 1})')
                if n_models > 1:
                    exec(f'confusion_matrix(labels, prediction_ens, cm_epoch_ens)')

                # loss
                # Evaluation losses are tracked for logging only (no backprop).
                for i in range_models:
                    exec(f'loss_batch_{i + 1} = criterion_1(prediction_{i + 1}, labels)')
                for j in range(1, n_criteria):  # multi-criteria
                    for i in range_models:
                        exec(f'loss_batch_{i + 1} += criterion_{j + 1}(prediction_{i + 1}, labels)')
                for i in range_models:
                    exec(f'loss_epoch_{i + 1} += float(loss_batch_{i + 1})')

            # TP + TN
            # On a new per-model best val accuracy, save that model's weights
            # to its per-model slot (save_path['val'][i]).
            for i in range_models:
                exec(f'n_corrects_epoch_{i + 1} = float(cm_epoch_{i + 1}.diagonal()[:-1].sum())')
                if eval(f'best_correct_{split}[{i}]') < eval(f'n_corrects_epoch_{i + 1}'):
                    exec(f'best_correct_{split}[{i}] = n_corrects_epoch_{i + 1}')
                    exec(f'best_correct_epoch_{split}[{i}] = {epoch + 1}')
                    exec(f"torch.save(model_{i + 1}.state_dict(), save_path['{split}'][{i}])")
            if n_models > 1:
                # On a new best ensemble val accuracy, save ALL models to the
                # ensemble slots (save_path['val'][n_models + i]).
                n_corrects_epoch_ens = float(cm_epoch_ens.diagonal()[:-1].sum())
                if eval(f'best_correct_{split}[{n_models}]') < n_corrects_epoch_ens:
                    exec(f'best_correct_{split}[{n_models}] = n_corrects_epoch_ens')
                    exec(f'best_correct_epoch_{split}[{n_models}] = {epoch + 1}')
                    for i in range_models:
                        exec(f"torch.save(model_{i + 1}.state_dict(), save_path['{split}'][{n_models + i}])")

            # epoch info
            if True:
                info = f'[{split}]' \
                       f'[epoch {epoch + 1}/{cfg.EPOCHS} ends]'
                logger.logger.info(info)
                for i in range_models:
                    info = f"[net: {i + 1}]" \
                           f"[loss: {eval(f'loss_epoch_{i + 1}') / eval(f'n_batch_{split}'):.2f}]" \
                           f"[acc: {eval(f'n_corrects_epoch_{i + 1}') / eval(f'n_img_{split}') * 100: .3f}%]" \
                           f"[best acc: {eval(f'best_correct_{split}[{i}]') / eval(f'n_img_{split}') * 100: .3f}% " \
                           f"(epoch {eval(f'best_correct_epoch_{split}[{i}]')})]"
                    logger.logger.info(info)

                    writer.add_scalars(f'loss_{i + 1}',
                                       {split: eval(f'loss_epoch_{i + 1}') / eval(f'n_batch_{split}')}, epoch + 1)
                    writer.add_scalars(f'acc_{i + 1}',
                                       {split: eval(f'n_corrects_epoch_{i + 1}') / eval(f'n_img_{split}') * 100},
                                       epoch + 1)

                if n_models > 1:
                    info = f"[net: ens]" \
                           f"[acc: {eval(f'n_corrects_epoch_ens') / eval(f'n_img_{split}') * 100: .3f}%]" \
                           f"[best acc: {eval(f'best_correct_{split}[{n_models}]') / eval(f'n_img_{split}') * 100: .3f}% " \
                           f"(epoch {eval(f'best_correct_epoch_{split}[{n_models}]')})]"
                    logger.logger.info(info)

                    writer.add_scalars('acc_ens',
                                       {split: eval(f'n_corrects_epoch_ens') / eval(f'n_img_{split}') * 100}, epoch + 1)

                    # flyai statistics
                    # NOTE(review): only assigned when n_models > 1; see the
                    # matching note in the train section.
                    flyai_val_acc = eval(f'n_corrects_epoch_ens') / eval(f'n_img_{split}') * 100

            timer.clock(f'epoch {epoch + 1} {split} ends', verbose=True)

        torch.cuda.empty_cache()

        # Report per-epoch accuracies to the FlyAI platform.
        if cfg.train_flyai:
            train_log(train_acc=flyai_train_acc, val_acc=flyai_val_acc)


        # test
        # Local runs only (FlyAI has no test split); mirrors the val section,
        # including best-checkpoint saving into save_path['test'].
        if not cfg.train_flyai:
            # confusion matrix
            for i in range_models:
                exec(f'cm_epoch_{i + 1}.zero_()')
            if n_models > 1:
                cm_epoch_ens.zero_()

            # loss
            for i in range_models:
                exec(f'loss_epoch_{i + 1} = 0')

            split = 'test'
            timer.clock(f'epoch {epoch + 1} {split} starts', verbose=False)
            for batch, sample in enumerate(eval(f'dataloader_{split}')):
                imgs, labels, path = sample
                imgs = imgs.cuda()
                labels = labels.cuda()

                # forward
                for i in range_models:
                    exec(f'prediction_{i + 1} = model_{i + 1}(imgs, labels)')
                if n_models > 1:
                    prediction_ens = add('prediction')

                # confusion matrix
                for i in range_models:
                    exec(f'confusion_matrix(labels, prediction_{i + 1}, cm_epoch_{i + 1})')
                if n_models > 1:
                    exec(f'confusion_matrix(labels, prediction_ens, cm_epoch_ens)')

                # loss
                for i in range_models:
                    exec(f'loss_batch_{i + 1} = criterion_1(prediction_{i + 1}, labels)')
                for j in range(1, n_criteria):  # multi-criteria
                    for i in range_models:
                        exec(f'loss_batch_{i + 1} += criterion_{j + 1}(prediction_{i + 1}, labels)')
                for i in range_models:
                    exec(f'loss_epoch_{i + 1} += float(loss_batch_{i + 1})')

            # TP + TN
            for i in range_models:
                exec(f'n_corrects_epoch_{i + 1} = float(cm_epoch_{i + 1}.diagonal()[:-1].sum())')
                if eval(f'best_correct_{split}[{i}]') < eval(f'n_corrects_epoch_{i + 1}'):
                    exec(f'best_correct_{split}[{i}] = n_corrects_epoch_{i + 1}')
                    exec(f'best_correct_epoch_{split}[{i}] = {epoch + 1}')
                    exec(f"torch.save(model_{i + 1}.state_dict(), save_path['{split}'][{i}])")
            if n_models > 1:
                n_corrects_epoch_ens = float(cm_epoch_ens.diagonal()[:-1].sum())
                if eval(f'best_correct_{split}[{n_models}]') < n_corrects_epoch_ens:
                    exec(f'best_correct_{split}[{n_models}] = n_corrects_epoch_ens')
                    exec(f'best_correct_epoch_{split}[{n_models}] = {epoch + 1}')
                    for i in range_models:
                        exec(f"torch.save(model_{i + 1}.state_dict(), save_path['{split}'][{n_models + i}])")

            # epoch info
            if True:
                info = f'[{split}]' \
                       f'[epoch {epoch + 1}/{cfg.EPOCHS} ends]'
                logger.logger.info(info)
                for i in range_models:
                    info = f"[net: {i + 1}]" \
                           f"[loss: {eval(f'loss_epoch_{i + 1}') / eval(f'n_batch_{split}'):.2f}]" \
                           f"[acc: {eval(f'n_corrects_epoch_{i + 1}') / eval(f'n_img_{split}') * 100: .3f}%]" \
                           f"[best acc: {eval(f'best_correct_{split}[{i}]') / eval(f'n_img_{split}') * 100: .3f}% " \
                           f"(epoch {eval(f'best_correct_epoch_{split}[{i}]')})]"
                    logger.logger.info(info)

                    writer.add_scalars(f'loss_{i + 1}',
                                       {split: eval(f'loss_epoch_{i + 1}') / eval(f'n_batch_{split}')}, epoch + 1)
                    writer.add_scalars(f'acc_{i + 1}',
                                       {split: eval(f'n_corrects_epoch_{i + 1}') / eval(f'n_img_{split}') * 100},
                                       epoch + 1)

                if n_models > 1:
                    info = f"[net: ens]" \
                           f"[acc: {eval(f'n_corrects_epoch_ens') / eval(f'n_img_{split}') * 100: .3f}%]" \
                           f"[best acc: {eval(f'best_correct_{split}[{n_models}]') / eval(f'n_img_{split}') * 100: .3f}% " \
                           f"(epoch {eval(f'best_correct_epoch_{split}[{n_models}]')})]"
                    logger.logger.info(info)

                    writer.add_scalars('acc_ens',
                                       {split: eval(f'n_corrects_epoch_ens') / eval(f'n_img_{split}') * 100}, epoch + 1)
            timer.clock(f'epoch {epoch + 1} {split} ends', verbose=True)

        torch.cuda.empty_cache()

writer.close()  # flush pending events and close the TensorBoard writer
