#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created by PyCharm.

@Date    : Thu Feb 25 2021 
@Time    : 05:01:28
@File    : trainer.py
@Author  : alpha
"""


import torch

from torch import nn
from pathlib import Path

from src.model import FaceSpoofNet
from src.log import logger


def train_feat(model:FaceSpoofNet,
               device,
               criterion,
               optimizer,
               train_dataloader,
               num_epoches,
               scheduler=None,
               save_path='checkpoint',
               fixbn=False):
    """Stage 1: train only the feature extractor (``model.feat_net``).

    Iterates triplets (anchor, positive, negative) from *train_dataloader*,
    pushes them through ``model.feat_forward`` and optimizes *criterion*
    (a triplet-style loss over the pooled 1x1 feature maps). A checkpoint
    of the full model state_dict is written after every epoch.

    :param model: network exposing ``feat_net`` and ``feat_forward``
    :param device: torch device the model/batches are moved to
    :param criterion: loss taking (anchor, positive, negative) embeddings
    :param optimizer: optimizer over the trainable parameters
    :param train_dataloader: yields (anchor, pos, neg, label) batches
    :param num_epoches: number of epochs to run
    :param scheduler: optional LR scheduler, stepped once per epoch
    :param save_path: directory for per-epoch checkpoints (created if absent)
    :param fixbn: if True, keep BatchNorm2d layers of feat_net in eval mode
    """
    ckpt_dir = Path(save_path)
    ckpt_dir.mkdir(parents=True, exist_ok=True)

    model.to(device)
    model.feat_net.train()

    if fixbn:
        # Freeze BatchNorm running statistics while the rest keeps training.
        for module in model.feat_net.modules():
            if isinstance(module, nn.BatchNorm2d):
                module.eval()

    # STAGE 1: only the feature network is optimized here.
    for epoch in range(num_epoches):
        for step, (anchor_ims, pos_ims, neg_ims, _) in enumerate(train_dataloader):
            anchor_ims = anchor_ims.to(device)
            pos_ims = pos_ims.to(device)
            neg_ims = neg_ims.to(device)

            feat_a, feat_p, feat_n = model.feat_forward(anchor_ims, pos_ims, neg_ims)
            # Feature maps are (N, C, 1, 1); index the spatial dims away
            # so the criterion sees plain (N, C) embeddings.
            loss = criterion(feat_a[:, :, 0, 0], feat_p[:, :, 0, 0], feat_n[:, :, 0, 0])

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % 100 == 0:
                logger.info("STAGE ONE - epoch: {} - iter: {} - loss: {:.4f} ".format(epoch, step, loss.item()))

        save_model_path = ckpt_dir / "face_spoof_epoch{:03d}_wo_cls.pth".format(epoch)
        logger.info("Saving stage1 epoch {:>3d} model to {}".format(epoch, str(save_model_path)))

        if scheduler is not None:
            scheduler.step()

        # Legacy (non-zip) serialization keeps checkpoints loadable by older torch.
        torch.save(model.state_dict(), save_model_path, _use_new_zipfile_serialization=False)


def train_cls(model:FaceSpoofNet,
              device,
              criterion,
              optimizer,
              train_dataloader,
              test_dataloader,
              num_epoches,
              scheduler=None,
              save_path='checkpoint',
              fixbn=False):
    """Stage 2: train only the classifier head on top of frozen features.

    Features are produced by ``model.feat_forward`` under ``torch.no_grad``
    (``feat_net`` stays in eval mode), then ``model.cls_forward`` scores each
    branch. After every epoch the model is checkpointed and evaluated on
    *test_dataloader*; the best-accuracy snapshot is additionally saved as
    ``face_spoof_with_cls_best.pth``.

    :param model: network exposing ``feat_net``, ``cls_net``,
                  ``feat_forward`` and ``cls_forward``
    :param device: torch device the model/batches are moved to
    :param criterion: loss taking (pred_a, pred_p, pred_n, labels)
    :param optimizer: optimizer over the classifier parameters
    :param train_dataloader: yields (anchor, pos, neg, label) batches
    :param test_dataloader: evaluation loader with the same batch layout
    :param num_epoches: number of epochs to run
    :param scheduler: optional LR scheduler, stepped once per epoch
    :param fixbn: if True, keep BatchNorm2d layers of cls_net in eval mode
    """
    save_path = Path(save_path)
    save_path.mkdir(parents=True, exist_ok=True)

    # STAGE 2: feature extractor is frozen for the whole stage.
    model.to(device)
    model.feat_net.eval()

    best_acc = 0.0
    for epoch in range(num_epoches):
        model.cls_net.train()
        if fixbn:
            # Re-freeze BN each epoch because .train() above re-enables it.
            for m in model.cls_net.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()

        for step, (anchor_ims, pos_ims, neg_ims, labels) in enumerate(train_dataloader):
            anchor_ims = anchor_ims.to(device)
            pos_ims = pos_ims.to(device)
            neg_ims = neg_ims.to(device)
            labels = labels.to(device, dtype=torch.float)

            # No gradients through the frozen feature extractor.
            with torch.no_grad():
                fa, fp, fn = model.feat_forward(anchor_ims, pos_ims, neg_ims)

            pred_a, pred_p, pred_n = model.cls_forward(fa, fp, fn)
            # Predictions are (N, 1, 1, 1) logits; squeeze to (N,) for the loss.
            loss = criterion(
                pred_a[:, 0, 0, 0],
                pred_p[:, 0, 0, 0],
                pred_n[:, 0, 0, 0],
                labels
            )

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % 100 == 0:
                logger.info("STAGE TWO - epoch: {} - iter: {} - loss: {:.4f} ".format(epoch, step, loss.item()))

        if scheduler is not None:
            scheduler.step()

        save_model_path = save_path / "face_spoof_with_cls_epoch{:03d}.pth".format(epoch)
        logger.info("Saving stage2 epoch {:>3d} model to {}".format(epoch, str(save_model_path)))

        torch.save(model.state_dict(), save_model_path, _use_new_zipfile_serialization=False)

        model.cls_net.eval()
        correct = 0

        logger.info("STAGE TWO - epoch: {} - EVALUATING...".format(epoch))
        with torch.no_grad():
            for anchor_ims, pos_ims, neg_ims, labels in test_dataloader:
                anchor_ims = anchor_ims.to(device)
                pos_ims = pos_ims.to(device)
                neg_ims = neg_ims.to(device)
                labels = labels.to(device)

                pred_a, pred_p, pred_n = model.cls_forward(*model.feat_forward(anchor_ims, pos_ims, neg_ims))

                # logit > 0 <=> predicted positive; the negative branch should
                # predict the inverted label.
                num_correct_a = ((pred_a > 0).long()[:, 0, 0, 0] == labels).sum()
                num_correct_p = ((pred_p > 0).long()[:, 0, 0, 0] == labels).sum()
                num_correct_n = ((pred_n > 0).long()[:, 0, 0, 0] == (labels == 0).long()).sum()

                # FIX: convert the 0-dim tensor to a Python int so `correct`,
                # `acc` and `best_acc` stay plain numbers (tensor formatting in
                # the log line below fails on older torch, and int/tensor
                # comparisons are fragile).
                correct += (num_correct_a + num_correct_p + num_correct_n).item()

        # Three predictions per sample (anchor, positive, negative).
        acc = correct / (3 * len(test_dataloader.dataset))
        logger.info("STAGE TWO - epoch: {} - acc: {:2.2f}%".format(epoch, acc * 100))

        if acc >= best_acc:
            best_acc = acc
            logger.info("Saving {} as the current best model.".format(save_model_path))
            torch.save(model.state_dict(), str(save_path / "face_spoof_with_cls_best.pth"), _use_new_zipfile_serialization=False)


def train_join(model:FaceSpoofNet,
               device,
               met_criterion,
               cls_criterion,
               optimizer,
               train_dataloader,
               test_dataloader,
               num_epoches,
               scheduler=None,
               save_path='checkpoint'):
    """Joint stage: fine-tune feature extractor and classifier together.

    Optimizes ``5 * metric_loss + cls_loss`` per batch, checkpoints after
    every epoch, evaluates on *test_dataloader* and keeps the best-accuracy
    snapshot as ``face_spoof_join_best.pth``.

    :param model: network exposing ``feat_forward`` and ``cls_forward``
    :param device: torch device the model/batches are moved to
    :param met_criterion: triplet-style loss over (N, C) embeddings
    :param cls_criterion: classification loss over (pred_a, pred_p, pred_n, labels)
    :param optimizer: optimizer over all trainable parameters
    :param train_dataloader: yields (anchor, pos, neg, label) batches
    :param test_dataloader: evaluation loader with the same batch layout
    :param num_epoches: number of epochs to run
    :param scheduler: optional LR scheduler, stepped once per epoch
    :param save_path: directory for checkpoints (created if absent)
    """
    save_path = Path(save_path)
    save_path.mkdir(parents=True, exist_ok=True)

    model.to(device)

    best_acc = 0.0
    for epoch in range(num_epoches):
        model.train()
        for step, (anchor_ims, pos_ims, neg_ims, labels) in enumerate(train_dataloader):
            anchor_ims = anchor_ims.to(device)
            pos_ims = pos_ims.to(device)
            neg_ims = neg_ims.to(device)
            labels = labels.to(device, dtype=torch.float)

            fa, fp, fn = model.feat_forward(anchor_ims, pos_ims, neg_ims)
            pred_a, pred_p, pred_n = model.cls_forward(fa, fp, fn)

            # Features are (N, C, 1, 1); predictions are (N, 1, 1, 1) logits.
            loss_metric = met_criterion(fa[:, :, 0, 0], fp[:, :, 0, 0], fn[:, :, 0, 0])
            loss_cls = cls_criterion(
                pred_a[:, 0, 0, 0],
                pred_p[:, 0, 0, 0],
                pred_n[:, 0, 0, 0],
                labels
            )
            # Metric loss is weighted 5x against the classification loss.
            loss = 5 * loss_metric + loss_cls

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % 100 == 0:
                logger.info(
                    "JOIN - epoch:{:>3d} - iter:{:>6d} - loss: {:.4f} - loss_metric: {:.4f} - loss_cls: {:.4f} ".format(
                        epoch, step, loss.item(), loss_metric.item(), loss_cls.item()
                    )
                )

        if scheduler is not None:
            scheduler.step()

        save_model_path = save_path / "face_spoof_join_epoch{:03d}.pth".format(epoch)
        logger.info("Saving epoch {:>3d} model to {}".format(epoch, str(save_model_path)))

        torch.save(model.state_dict(), save_model_path, _use_new_zipfile_serialization=False)

        model.eval()
        correct = 0

        logger.info("JOIN - epoch: {} - EVALUATING...".format(epoch))
        with torch.no_grad():
            for anchor_ims, pos_ims, neg_ims, labels in test_dataloader:
                anchor_ims = anchor_ims.to(device)
                pos_ims = pos_ims.to(device)
                neg_ims = neg_ims.to(device)
                labels = labels.to(device)

                pred_a, pred_p, pred_n = model.cls_forward(*model.feat_forward(anchor_ims, pos_ims, neg_ims))

                # logit > 0 <=> predicted positive; the negative branch should
                # predict the inverted label.
                num_correct_a = ((pred_a > 0).long()[:, 0, 0, 0] == labels).sum()
                num_correct_p = ((pred_p > 0).long()[:, 0, 0, 0] == labels).sum()
                num_correct_n = ((pred_n > 0).long()[:, 0, 0, 0] == (labels == 0).long()).sum()

                # FIX: convert the 0-dim tensor to a Python int so `correct`,
                # `acc` and `best_acc` stay plain numbers (tensor formatting in
                # the log line below fails on older torch, and int/tensor
                # comparisons are fragile).
                correct += (num_correct_a + num_correct_p + num_correct_n).item()

        # Three predictions per sample (anchor, positive, negative).
        acc = correct / (3 * len(test_dataloader.dataset))
        # FIX: report accuracy as a percentage, consistent with train_cls
        # (previously logged the raw fraction with a misleading format).
        logger.info("JOIN - epoch: {} - acc: {:2.2f}%".format(epoch, acc * 100))

        if acc >= best_acc:
            best_acc = acc
            logger.info("Saving {} as the current best model.".format(save_model_path))
            # str() for consistency with the other save calls in this file.
            torch.save(model.state_dict(), str(save_path / "face_spoof_join_best.pth"), _use_new_zipfile_serialization=False)