import copy
import logging
import numpy as np
import torch
from tqdm import tqdm

from utils.loss import AngularPenaltySMLoss
from models.single_net import StandardNet
from utils.toolkit import tensor2numpy
from .baseline import Learner as BaselineLearner

num_workers = 8


class Learner(BaselineLearner):
    """CosFace-margin learner with an orthogonality regularizer.

    The initial-task and incremental-task phases currently run the exact
    same training procedure, so both delegate to a single private loop
    (`_train_loop`) instead of carrying duplicated bodies.
    """

    def __init__(self, args, model_func=StandardNet):
        super(Learner, self).__init__(args, model_func)

    def _init_train(self, epochs, train_loader, test_loader, optimizer, scheduler):
        """Train on the first task with CosFace + orthogonality losses."""
        self._train_loop(epochs, train_loader, test_loader, optimizer, scheduler)

    def _inc_train(self, epochs, train_loader, test_loader, optimizer, scheduler):
        """Train on an incremental task.

        NOTE(review): the original docstring claimed distillation and
        classifier-alignment losses were added here, but the body was
        byte-identical to `_init_train` — no such losses were computed.
        When those losses are implemented, extend `_train_loop` (or
        override this method) accordingly.
        """
        self._train_loop(epochs, train_loader, test_loader, optimizer, scheduler)

    def _train_loop(self, epochs, train_loader, test_loader, optimizer, scheduler):
        """Shared per-epoch training loop.

        Loss = CosFace classification loss on the current task's logit
        columns + `args["ortho"]`-weighted orthogonality penalty from the
        network. Targets are remapped to the current task's label space:
        labels from earlier tasks (below ``self._known_classes``) become -1,
        and logits are sliced to ``[:, self._known_classes:]`` to match.
        Tracks the best test accuracy per task and, when
        ``args["early_stop"]`` is set, restores the best checkpoint.

        Args:
            epochs: number of epochs to run.
            train_loader: yields ``(index, inputs, targets)`` batches.
            test_loader: loader passed to ``self._compute_accuracy``.
            optimizer: torch optimizer for the network parameters.
            scheduler: optional LR scheduler, stepped once per epoch.
        """
        loss_cos = AngularPenaltySMLoss(
            loss_type="cosface",
            eps=1e-7,
            s=self.args["scale"],
            m=self.args["margin"],
        )
        prog_bar = tqdm(range(epochs))
        info = ""  # guards the post-loop logging call when epochs == 0

        for epoch in prog_bar:
            self._network.train()
            losses = 0.0
            correct, total = 0, 0

            for _, inputs, targets in train_loader:
                inputs, targets = inputs.to(self._device), targets.to(self._device)
                # Shift labels into the current task's space; old-task
                # samples map to -1 (never matched by argmax predictions).
                aux_targets = targets.clone()
                aux_targets = torch.where(
                    aux_targets - self._known_classes >= 0,
                    aux_targets - self._known_classes,
                    -1,
                )

                output = self._network(inputs)
                # Only the current task's logit columns participate.
                logits = output["logits"][:, self._known_classes :]

                loss_cls = loss_cos(logits, aux_targets)
                loss_ortho = self._network.compute_loss(alpha_tp=0.0)
                loss = loss_cls + self.args["ortho"] * loss_ortho

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                losses += loss.item()

                _, preds = torch.max(logits, dim=1)
                correct += preds.eq(aux_targets.expand_as(preds)).cpu().sum()
                total += len(aux_targets)

            if scheduler:
                scheduler.step()
            train_acc = np.around(tensor2numpy(correct) * 100 / total, decimals=2)
            test_acc = self._compute_accuracy(self._network, test_loader)
            info = "Task {}, Epoch {}/{} => Loss {:.3f}, TrAcc {:.2f}, TeAcc {:.2f}".format(
                self._cur_task,
                epoch + 1,
                epochs,
                losses / len(train_loader),
                train_acc,
                test_acc,
            )
            # Keep the best-so-far checkpoint for optional early stopping.
            if test_acc >= self.best_acc_cur:
                self.best_acc_cur = test_acc
                self.best_acc[self._cur_task] = self.best_acc_cur
                self.best_epoch[self._cur_task] = epoch
                self.best_model = copy.deepcopy(self._network.state_dict())
            prog_bar.set_description(info)

        logging.info(info)
        report_str = (
            f"Task {self._cur_task} => "
            f"Best accuracy: {self.best_acc_cur:.2f}[{self.best_epoch[self._cur_task]}], "
            f"Average accuracy: {np.mean(self.best_acc):.2f}"
        )
        logging.info(report_str)
        if self.args["early_stop"]:
            self._network.load_state_dict(self.best_model)
