import copy
import logging
import math
import numpy as np
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm

from learners.base import BaseLearner
from models.single_net import StandardNet
from utils.loss import AngularPenaltySMLoss
from utils.toolkit import tensor2numpy

num_workers = 8


class Learner(BaseLearner):
    """Incremental learner built around a single :class:`StandardNet` backbone."""

    _network: StandardNet
    _old_network: StandardNet

    def __init__(self, args):
        """Store training hyper-parameters and build the backbone network.

        Args:
            args: configuration dict; must contain "batch_size", "init_lr",
                "inc_lr", "ca_lr", "weight_decay", "min_lr", "init_cls" and
                "inc_cls" ("weight_decay"/"min_lr" may be None to use defaults).
        """
        super().__init__(args)
        self._network = StandardNet(args, True)

        self.args = args
        self.batch_size = args["batch_size"]
        self.init_lr = args["init_lr"]
        self.inc_lr = args["inc_lr"]
        self.ca_lr = args["ca_lr"]

        # Fall back to sensible defaults when the config leaves these unset.
        self.weight_decay = (
            args["weight_decay"] if args["weight_decay"] is not None else 0.0005
        )
        self.min_lr = args["min_lr"] if args["min_lr"] is not None else 1e-8
        self.init_cls = args["init_cls"]
        self.inc_cls = args["inc_cls"]
        self.task_sizes = []
        # Best-model bookkeeping. (The original used a bare string literal as an
        # "attribute docstring", which is just a no-op statement in Python.)
        self.best_model: dict = {}  # state_dict of the best checkpoint so far
        self.best_acc_cur = 0  # best test accuracy within the current task
        self.best_acc = []  # per-task best accuracies
        self.best_epoch = []  # per-task epoch index of the best accuracy

    def extract_features(self, trainloader):
        """Run the backbone in eval mode over a loader and collect embeddings.

        Returns:
            tuple: (embeddings, labels) as CPU tensors concatenated over
            all batches of ``trainloader``.
        """
        net = self._network.eval()
        feats, labels = [], []
        with torch.no_grad():
            for _, images, lbls in trainloader:
                images = images.to(self._device)
                lbls = lbls.to(self._device)
                feats.append(net.extract_token(images).cpu())
                labels.append(lbls.cpu())
        return torch.cat(feats, dim=0), torch.cat(labels, dim=0)

    def get_optimizer(self, lr):
        """Build the optimizer named by ``args["optimizer"]`` over trainable params.

        Args:
            lr: learning rate to use.

        Returns:
            A configured ``torch.optim`` optimizer.

        Raises:
            ValueError: if ``args["optimizer"]`` is not "sgd", "adam" or
                "adamw". (Previously an unknown name crashed with an opaque
                UnboundLocalError on the return statement.)
        """
        params = [p for p in self._network.parameters() if p.requires_grad]
        name = self.args["optimizer"]
        if name == "sgd":
            return optim.SGD(
                params,
                momentum=0.9,
                lr=lr,
                weight_decay=self.weight_decay,
            )
        if name == "adam":
            return optim.Adam(params, lr=lr, weight_decay=self.weight_decay)
        if name == "adamw":
            return optim.AdamW(params, lr=lr, weight_decay=self.weight_decay)
        raise ValueError(f"Unknown optimizer: {name!r}")

    def get_scheduler(self, optimizer, epoch):
        """Build the LR scheduler named by ``args["scheduler"]``.

        Args:
            optimizer: optimizer to schedule.
            epoch: total epoch count (T_max for cosine annealing).

        Returns:
            A scheduler instance, or ``None`` for "constant".

        Raises:
            ValueError: if ``args["scheduler"]`` is not "cosine", "steplr" or
                "constant". (Previously an unknown name crashed with an opaque
                UnboundLocalError on the return statement.)
        """
        name = self.args["scheduler"]
        if name == "cosine":
            return optim.lr_scheduler.CosineAnnealingLR(
                optimizer=optimizer, T_max=epoch, eta_min=self.min_lr
            )
        if name == "steplr":
            return optim.lr_scheduler.MultiStepLR(
                optimizer=optimizer,
                milestones=self.args["init_milestones"],
                gamma=self.args["init_lr_decay"],
            )
        if name == "constant":
            return None
        raise ValueError(f"Unknown scheduler: {name!r}")

    def incremental_train(self, data_manager):
        """Run one full incremental step: task setup, data setup, training,
        then post-training statistics/alignment."""
        self.data_manager = data_manager
        self.before_task()
        self.before_train()
        self._train(self.train_loader, self.test_loader)
        self.after_train()

    def before_task(self):
        """Advance the task counter, extend class bookkeeping and grow the
        classifier head for the new task's classes."""
        self._cur_task += 1
        self.task_size = self.data_manager.get_task_size(self._cur_task)
        self.task_sizes.append(self.task_size)
        self._total_classes = self._known_classes + self.task_size
        logging.info(
            "Learning on {}-{}".format(self._known_classes, self._total_classes - 1)
        )
        # TODO: network classifier update
        # Grow the head by task_size outputs; the temperature kwarg is
        # forwarded to the new head (exact semantics live in
        # StandardNet.update_fc — not visible here).
        self._network.update_fc(
            self.task_size,
            fc_kwargs={
                "fc_temperture": self.args["fc_temperture"],
            },
        )

    def before_train(self):
        """Build the train/test/protonet data loaders for the current task.

        When this is not the first task, also caches the current model's
        embeddings of the new training data (``train_embeddings_old``) before
        training updates the weights.
        """

        def make_loader(dataset, shuffle):
            # All loaders share the configured batch size and worker count.
            return DataLoader(
                dataset,
                batch_size=self.batch_size,
                shuffle=shuffle,
                num_workers=num_workers,
            )

        new_classes = np.arange(self._known_classes, self._total_classes)

        self.train_dataset = self.data_manager.get_dataset(
            new_classes, source="train", mode="train"
        )
        self.train_loader = make_loader(self.train_dataset, shuffle=True)

        self.test_dataset = self.data_manager.get_dataset(
            np.arange(0, self._total_classes), source="test", mode="test"
        )
        self.test_loader = make_loader(self.test_dataset, shuffle=False)

        # Same new-class data, but with test-time transforms (for prototypes).
        self.train_dataset_for_protonet = self.data_manager.get_dataset(
            new_classes, source="train", mode="test"
        )
        self.train_loader_for_protonet = make_loader(
            self.train_dataset_for_protonet, shuffle=True
        )

        if self._cur_task > 0:
            self._network.to(self._device)
            self.train_embeddings_old, _ = self.extract_features(self.train_loader)

    def _train(self, train_loader, test_loader):
        """Reset the per-task best-model tracking, then run stage-1 training."""
        self.best_acc_cur = 0.0
        self.best_model = {}
        # Reserve slots for this task's best accuracy and best epoch.
        self.best_acc.append(self.best_acc_cur)
        self.best_epoch.append(0)
        self._network.to(self._device)
        self._train_1st(train_loader, test_loader)

    def _train_1st(self, train_loader, test_loader):
        """Stage-1 training: pick epochs/optimizer per task and dispatch.

        Task 0 uses the "init" hyper-parameters with ``_init_train``; later
        tasks use the "inc" hyper-parameters with ``_inc_train``. With
        ``args["verbose"]`` it also prints FLOP/parameter reports.
        """
        n_parameters = sum(
            p.numel() for p in self._network.parameters() if p.requires_grad
        )
        print(f"number of trainable params: {n_parameters}")

        if self.args["verbose"]:
            # Optional model-complexity report (ptflops + fvcore); debug only.
            from ptflops import get_model_complexity_info

            macs, params = get_model_complexity_info(
                self._network,
                (3, 224, 224),
                as_strings=True,
                print_per_layer_stat=True,
                verbose=True,
            )
            print(f"MACs: {macs,params}")

            intt = torch.randn(1, 3, 224, 224)
            from fvcore.nn import FlopCountAnalysis
            from fvcore.nn import flop_count_table

            flops = FlopCountAnalysis(self._network, intt.to(self._device))
            flops.total()
            print(flop_count_table(flops))

        first_task = self._cur_task == 0
        if first_task:
            epochs = self.args["init_epochs"]
            optimizer = self.get_optimizer(lr=self.args["init_lr"])
        else:
            epochs = self.args["inc_epochs"]
            optimizer = self.get_optimizer(lr=self.args["inc_lr"])
        scheduler = self.get_scheduler(optimizer, epochs)

        train_func = self._init_train if first_task else self._inc_train
        train_func(
            epochs,
            train_loader,
            test_loader,
            optimizer=optimizer,
            scheduler=scheduler,
        )

    def _init_train(
        self, epochs, train_loader, test_loader, optimizer, scheduler
    ):
        """Train the new head on the first task with a CosFace angular loss.

        Tracks the checkpoint with the best test accuracy and restores it
        after the final epoch.
        """
        loss_cos = AngularPenaltySMLoss(
            loss_type="cosface",
            eps=1e-7,
            s=self.args["scale"],
            m=self.args["margin"],
        )
        prog_bar = tqdm(range(epochs))

        for epoch in prog_bar:
            self._network.train()
            running_loss = 0.0
            correct, total = 0, 0

            for _, inputs, targets in train_loader:
                inputs = inputs.to(self._device)
                targets = targets.to(self._device)
                # Map absolute labels to task-local indices; labels below
                # _known_classes (none on task 0) would map to -1.
                aux_targets = targets.clone()
                aux_targets = torch.where(
                    aux_targets - self._known_classes >= 0,
                    aux_targets - self._known_classes,
                    -1,
                )

                # Only the new task's logits participate in the loss.
                logits = self._network(inputs)["logits"][:, self._known_classes:]
                loss = loss_cos(logits, aux_targets)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                running_loss += loss.item()

                preds = torch.max(logits, dim=1)[1]
                correct += preds.eq(aux_targets.expand_as(preds)).cpu().sum()
                total += len(aux_targets)

            if scheduler:
                scheduler.step()
            train_acc = np.around(tensor2numpy(correct) * 100 / total, decimals=2)
            test_acc = self._compute_accuracy(self._network, test_loader)
            info = "Task {}, Epoch {}/{} => Loss {:.3f}, TrAcc {:.2f}, TeAcc {:.2f}".format(
                self._cur_task,
                epoch + 1,
                epochs,
                running_loss / len(train_loader),
                train_acc,
                test_acc,
            )
            # Snapshot the weights whenever test accuracy reaches a new best.
            if test_acc >= self.best_acc_cur:
                self.best_acc_cur = test_acc
                self.best_acc[self._cur_task] = self.best_acc_cur
                self.best_epoch[self._cur_task] = epoch
                self.best_model = copy.deepcopy(self._network.state_dict())
            prog_bar.set_description(info)

        logging.info(info)
        report_str = f"Task {self._cur_task} => Best accuracy: {self.best_acc_cur}[{self.best_epoch[self._cur_task]}], Average accuracy: {np.mean(self.best_acc)}"
        logging.info(report_str)
        self._network.load_state_dict(self.best_model)

    def _inc_train(
        self, epochs, train_loader, test_loader, optimizer, scheduler
    ):
        """Train on an incremental task: CosFace loss plus patch distillation.

        The distillation term (``dis_patch_loss`` against the frozen old
        network) is weighted by ``args["distill_alpha"]``. The checkpoint with
        the best test accuracy is kept and restored afterwards.
        """
        loss_cos = AngularPenaltySMLoss(
            loss_type="cosface",
            eps=1e-7,
            s=self.args["scale"],
            m=self.args["margin"],
        )
        prog_bar = tqdm(range(epochs))

        for epoch in prog_bar:
            self._network.train()
            running_loss, running_dis = 0.0, 0.0
            correct, total = 0, 0

            for _, inputs, targets in train_loader:
                inputs = inputs.to(self._device)
                targets = targets.to(self._device)
                # Task-local labels; old-class labels map to -1.
                aux_targets = targets.clone()
                aux_targets = torch.where(
                    aux_targets - self._known_classes >= 0,
                    aux_targets - self._known_classes,
                    -1,
                )

                logits = self._network(inputs)["logits"][:, self._known_classes:]

                loss_cls = loss_cos(logits, aux_targets)
                loss_dis = self.dis_patch_loss(inputs)
                loss = loss_cls + loss_dis * self.args["distill_alpha"]

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
                running_dis += loss_dis.item()

                preds = torch.max(logits, dim=1)[1]
                correct += preds.eq(aux_targets.expand_as(preds)).cpu().sum()
                total += len(aux_targets)

            if scheduler:
                scheduler.step()
            train_acc = np.around(tensor2numpy(correct) * 100 / total, decimals=2)
            test_acc = self._compute_accuracy(self._network, test_loader)
            info = "Task {}, Epoch {}/{} => Loss {:.3f}|{:.3f}, TrAcc {:.2f}, TeAcc {:.2f}".format(
                self._cur_task,
                epoch + 1,
                epochs,
                running_loss / len(train_loader),
                running_dis / len(train_loader),
                train_acc,
                test_acc,
            )
            # Snapshot the weights whenever test accuracy reaches a new best.
            if test_acc >= self.best_acc_cur:
                self.best_acc_cur = test_acc
                self.best_acc[self._cur_task] = self.best_acc_cur
                self.best_epoch[self._cur_task] = epoch
                self.best_model = copy.deepcopy(self._network.state_dict())
            prog_bar.set_description(info)

        logging.info(info)
        report_str = (
            f"Task {self._cur_task} => "
            + f"Best accuracy: {self.best_acc_cur:.2f}[{self.best_epoch[self._cur_task]}], "
            + f"Average accuracy: {np.mean(self.best_acc):.2f}"
        )
        logging.info(report_str)
        self._network.load_state_dict(self.best_model)

    def after_train(self):
        """Post-training hook: refresh embeddings/class statistics, then
        optionally run stage-2 classifier alignment (CA)."""
        # Unwrap DataParallel before touching the raw module.
        if len(self._multiple_gpus) > 1:
            self._network = self._network.module  # type: ignore

        if self._cur_task > 0:
            # Embeddings of the new data under the *updated* model; the "old"
            # counterpart was cached in before_train().
            self.train_embeddings_new, _ = self.extract_features(
                self.train_loader
            )

        self._compute_class_mean(self.data_manager)
        task_size = self.data_manager.get_task_size(self._cur_task)

        # Classifier alignment only runs on incremental tasks when enabled.
        if (
            self._cur_task > 0
            and self.args["ca_epochs"] > 0
            and self.args["ca"] is True
        ):
            self._stage2_compact_classifier(
                task_size,
            )
            if len(self._multiple_gpus) > 1:
                self._network = self._network.module  # type: ignore

    def after_task(self):
        """Finalize the task: absorb the new classes into the known set,
        freeze the network, and keep a frozen copy as the distillation
        teacher for the next task."""
        self._known_classes = self._total_classes
        self._network.freeze()
        self._network.after_task()

        # Frozen snapshot used by dis_patch_loss on the next task.
        self._old_network = copy.deepcopy(self._network)
        self._old_network.requires_grad_(False)
        self._old_network.eval()

    def _compute_class_mean(self, data_manager):
        """Compute per-class feature means and covariances for the new classes.

        Means go into ``self._class_means`` (numpy) and covariances into
        ``self._class_covs`` (torch). On the first task it also estimates
        ``self.radius`` — the square root of the mean per-dimension feature
        variance — for later use.

        Args:
            data_manager: provides per-class datasets via ``get_dataset``.
        """
        if getattr(self, "_class_means", None) is not None:
            # Grow the existing buffers; previously computed stats are kept.
            ori_classes = self._class_means.shape[0]
            assert ori_classes == self._known_classes
            new_class_means = np.zeros((self._total_classes, self.feature_dim))
            new_class_means[: self._known_classes] = self._class_means
            self._class_means = new_class_means
            new_class_cov = torch.zeros(
                (self._total_classes, self.feature_dim, self.feature_dim)
            )
            new_class_cov[: self._known_classes] = self._class_covs
            self._class_covs = new_class_cov
        else:
            self._class_means = np.zeros(
                (self._total_classes, self.feature_dim), dtype=np.float32
            )
            self._class_covs = torch.zeros(
                (self._total_classes, self.feature_dim, self.feature_dim)
            )

        radius = []
        for class_idx in range(self._known_classes, self._total_classes):
            _, _, idx_dataset = data_manager.get_dataset(
                np.arange(class_idx, class_idx + 1),
                source="train",
                mode="test",
                ret_data=True,
            )
            idx_loader = DataLoader(
                idx_dataset,
                batch_size=self.batch_size,
                shuffle=False,
                num_workers=4,
            )
            # [nb_samples, feature_dim]
            vectors, _ = self._extract_tokens(idx_loader)
            vectors = tensor2numpy(vectors, dtype=np.float32)

            class_mean = np.mean(vectors, axis=0)
            if self._cur_task == 0:
                # Covariance over features (rows of vectors.T are feature
                # dimensions); the small ridge keeps it well-conditioned.
                cov = np.cov(vectors.T) + np.eye(class_mean.shape[-1]) * 1e-4
                # BUG FIX: the divisor was hard-coded to 768 (the ViT-B hidden
                # size). Use the actual feature dimensionality so the radius
                # stays correct for any backbone.
                radius.append(np.trace(cov) / self.feature_dim)
            class_cov = (
                torch.cov(torch.tensor(vectors, dtype=torch.float64).T)
                + torch.eye(class_mean.shape[-1]) * 1e-3
            )

            self._class_means[class_idx, :] = class_mean
            self._class_covs[class_idx, ...] = class_cov

        if self._cur_task == 0:
            self.radius = np.sqrt(np.mean(radius))
            print(self.radius)

    def _stage2_compact_classifier(self, task_size):
        """Stage-2 classifier alignment: fine-tune only the FC head with the
        prototype-based alignment loss, tracking the best checkpoint.

        Args:
            task_size: number of classes in the current task (kept for
                interface compatibility; not used directly here).
        """
        for p in self._network.fc.parameters():
            p.requires_grad = True
        self.logit_norm = self.args["ca_with_logit_norm"]
        run_epochs = self.args["ca_epochs"]
        param_list = [
            p for p in self._network.fc.parameters() if p.requires_grad
        ]
        network_params = [
            {
                "params": param_list,
                "lr": self.ca_lr,
                "weight_decay": self.weight_decay,
            }
        ]

        optimizer = optim.SGD(
            network_params,
            lr=self.ca_lr,
            momentum=0.9,
            weight_decay=self.weight_decay,
        )
        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer=optimizer, T_max=run_epochs
        )

        self._network.to(self._device)
        # Backbone stays in eval mode: only the head is being aligned.
        self._network.eval()
        for epoch in range(run_epochs):
            losses = 0.0

            for _, inputs, targets in self.train_loader:
                inputs = inputs.to(self._device)
                targets = targets.to(self._device)

                loss = self.align_loss(
                    inputs,
                    targets,
                    self._class_means,
                    alpha=self.args["align_alpha"],
                )

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                losses += loss.item()
            scheduler.step()

            test_acc = self._compute_accuracy(self._network, self.test_loader)
            # BUG FIX: average the loss over the number of batches as every
            # other training loop does, not over the number of classes.
            info = "CA Task {} => Loss {:.3f}, Test_accy {:.3f}".format(
                self._cur_task, losses / len(self.train_loader), test_acc
            )
            logging.info(info)
            if test_acc >= self.best_acc_cur:
                self.best_acc_cur = test_acc
                self.best_acc[self._cur_task] = self.best_acc_cur
                self.best_epoch[self._cur_task] = epoch
                self.best_model = copy.deepcopy(self._network.state_dict())

        report_str = f"Task {self._cur_task} => Best accuracy: {self.best_acc_cur}[{self.best_epoch[self._cur_task]}], Average accuracy: {np.mean(self.best_acc)}"
        logging.info(report_str)
        self._network.load_state_dict(self.best_model)

    def _compute_accuracy(self, model, loader):
        """Top-1 accuracy of ``model`` on ``loader``, as a percentage rounded
        to two decimals."""
        model.eval()
        correct, total = 0, 0
        for _, inputs, targets in loader:
            inputs = inputs.to(self._device)
            with torch.no_grad():
                logits = model.forward(inputs)["logits"]
            predicts = logits.argmax(dim=1)
            correct += (predicts.cpu() == targets).sum()
            total += len(targets)
        return np.around(tensor2numpy(correct) * 100 / total, decimals=2)

    def _eval_cnn(self, loader):
        """Evaluate the network on ``loader``.

        Returns:
            tuple: (top-k predicted labels [N, topk], true labels [N]).
            Also logs task-identification accuracy and oracle within-task
            accuracy when ``calc_task_acc`` is set.
        """
        # Hard-coded switch for the extra task-level metrics below.
        calc_task_acc = True

        if calc_task_acc:
            task_correct, task_acc, total = 0, 0, 0

        self._network.eval()
        y_pred, y_true = [], []
        for _, (_, inputs, targets) in enumerate(loader):
            inputs = inputs.to(self._device)

            with torch.no_grad():
                outputs = self._network.forward(inputs)["logits"]
            predicts = torch.topk(
                outputs, k=self.topk, dim=1, largest=True, sorted=True
            )[
                1
            ]  # [bs, topk]
            y_pred.append(predicts.cpu().numpy())
            y_true.append(targets.cpu().numpy())

            # calculate the accuracy by using task_id
            if calc_task_acc:
                # Floor division maps labels < init_cls to task 0 and later
                # labels to tasks 1, 2, ...; assumes equal increments of
                # inc_cls classes per task after the first.
                task_ids = (targets - self.init_cls) // self.inc_cls + 1
                task_logits = torch.zeros(outputs.shape).to(self._device)
                for i, task_id in enumerate(task_ids):
                    if task_id == 0:
                        start_cls = 0
                        end_cls = self.init_cls
                    else:
                        start_cls = self.init_cls + (task_id - 1) * self.inc_cls
                        end_cls = self.init_cls + task_id * self.inc_cls
                    # Keep only the logits of the sample's own task — an
                    # oracle-task-id evaluation.
                    task_logits[i, start_cls:end_cls] += outputs[
                        i, start_cls:end_cls
                    ]
                # calculate the accuracy of task_id
                pred_task_ids = (
                    torch.max(outputs, dim=1)[1] - self.init_cls
                ) // self.inc_cls + 1
                task_correct += (pred_task_ids.cpu() == task_ids).sum()

                pred_task_y = torch.max(task_logits, dim=1)[1]
                task_acc += (pred_task_y.cpu() == targets).sum()
                total += len(targets)

        if calc_task_acc:
            logging.info(
                "Task correct: {}".format(
                    tensor2numpy(task_correct) * 100 / total
                )
            )
            logging.info(
                "Task acc: {}".format(tensor2numpy(task_acc) * 100 / total)
            )

        return np.concatenate(y_pred), np.concatenate(y_true)  # [N, topk]

    def _eval_nme(self, loader, class_means):
        """Nearest-class-mean evaluation via cosine similarity.

        Args:
            loader: evaluation data loader.
            class_means: per-class mean features, numpy [num_classes, dim].

        Returns:
            tuple: (top-k predicted class indices [N, topk], true labels [N]).
        """
        self._network.eval()
        vectors, y_true = self._extract_tokens(loader)
        # astype() already copies, so no extra deepcopy is needed before
        # building the tensor (torch.tensor copies as well).
        class_means = torch.tensor(class_means.astype(np.float32)).to(self._device)
        # Cosine similarity == dot product of L2-normalized vectors.
        # (Removed the unused B/D unpacking, the fc_inp alias and the no-op
        # "1 *" multiplier from the original.)
        out = F.linear(
            F.normalize(vectors, p=2, dim=1),
            F.normalize(class_means, p=2, dim=1),  # type: ignore
        )
        scores = tensor2numpy(out)

        return (
            np.argsort(-scores, axis=1)[:, : self.topk],
            tensor2numpy(y_true, dtype=np.int32),  # type: ignore
        )  # [N, topk]

    def _extract_tokens(self, loader):
        """Collect token embeddings and labels for every sample in ``loader``.

        Returns:
            tuple of tensors: (embeddings, targets) concatenated over batches;
            embeddings stay on the network's device, targets on CPU.
        """
        self._network.eval()
        all_vecs, all_targets = [], []
        with torch.no_grad():
            for _, batch_inputs, batch_targets in loader:
                all_vecs.append(
                    self._network.extract_token(batch_inputs.to(self._device))
                )
                all_targets.append(batch_targets)
        return torch.cat(all_vecs, dim=0), torch.cat(all_targets, dim=0)

    def dis_patch_loss(self, inputs):
        """Patch-level distillation loss between current and frozen old model.

        Each patch's direction change (new vs. old features) is weighted by
        how dissimilar that patch is to the new model's CLS token.
        """
        # Indexing below assumes extract_feats returns [B, L + 1, D] with the
        # CLS token at position 0 — TODO confirm against StandardNet.
        with torch.no_grad():
            feats_old = self._old_network.extract_feats(inputs)
        feats_new = self._network.extract_feats(inputs)
        cls_token = feats_new[:, 0, :]
        # Cosine similarity of the CLS token to every patch token: [B, L].
        cos_score = torch.einsum(
            "BD,BLD->BL",
            F.normalize(cls_token, p=2, dim=-1),
            F.normalize(feats_new[:, 1:, :], p=2, dim=-1),
        )
        # (pi - acos(-cos)) / pi maps to [0, 1]: patches LESS aligned with the
        # CLS token get HIGHER distillation weight.
        score = self._prob(-cos_score)

        # [B, L, D]: direction change of each normalized patch feature.
        diff = F.normalize(feats_new[:, 1:, :], p=2, dim=-1) - F.normalize(
            feats_old[:, 1:, :], p=2, dim=-1
        )
        # L2 norm of the change per patch (NOT a squared error): [B, L].
        diff = torch.norm(diff, p=2, dim=-1)
        # Weighted mean over batch and patches.
        distill_loss = torch.mean(score * diff)

        return distill_loss

    def align_loss(self, inputs, targets, class_means: np.ndarray, alpha=0.6):
        """Classifier-alignment loss from patch-mixed class prototypes.

        Samples prototypes uniformly over all seen classes, mixes each with a
        softmax-weighted combination of its most similar patch embeddings from
        the current batch, and applies scaled cross-entropy through the
        classifier in CA mode.

        Args:
            inputs: current batch of images (features extracted without grad;
                only the classifier learns from this loss).
            targets: batch labels — NOTE(review): currently unused; the
                sampled prototype indices act as targets instead.
            class_means: per-class mean features, numpy [num_classes, dim].
            alpha: mixing weight; alpha=1 keeps the pure prototype.
        """
        with torch.no_grad():
            feats_new = self._network.extract_feats(inputs)

        # Number of prototypes to sample; falls back to 32 for 1-sample batches.
        length = inputs.size(0) if inputs.size(0) > 1 else 32
        indices = torch.randint(low=0, high=self._total_classes, size=(length,))
        # sample B protos from old classes
        proto_aug = torch.FloatTensor(class_means[indices]).to(feats_new.device)
        targets_aug = indices.long().to(feats_new.device)

        # Flatten all patch tokens (positions 1..L) across the batch: [B*L, D].
        pat_embs = feats_new[:, 1:, :].reshape(-1, feats_new.size(-1))
        assert proto_aug.shape[-1] == pat_embs.shape[-1], (
            proto_aug.shape,
            pat_embs.shape,
        )
        assert proto_aug.dim() == 2 and pat_embs.dim() == 2, (
            proto_aug.dim(),
            pat_embs.dim(),
        )
        # Cosine similarity of every prototype to every patch embedding.
        sim = torch.einsum(
            "BD,KD->BK",
            F.normalize(proto_aug, p=2, dim=-1),
            F.normalize(pat_embs, p=2, dim=-1),
        )
        # [B, topk]: softmax-weight the 20 most similar patches per prototype
        # (k=20 is hard-coded; assumes at least 20 patch tokens are present).
        topk_s, topk_idx = torch.topk(sim, k=20, dim=-1)
        score = nn.functional.softmax(topk_s, dim=-1)
        # [B, K, D]: gather the selected patches for each prototype.
        select_pats = pat_embs[None, ...].repeat(proto_aug.size(0), 1, 1)
        select_pats = select_pats.gather(
            dim=1,
            index=topk_idx[..., None].expand(-1, -1, select_pats.size(-1)),
        )
        # Convex mix of the prototype with its attended patch summary.
        proto_aug = alpha * proto_aug + (1 - alpha) * torch.einsum(
            "BK,BKD->BD",
            score,
            select_pats,
        )
        logits = self._network(proto_aug, ca=True)["logits"]
        loss = F.cross_entropy(logits * self.args["scale"], targets_aug)
        return loss

    @staticmethod
    def _prob(x):
        """Map a cosine-like value ``x`` in [-1, 1] to a weight in [0, 1].

        Computes (pi - acos(x)) / pi: 0 at x = -1, 1 at x = 1.

        Args:
            x: tensor of cosine values, any shape. Values are clamped to
                [-1, 1] first — floating-point round-off after normalization
                can push |x| marginally above 1, which would make ``acos``
                return NaN.

        Returns:
            Tensor of the same shape as ``x`` with values in [0, 1].
        """
        pi = torch.tensor(math.pi)
        # BUG FIX: clamp guards acos against out-of-domain inputs (NaN).
        result = (pi - torch.acos(torch.clamp(x, -1.0, 1.0))) / pi
        return result
