import copy
import logging
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F

from .baseline import Learner as BaselineLearner
from models.single_net import BaseSingle


# FIXME: This is a temporary solution for loading the ViT models for fine-tuning;
# there is a known issue with logits scaling.
class Learner(BaselineLearner):
    """Continual learner extending the baseline with two post-task stages:

    1. Optionally overwrite the newest classifier head's weights with the
       class-mean prototypes (``replace_fc``).
    2. Optionally finetune the full classifier on synthetic features sampled
       from per-class Gaussian statistics ("classifier alignment",
       ``_train_clf_alignment``).
    """

    _network: BaseSingle
    _old_network: BaseSingle

    def __init__(self, args, model_func=BaseSingle):
        """Read CA-specific hyperparameters from ``args``.

        Args:
            args: configuration mapping; must provide ``ca_lr``,
                ``ca_epochs`` and a strictly boolean ``use_proto``.
            model_func: network factory passed through to the baseline.
        """
        super().__init__(args, model_func)
        # Learning rate and epoch budget for the classifier-alignment stage.
        self.ca_lr = args["ca_lr"]
        self.ca_epochs = args["ca_epochs"]
        # Forward used during CA: runs the model with its CA path enabled.
        self.ca_forward = lambda model, inputs: model(inputs, ca=True)

        # Fail fast on non-boolean config values (e.g. the string "true")
        # instead of silently relying on truthiness.
        assert args["use_proto"] in [True, False]
        self.use_proto = args["use_proto"]

    def after_train(self):
        """Post-task hook: prototype fc replacement, then CA finetuning."""
        super().after_train()
        if self.use_proto:
            print("\n" + "=" * 50)
            print("Update fc weights with Prototypes")
            print("=" * 50 + "\n")
            self.replace_fc()

        if self.ca_epochs > 0:
            print("\n" + "=" * 50)
            print("Classifier Alignment Finetuning")
            print("=" * 50 + "\n")
            # Only queried when CA actually runs; note the argument is
            # currently unused by _train_clf_alignment (kept for interface
            # stability).
            task_size = self.data_manager.get_task_size(self._cur_task)
            self._train_clf_alignment(
                task_size,
            )

    def replace_fc(
        self,
    ):
        """Overwrite the newest head's weights with class-mean prototypes.

        Uses the prototypes of the classes introduced by the current task
        (``_known_classes`` .. ``_total_classes``).
        """
        model = self._network
        model = model.eval()

        index_range = list(range(self._known_classes, self._total_classes))
        cls_protos = self._class_means[index_range]
        # heads[-1] is the head created for the current task.
        model.fc.heads[-1].weight.data = cls_protos.to(
            dtype=torch.float32, device=self._device
        )

    def _train_clf_alignment(self, task_size):
        """Finetune the classifier on features drawn from ``self.GD``.

        Each epoch samples a fresh batch of synthetic features per class
        from the stored per-class Gaussian distributions, shuffles them,
        and optimizes the fc parameters with SGD + cosine annealing while
        the backbone stays in eval mode.

        Args:
            task_size: number of classes in the current task. NOTE(review):
                currently unused in this body; kept for signature
                compatibility with callers/overrides.
        """
        # Only the classifier head is trainable during alignment.
        for p in self._network.fc.parameters():
            p.requires_grad = True

        param_list = [
            p for p in self._network.fc.parameters() if p.requires_grad
        ]
        network_params = [
            {
                "params": param_list,
                "lr": self.ca_lr,
                "weight_decay": self.weight_decay,
            }
        ]

        # print trainable parameters, for debugging
        for name, param in self._network.fc.named_parameters():
            if param.requires_grad:
                print(name)

        optimizer = optim.SGD(
            network_params,
            lr=self.ca_lr,
            momentum=0.9,
            weight_decay=self.weight_decay,
        )
        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer=optimizer, T_max=self.ca_epochs
        )

        # Backbone stays in eval mode (frozen BN/dropout stats); gradients
        # still flow to the fc parameters enabled above.
        self._network.eval()
        self._network.to(self._device)
        # Clean multi-GPU setup for classifier alignment
        self._setup_multi_gpu_training()

        # Synthetic samples drawn per class, per epoch (loop-invariant).
        num_sampled_pcls = 256

        for ep in range(self.ca_epochs):
            losses = 0.0

            # Draw a fresh synthetic feature set from each class Gaussian.
            sampled_data = []
            sampled_label = []
            for c_id in range(self._total_classes):
                # self.GD[c_id]: per-class distribution — assumed to be a
                # torch.distributions object with .sample(); TODO confirm.
                m = self.GD[c_id]
                sampled_data_single = m.sample(
                    sample_shape=torch.Size((num_sampled_pcls,))
                )
                sampled_data.append(sampled_data_single)
                sampled_label.extend([c_id] * num_sampled_pcls)

            sampled_data = torch.cat(sampled_data).float().to(self._device)
            sampled_label = torch.tensor(sampled_label).long().to(self._device)

            inputs = sampled_data
            targets = sampled_label

            # Shuffle so each minibatch mixes classes.
            sf_indexes = torch.randperm(inputs.size(0))
            inputs = inputs[sf_indexes]
            targets = targets[sf_indexes]

            # One minibatch of num_sampled_pcls per iteration; together the
            # _total_classes iterations cover the whole sampled set exactly.
            for _iter in range(self._total_classes):
                inp = inputs[
                    _iter * num_sampled_pcls : (_iter + 1) * num_sampled_pcls
                ]
                tgt = targets[
                    _iter * num_sampled_pcls : (_iter + 1) * num_sampled_pcls
                ]
                outputs = self.ca_forward(self._network, inp)
                # Temperature-scale logits before cross-entropy (see the
                # FIXME about logits scaling at the top of the file).
                logits = outputs["logits"] * self.args["scale"]
                loss = F.cross_entropy(logits[:, : self._total_classes], tgt)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                losses += loss.item()

            scheduler.step()
            test_acc = self._compute_accuracy(self._network, self.test_loader)  # type: ignore
            info = "CA Task {} => Loss {:.3f}, Test_accy {:.3f}".format(
                self._cur_task, losses / self._total_classes, test_acc
            )
            logging.info(info)
            # Track the best-performing epoch for optional early stopping.
            if test_acc >= self.best_acc_cur:
                self.best_acc_cur = test_acc
                self.best_acc[self._cur_task] = self.best_acc_cur
                self.best_epoch[self._cur_task] = ep
                self.best_model = copy.deepcopy(self._network.state_dict())

        report_str = (
            f"Task {self._cur_task} => Best accuracy: {self.best_acc_cur}[{self.best_epoch[self._cur_task]}],"
            + f" Average accuracy: {np.mean(self.best_acc)}"
        )
        logging.info(report_str)
        if self.args["early_stop"]:
            # Restore the checkpoint from the best-scoring epoch.
            self._network.load_state_dict(self.best_model)
