# from typing import Callable
# import numpy as np
# import copy
# from torch.nn import functional as F
# from torch.distributions.multivariate_normal import MultivariateNormal
# from models.single_net import SSIAT

import logging
import torch
from torch import nn, optim
from torch.utils.data import DataLoader

from learners.ssiat import Learner as BaselineLearner

num_workers = 8

import os
import copy
import logging
from typing import Callable
import numpy as np
import torch
from torch import optim, nn
from torch.utils.data import DataLoader

from learners.base import BaseLearner
from models.single_net import BaseSingle
from utils.loss import AngularPenaltySMLoss
from utils.toolkit import (
    time_execution,
    LossMeter,
    AccMeter,
    TrainMP,
    NamespaceDict,
)
from torch.optim.adamw import AdamW
from torch.optim.adam import Adam
from torch.optim.sgd import SGD
from torch.distributions.multivariate_normal import MultivariateNormal


class Learner(BaselineLearner):
    """SSIAT-style learner that replaces analytic semantic-drift estimation
    with Learnable Drift Compensation: a bias-free linear projector trained
    to map frozen-old-backbone features onto the current backbone's feature
    space, then applied to the stored old-class prototypes.
    """

    def after_train(self):
        """Post-training hook for the current task.

        Tears down multi-GPU state, recomputes class-mean prototypes, and —
        from the second task onward — compensates prototype drift and
        realigns the classifier.
        """
        # Use clean multi-GPU teardown
        self._teardown_multi_gpu_training()
        self._compute_class_mean(self.data_manager)

        # calibrate feature shift (only meaningful once an old network exists)
        if self._cur_task > 0:
            self.learnable_drift_compensation()

        # classifier alignment on the (drift-corrected) prototypes
        task_size = self.data_manager.get_task_size(self._cur_task)
        if self._cur_task > 0 and self.ca_epochs > 0:
            self._train_clf_alignment(task_size)

    def learnable_drift_compensation(self):
        """Train a linear projector old-features -> new-features and use it
        to update the stored prototypes of previously seen classes.

        Side effects: creates ``self.projector`` and overwrites
        ``self._class_means[:self._known_classes]`` in place.
        """
        logging.info("=" * 50)
        logging.info("Using Learnable Drift Compensation")
        logging.info("=" * 50)

        # Derive the feature dimensionality from the stored prototypes
        # instead of hard-coding 768, so other backbones work unchanged
        # (the same tensor is projected below, so the dims must agree).
        feat_dim = self._class_means.shape[-1]
        self.projector = nn.Linear(feat_dim, feat_dim, bias=False)
        self.projector.to(self._device)
        optimizer = optim.Adam(self.projector.parameters(), lr=0.001)
        # Hoisted: the original re-instantiated nn.MSELoss() every batch.
        criterion = nn.MSELoss()
        dataloader = DataLoader(
            self.train_loader.dataset,
            batch_size=64,
            shuffle=True,
            num_workers=num_workers,
        )

        for ep in range(20):
            last_loss = None  # guards the log line against an empty loader
            for _, inputs, _targets in dataloader:
                inputs = inputs.to(self._device)
                # Only the projector is optimized: extract both feature sets
                # without autograd graphs (the original back-propagated
                # through self._network for nothing, wasting time/memory).
                with torch.no_grad():
                    feats_old = self._old_network(inputs)["features"]
                    feats_new = self._network(inputs)["features"]
                x_proj = self.projector(feats_old)
                loss = criterion(x_proj, feats_new)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                last_loss = loss
            if last_loss is not None:
                logging.info(f"Epoch {ep}, Loss: {last_loss.item()}")

        with torch.no_grad():
            _old_proto = self._class_means[: self._known_classes].clone()
            _old_proto = _old_proto.to(torch.float32).to(self._device)
            _old_proto_w = self.projector(_old_proto).detach().clone().cpu()
            # Prototypes are stored in float64; cast back after projection.
            self._class_means[: self._known_classes] = _old_proto_w.to(torch.float64)

    @time_execution
    def after_task(self):
        """End-of-task bookkeeping: save checkpoints, snapshot the network
        as the frozen 'old' network, and advance the known-class counter."""
        # Clean multi-GPU teardown
        self._teardown_multi_gpu_training()

        self._save_ckps()
        self._backup()
        self._known_classes = self._total_classes
        self._network.after_task()

    def _save_ckps(self):
        """Persist a CPU copy of the network for the current task's class
        range, if a checkpoint directory is configured (``args['ckp_path']``).
        """
        dir_path = self.args["ckp_path"]
        if dir_path is not None:
            # Race-free replacement for the exists()/makedirs() pair.
            os.makedirs(dir_path, exist_ok=True)

            start_cls, end_cls = self.get_cls_range(self._cur_task)
            ckp_path = "{}/{}_{}_{}[{:.2f}].pth".format(
                dir_path,
                self.args["dataset"],
                start_cls,
                end_cls,
                self.best_acc_cur,
            )
            # Deep-copy to CPU so saving never disturbs the live GPU model.
            network_copy = copy.deepcopy(self._network)
            network_copy.to("cpu")
            torch.save(network_copy, ckp_path)

    def _backup(self):
        """Freeze a deep copy of the current network as ``self._old_network``
        (eval mode, no gradients) for use by drift compensation next task."""
        self._old_network = copy.deepcopy(self._network)
        self._old_network.requires_grad_(False)
        self._old_network.eval()