import torch
from typing import Callable, TypeVar
from learners.multi_base import Learner as BaselineLearner

# from learners.baseline import ClsAlign
from models.multi_net import BaseMulti
from torch import optim
import numpy as np
import torch.nn.functional as F
import logging
import copy

# Default number of DataLoader worker processes. Not referenced in this chunk —
# presumably consumed by data-loading code elsewhere in the module; verify before removing.
num_workers = 8

# Type variable for the concrete network handled by Learner; bounded so any T
# is guaranteed to expose at least the BaseMulti interface.
T = TypeVar("T", bound=BaseMulti)


# NOTE(review): the commented-out block below (through the end of the commented
# region) is a superseded version of Learner whose classifier-alignment logic
# (ca_epochs, ca_lr, after_train, _train_clf_alignment) was dropped from the
# active class further down. Dead code should be deleted and recovered from
# version control if needed — confirm the alignment features are intentionally
# removed before deleting.
# class Learner(BaselineLearner[T]):
#     """Multi-strategy learner with configurable training strategies"""

#     _network: T

#     def __init__(self, args, data_manager, model_func: Callable = BaseMulti):
#         super().__init__(args, data_manager, model_func)
#         self.ca_epochs = args["ca_epochs"]
#         self.ca_lr = args["ca_lr"]
#         self.ca_forward = lambda model, inputs: model(inputs, mode="cur", ca=True)

#     def after_train(self):
#         super().after_train()

#         task_size = self.data_manager.get_task_size(self._cur_task)
#         if self.ca_epochs > 0:
#             print("\n" + "=" * 60)
#             print("Classifier Alignment Finetuning")
#             print("=" * 60 + "\n")
#             self._train_clf_alignment(
#                 task_size,
#             )

#     def _train_clf_alignment(self, task_size):
#         for p in self._network.fc.parameters():
#             p.requires_grad = True

#         param_list = [p for p in self._network.fc.parameters() if p.requires_grad]
#         network_params = [
#             {
#                 "params": param_list,
#                 "lr": self.ca_lr,
#                 "weight_decay": self.weight_decay,
#             }
#         ]

#         # print trainable parameters, for debugging
#         for name, param in self._network.fc.named_parameters():
#             if param.requires_grad:
#                 print(name)

#         optimizer = optim.SGD(
#             network_params,
#             lr=self.ca_lr,
#             momentum=0.9,
#             weight_decay=self.weight_decay,
#         )
#         scheduler = optim.lr_scheduler.CosineAnnealingLR(
#             optimizer=optimizer, T_max=self.ca_epochs
#         )

#         self._network.eval()
#         self._network.to(self._device)
#         # Use clean multi-GPU setup for classifier alignment
#         self._setup_multi_gpu_training()

#         for ep in range(self.ca_epochs):
#             losses = 0.0

#             sampled_data = []
#             sampled_label = []
#             num_sampled_pcls = 256

#             for c_id in range(self._total_classes):
#                 m = self.GD[c_id]
#                 sampled_data_single = m.sample(
#                     sample_shape=torch.Size((num_sampled_pcls,))
#                 )
#                 sampled_data.append(sampled_data_single)
#                 sampled_label.extend([c_id] * num_sampled_pcls)

#             sampled_data = torch.cat(sampled_data).float().to(self._device)
#             sampled_label = torch.tensor(sampled_label).long().to(self._device)

#             inputs = sampled_data
#             targets = sampled_label

#             sf_indexes = torch.randperm(inputs.size(0))
#             inputs = inputs[sf_indexes]
#             targets = targets[sf_indexes]

#             for _iter in range(self._total_classes):
#                 inp = inputs[_iter * num_sampled_pcls : (_iter + 1) * num_sampled_pcls]
#                 tgt = targets[_iter * num_sampled_pcls : (_iter + 1) * num_sampled_pcls]
#                 outputs = self.ca_forward(self._network, inp[:, None])
#                 logits = outputs["logits"] * self.args["scale"]
#                 loss = F.cross_entropy(logits[:, : self._total_classes], tgt)

#                 optimizer.zero_grad()
#                 loss.backward()
#                 optimizer.step()
#                 losses += loss.item()

#             scheduler.step()
#             test_acc = self._compute_accuracy(self._network, self.test_loader)  # type: ignore
#             info = "CA Task {} => Loss {:.3f}, Test_accy {:.3f}".format(
#                 self._cur_task, losses / self._total_classes, test_acc
#             )
#             logging.info(info)
#             if test_acc >= self.best_acc_cur:
#                 self.best_acc_cur = test_acc
#                 self.best_acc[self._cur_task] = self.best_acc_cur
#                 self.best_epoch[self._cur_task] = ep
#                 self.best_model = copy.deepcopy(self._network.state_dict())


#         report_str = (
#             f"Task {self._cur_task} => Best accuracy: {self.best_acc_cur}[{self.best_epoch[self._cur_task]}],"
#             + f" Average accuracy: {np.mean(self.best_acc)}"
#         )
#         logging.info(report_str)
#         if self.args["early_stop"]:
#             self._network.load_state_dict(self.best_model)
class Learner(BaselineLearner[T]):
    """Multi-strategy learner with configurable training strategies.

    Thin specialization of the baseline learner that installs a
    classifier-alignment forward function (``ca_forward``) used to run the
    network on the current task with the ``ca`` flag enabled.
    """

    # Concrete network instance; T is bounded by BaseMulti.
    _network: T

    def __init__(self, args, data_manager, model_func: Callable = BaseMulti):
        """Initialize the learner and register the CA forward callable.

        Args:
            args: Configuration mapping passed through to the baseline learner.
            data_manager: Dataset/task manager passed through to the baseline learner.
            model_func: Factory producing the network (defaults to BaseMulti).
        """
        super().__init__(args, data_manager, model_func)

        # PEP 8 (E731): use a named function instead of assigning a lambda,
        # so tracebacks and reprs carry a meaningful name. Behavior is
        # identical: forward the model in "cur" mode with ca enabled.
        def _ca_forward(model, inputs):
            return model(inputs, mode="cur", ca=True)

        self.ca_forward = _ca_forward
