from typing import (
    Dict,
    Any,
    Tuple,
    List,
    Callable,
    ClassVar,
)

# Third-party and standard-library imports
from torch.utils.data import DataLoader
import logging
import copy
import numpy as np
import torch

from learners.interfaces import LearnerInterface
from learners.components.hook_manager import TrainHookManager
from learners.strategies.forward_strategies import StandardForwardStrategy

# Factory functions for the pluggable learner components
from learners.registry import (
    create_training_strategy,
    create_classifier_alignment,
    create_drift_compensation,
    create_prototype_manager,
    create_memory_manager,
    create_evaluation_manager,
)


class BaseLearner(LearnerInterface):
    """Abstract base class providing common functionality for all learners.

    This class implements shared behavior — task bookkeeping, dataloader
    construction, old-model backup, feature extraction, and the wiring of
    pluggable components (memory, prototypes, training strategy, drift
    compensation, classifier alignment, evaluation) — while leaving specific
    algorithm implementations to subclasses. It serves as a bridge between
    the interface contract and concrete implementations.
    """

    # Class constants (using ClassVar to distinguish from instance attributes)
    DEFAULT_LOGGING_LEVEL: ClassVar[int] = logging.INFO
    DEFAULT_BATCH_SIZE: ClassVar[int] = 32
    DEFAULT_EVAL_BATCH_SIZE: ClassVar[int] = 128
    DEFAULT_EPOCHS: ClassVar[int] = 100
    DEFAULT_LEARNING_RATE: ClassVar[float] = 0.1
    # NOTE: mutable class-level default — treat as read-only; a learner that
    # mutates the list it received would affect every other instance.
    DEFAULT_SCHEDULER_MILESTONES: ClassVar[List[int]] = [30, 60, 80]
    DEFAULT_TOPK: ClassVar[int] = 5

    # Components Defaults (registry keys passed to the create_* factories)
    # NOTE(review): DEFAULT_EVALUATION_MANAGER is never read — __init__
    # hard-codes "cil_evaluation_manager" instead; confirm which is intended.
    DEFAULT_EVALUATION_MANAGER: ClassVar[str] = "cil_evaluation_manager"
    DEFAULT_DRIFT_COMPENSATION: ClassVar[str] = "no_compensation"
    DEFAULT_TRAINING_STRATEGY: ClassVar[str] = "standard"
    DEFAULT_PROTOTYPE_MANAGER: ClassVar[str] = "standard"
    DEFAULT_MEMORY_MANAGER: ClassVar[str] = "standard"
    DEFAULT_MEMORY_SIZE: ClassVar[int] = 2000
    # Classifier alignment defaults
    DEFAULT_CLASSIFIER_ALIGNMENT: ClassVar[str] = "standard"
    DEFAULT_CA_EPOCHS: ClassVar[int] = 5
    DEFAULT_CA_LR: ClassVar[float] = 0.01
    DEFAULT_CA_WEIGHT_DECAY: ClassVar[float] = 0.0005
    DEFAULT_CA_SAMPLES_PER_CLASS: ClassVar[int] = 256
    DEFAULT_CA_MOMENTUM: ClassVar[float] = 0.9

    def __init__(self, args: Dict[str, Any], data_manager: Any, model_func: Callable):
        """Initialize base learner with common parameters.

        Args:
            args: Configuration dictionary. Must contain "device" (a
                non-empty list; the first entry becomes the primary device).
                All other keys are read with ``args.get`` and fall back to
                the class-level DEFAULT_* constants.
            data_manager: Manager for dataset access (must provide
                ``get_dataset``; see ``_prepare_dataloaders``).
            model_func: Factory callable that builds the network from ``args``.
        """
        # Basic initialization
        self.args = args
        self.data_manager = data_manager
        self._network = model_func(args)
        # Initialize old_network as a copy of network right away to avoid None checks
        # (frozen and in eval mode so it can serve as a fixed reference model).
        self._old_network = copy.deepcopy(self._network)
        self._old_network.requires_grad_(False)
        self._old_network.eval()
        # First listed device is the primary one; raises if "device" is empty.
        self._device = args["device"][0]
        self._multiple_gpus = args["device"]
        self.topk = args.get("topk", self.DEFAULT_TOPK)

        # Task tracking. -1 is presumably the "no task yet" sentinel,
        # advanced by subclasses via the _cur_task setter — TODO confirm.
        self._cur_task_id = -1
        self._known_classes_count = 0
        self._total_classes_count = 0
        # Cumulative class counts, appended by the _total_classes setter.
        self.task_sizes: List[int] = []

        # Model metrics (per-task bests accumulated across tasks)
        self.best_acc_cur = 0
        self.best_acc = []
        self.best_epoch = []
        self.best_model = {}

        # Training settings
        self._batch_size = args.get("batch_size", self.DEFAULT_BATCH_SIZE)
        self._eval_batch_size = args.get(
            "eval_batch_size", self.DEFAULT_EVAL_BATCH_SIZE
        )
        self._epochs = args.get("epochs", self.DEFAULT_EPOCHS)
        self._learning_rate = args.get("init_lr", self.DEFAULT_LEARNING_RATE)
        self._scheduler_milestones = args.get(
            "milestones", self.DEFAULT_SCHEDULER_MILESTONES
        )
        self._scheduler_lambda = None

        # TODO: integrate these two into the registration system like the
        # factory-created components below.
        self.forward_strategy = StandardForwardStrategy()
        # Initialize components with explicit types
        self.hook_manager = TrainHookManager()

        # Memory manager with type annotation
        self.memory_manager = create_memory_manager(
            args.get("memory_manager", self.DEFAULT_MEMORY_MANAGER),
            memory_size=args.get("memory_size", self.DEFAULT_MEMORY_SIZE),
            fixed_memory=args.get("fixed_memory", True),
            memory_per_class=args.get("memory_per_class", None),
        )

        # FIXME: the factory's return type is not inferred correctly by the
        # type checker; no clean fix identified yet.
        self.prototype_manager = create_prototype_manager(
            args.get("prototype_manager", self.DEFAULT_PROTOTYPE_MANAGER),
            feature_dim=self._network.feature_dim,
        )
        self.training_strategy = create_training_strategy(
            args.get("training_strategy", self.DEFAULT_TRAINING_STRATEGY)
        )
        self.drift_compensation = create_drift_compensation(
            args.get("drift_compensation", self.DEFAULT_DRIFT_COMPENSATION)
        )
        self.alignment = create_classifier_alignment(
            args.get("classifier_alignment", self.DEFAULT_CLASSIFIER_ALIGNMENT),
            epochs=args.get("ca_epochs", self.DEFAULT_CA_EPOCHS),
            lr=args.get("ca_lr", self.DEFAULT_CA_LR),
            # NOTE(review): reads "weight_decay" while every sibling key uses
            # the "ca_" prefix — confirm the key name is intentional.
            weight_decay=args.get("weight_decay", self.DEFAULT_CA_WEIGHT_DECAY),
            samples_per_class=args.get(
                "ca_samples_per_class", self.DEFAULT_CA_SAMPLES_PER_CLASS
            ),
            momentum=args.get("ca_momentum", self.DEFAULT_CA_MOMENTUM),
        )
        # Let the alignment component attach its hooks to this learner.
        self.alignment.register_hooks(self)

        # Evaluation manager with type annotation.
        # NOTE(review): hard-coded name — ignores DEFAULT_EVALUATION_MANAGER
        # and any "evaluation_manager" entry in args; confirm this is intended.
        self.evaluation_manager = create_evaluation_manager("cil_evaluation_manager")

    @property
    def _cur_task(self) -> int:
        """Index of the current task (-1 before the first task)."""
        return self._cur_task_id

    @_cur_task.setter
    def _cur_task(self, value: int) -> None:
        self._cur_task_id = value

    @property
    def _known_classes(self) -> int:
        """Number of classes seen in all previous (completed) tasks."""
        return self._known_classes_count

    @_known_classes.setter
    def _known_classes(self, value: int) -> None:
        self._known_classes_count = value

    @property
    def _total_classes(self) -> int:
        """Total number of classes up to and including the current task."""
        return self._total_classes_count

    @_total_classes.setter
    def _total_classes(self, value: int) -> None:
        # NOTE(review): every assignment also appends to task_sizes, so
        # setting this twice for one task records the boundary twice —
        # callers must assign exactly once per task; confirm.
        self._total_classes_count = value
        self.task_sizes.append(value)

    def _update_fc(self, nb_classes: int) -> None:
        """Update the final classifier layer to hold ``nb_classes`` outputs.

        Fires the "before_classifier"/"after_classifier" hooks around the
        network's ``update_fc`` call. Old-class weights are frozen
        (``freeze_old=True``).

        Args:
            nb_classes: New total number of classes for the classifier head.
        """
        # Call before_classifier hooks
        self.hook_manager.call_hook(
            "before_classifier", learner=self, nb_classes=nb_classes
        )

        # Direct indexing: "fc_temperture", "with_norm" and "fc_bias" are
        # required keys in args (KeyError if absent).
        self._network.update_fc(
            nb_classes,
            freeze_old=True,
            fc_kwargs={
                "fc_temperture": self.args["fc_temperture"],
                "with_norm": self.args["with_norm"],
                "fc_bias": self.args["fc_bias"],
            },
        )

        # Call after_classifier hooks
        self.hook_manager.call_hook(
            "after_classifier", learner=self, nb_classes=nb_classes
        )

    def _prepare_dataloaders(self) -> Tuple[DataLoader, DataLoader]:
        """Prepare train/test data loaders for the current task.

        Train data covers only the new classes
        ([_known_classes, _total_classes)); test data covers all classes
        seen so far ([0, _total_classes)). The "after_dataloader_creation"
        hook may replace either loader by returning a dict with
        "train_loader"/"test_loader" keys.

        Returns:
            Tuple of (train_loader, test_loader).
        """
        train_dataset = self.data_manager.get_dataset(
            np.arange(self._known_classes, self._total_classes),
            source="train",
            mode="train",
        )
        train_loader = DataLoader(
            train_dataset,
            batch_size=self._batch_size,
            shuffle=True,
            num_workers=8,  # hard-coded worker count; not configurable via args
        )

        test_dataset = self.data_manager.get_dataset(
            np.arange(0, self._total_classes), source="test", mode="test"
        )
        test_loader = DataLoader(
            test_dataset,
            # NOTE(review): uses the training batch size here, while
            # _get_task_test_loader uses _eval_batch_size — confirm intended.
            batch_size=self._batch_size,
            shuffle=False,
            num_workers=8,
        )

        # Call hooks for potential dataset transformations
        hook_results = self.hook_manager.call_hook(
            "after_dataloader_creation",
            train_loader=train_loader,
            test_loader=test_loader,
            cur_task=self._cur_task,
        )

        # Allow hooks to modify loaders (last matching hook result wins)
        if hook_results:
            for result in hook_results:
                if isinstance(result, dict):
                    if "train_loader" in result:
                        train_loader = result["train_loader"]
                    if "test_loader" in result:
                        test_loader = result["test_loader"]

        return train_loader, test_loader

    def _get_task_test_loader(self, task_id: int) -> DataLoader:
        """Get a test loader for a specific task.

        Args:
            task_id: Index of the task to load test data for.

        Returns:
            DataLoader over the requested test data (no shuffling).
        """
        # NOTE(review): np.arange(task_id, task_id + 1) == [task_id], i.e.
        # the task id is passed as a single *class* index; this only covers
        # one class per task — verify against the data_manager contract
        # (task boundaries are available in self.task_sizes).
        test_dataset = self.data_manager.get_dataset(
            np.arange(task_id, task_id + 1), source="test", mode="test"
        )
        test_loader = DataLoader(
            test_dataset,
            batch_size=self._eval_batch_size,
            shuffle=False,
            num_workers=8,
        )
        return test_loader

    def _backup_model(self) -> None:
        """Snapshot the current network as the frozen reference model.

        The copy is detached from training (requires_grad disabled) and put
        in eval mode; typically called at the end of a task.
        """
        self._old_network = copy.deepcopy(self._network)
        self._old_network.requires_grad_(False)
        self._old_network.eval()

    def _extract_tokens(self, loader: DataLoader) -> Tuple[torch.Tensor, torch.Tensor]:
        """Extract features/tokens and labels for every sample in ``loader``.

        Puts the network in eval mode and runs without gradients. Expects
        each batch to be a 3-tuple whose first element is ignored
        (presumably sample indices — TODO confirm).

        Args:
            loader: DataLoader yielding (_, inputs, targets) batches.

        Returns:
            Tuple of (features, targets), both concatenated along dim 0 and
            residing on self._device.
        """
        self._network.eval()
        vectors, targets = [], []

        with torch.no_grad():
            for _, _inputs, _targets in loader:
                _inputs = _inputs.to(self._device)
                _targets = _targets.to(self._device)
                _vectors = self.extract_token(_inputs)
                vectors.append(_vectors)
                targets.append(_targets)

        # Call hooks after feature extraction
        combined_vectors = torch.cat(vectors, dim=0)
        combined_targets = torch.cat(targets, dim=0)

        return combined_vectors, combined_targets

    def extract_token(self, inputs: torch.Tensor) -> torch.Tensor:
        """Extract tokens/features from inputs via the network backbone."""
        return self._network.extract_token(inputs)

    # Forward passes delegate to the pluggable forward strategy so subclasses
    # can swap behavior without overriding these methods.
    def forward_ca(self, model, inputs, *args, **kwargs):
        """Forward pass used during classifier alignment."""
        return self.forward_strategy.forward_ca(model, inputs, *args, **kwargs)

    def forward_train(self, model, inputs, *args, **kwargs):
        """Forward pass used during training."""
        return self.forward_strategy.forward_train(model, inputs, *args, **kwargs)

    def forward_test(self, model, inputs, *args, **kwargs):
        """Forward pass used during evaluation."""
        return self.forward_strategy.forward_test(model, inputs, *args, **kwargs)
