import logging
import torch.nn as nn
from typing import Dict, Any, Tuple, Optional
from learners.core.base_learner import BaseLearner


class CoreLearner(BaseLearner):
    """Core learner implementation with composition-based architecture.

    Orchestrates the incremental-learning lifecycle
    (``before_task`` -> ``before_train`` -> train -> ``after_train`` ->
    ``after_task``) while delegating the real work to pluggable
    collaborators held on the instance: a training strategy, a prototype
    manager, a drift-compensation component, an evaluation manager, and a
    hook manager that provides extension points around each phase.
    """

    def before_task(self) -> None:
        """Advance to the next task and grow the classifier head.

        Fires the ``before_task`` hook with the *upcoming* task index,
        increments the internal task counter, records the new task size,
        and extends the fully-connected head by ``task_size`` outputs.
        """
        # Hook is called before the counter is bumped, so pass the index
        # of the task we are about to start.
        self.hook_manager.call_hook(
            "before_task", learner=self, cur_task=self._cur_task + 1
        )

        self._cur_task += 1
        self.task_size = self.data_manager.get_task_size(self._cur_task)
        self.task_sizes.append(self.task_size)
        self._total_classes = self._known_classes + self.task_size

        # Zero-padded inclusive class range, e.g. "Learning on 0010-0019".
        logging.info(
            "Learning on {}-{}".format(
                str(self._known_classes).rjust(4, "0"),
                str(self._total_classes - 1).rjust(4, "0"),
            )
        )

        self._update_fc(self.task_size)

    def before_train(self) -> None:
        """Reset per-task bookkeeping and prepare data/model for training.

        Fires the ``before_train`` hook, zeroes the best-accuracy/best-model
        trackers for the new task, builds the train/test dataloaders, and
        moves the network to the configured device.
        """
        self.hook_manager.call_hook(
            "before_train", learner=self, cur_task=self._cur_task
        )

        # Placeholders appended now; after_train() overwrites best_acc_cur /
        # best_model from the training results.
        self.best_acc_cur = 0.0
        self.best_model = {}
        self.best_acc.append(self.best_acc_cur)
        self.best_epoch.append(0)

        self.train_loader, self.test_loader = self._prepare_dataloaders()
        self._network.to(self._device)

    # FIXME add all updates to the before_function instead of after_task
    def incremental_train(self) -> None:
        """Main training loop with hooks for extensibility.

        Runs the full lifecycle for one incremental task: setup, core
        training via the strategy object, then post-training and post-task
        bookkeeping.
        """
        self.before_task()
        self.before_train()

        # Core training using strategy pattern; the strategy receives the
        # hook and evaluation managers so it can fire hooks / evaluate
        # mid-training without reaching back into the learner.
        training_results = self.training_strategy.train_task(
            network=self._network,
            train_loader=self.train_loader,
            test_loader=self.test_loader,
            args=self.args,
            device=self._device,
            old_network=self._old_network,
            cur_task=self._cur_task,
            known_classes=self._known_classes,
            total_classes=self._total_classes,
            hook_manager=self.hook_manager,  # Pass hook manager to training strategy
            evaluation_manager=self.evaluation_manager,  # Pass evaluation manager to strategy
        )

        self.after_train(training_results)
        self.after_task()

    def after_train(self, training_results: Dict[str, Any]) -> None:
        """Post-training cleanup and analysis.

        Pulls the best accuracy/model out of ``training_results``, unwraps
        DataParallel, refreshes per-class prototypes, applies drift
        compensation (from the second task onward), and fires the
        ``after_train`` hook.

        Args:
            training_results: Dict returned by the training strategy; may
                contain ``"best_acc"`` and ``"best_model"`` entries.
        """
        # Update internal state from training results (keys are optional).
        if "best_acc" in training_results:
            self.best_acc_cur = training_results["best_acc"]
        if "best_model" in training_results:
            self.best_model = training_results["best_model"]

        # Unwrap DataParallel so downstream code sees the bare module.
        if isinstance(self._network, nn.DataParallel):
            self._network = self._network.module

        # Compute class statistics (prototypes / class means).
        self.prototype_manager.fit(
            self.data_manager,
            self._extract_tokens,  # Pass the function to extract tokens
            self._known_classes,
            self._total_classes,
            self._device,
        )

        # Apply drift compensation if necessary (only meaningful once an
        # old network from a previous task exists).
        if self._cur_task > 0:
            self.drift_compensation.compensate(
                self._network,
                self._old_network,
                self.train_loader,
                self.prototype_manager.class_means,
                self._known_classes,
                self._device,
            )

        # Call after_train hooks
        self.hook_manager.call_hook(
            "after_train",
            learner=self,
            cur_task=self._cur_task,
            best_acc=self.best_acc_cur,
        )

    def after_task(self) -> None:
        """Post-task cleanup.

        Unwraps DataParallel, snapshots the model, promotes the just-learned
        classes into ``_known_classes``, lets the network do its own
        per-task cleanup, and fires the ``after_task`` hook.
        """
        if isinstance(self._network, nn.DataParallel):
            self._network = self._network.module
        self._backup_model()
        self._known_classes = self._total_classes

        self._network.after_task()

        # Call after_task hooks
        self.hook_manager.call_hook("after_task", learner=self, cur_task=self._cur_task)

    def eval_task(self) -> Tuple[Dict[str, Any], Optional[Dict[str, Any]]]:
        """Evaluate the model on all tasks seen so far.

        Returns:
            Tuple of ``(cnn_accs, nme_accs)``: per-task CNN accuracies, and
            per-task NME accuracies or ``None`` when NME evaluation was not
            performed.
        """
        # Prepare data loaders for all seen tasks (0..cur_task inclusive).
        data_loaders = {}
        for t_id in range(self._cur_task + 1):
            data_loaders[t_id] = self._get_task_test_loader(t_id)

        # NOTE(review): prototype fitting uses self._extract_tokens while the
        # NME path below uses self.extract_token — confirm both members exist
        # and are intentionally distinct.
        # Use evaluation manager to evaluate all tasks.
        results = self.evaluation_manager.evaluate_task(
            network=self._network,
            data_loaders=data_loaders,
            forward_fn=self.forward_test,
            task_id=self._cur_task,
            device=self._device,
            # NME needs fitted class means AND the evalNME flag (default on).
            use_nme=hasattr(self.prototype_manager, "class_means")
            and self.args.get("evalNME", True),
            nme_forward_fn=(
                self.extract_token if self.args.get("evalNME", True) else None
            ),
            nme_k=self.args.get("nme_k", -1),
            class_means=getattr(self.prototype_manager, "class_means", None),
            known_classes=self._known_classes,
        )

        # Return formatted results. dict.get already yields None when the
        # NME key is absent, which signals that NME evaluation did not run.
        cnn_accs = results.get("task_accuracy", {})
        nme_accs = results.get("nme_task_accuracy")

        return cnn_accs, nme_accs

    def eval_current_task(self):
        """Evaluate only the current task for quick assessment."""
        return self.evaluation_manager.evaluate_current_task(
            network=self._network,
            data_loader=self.test_loader,
            forward_fn=self.forward_test,
            task_id=self._cur_task,
            device=self._device,
        )
