import math
import os
from pathlib import Path
from typing import Any, Callable, Iterable

import torch
import torch.distributed.checkpoint as dcp
import torch.optim.lr_scheduler as lr_scheduler
from torch import Tensor
from torch.distributed.checkpoint import FileSystemReader, FileSystemWriter
from torch.optim import Adam, Optimizer
from tqdm import tqdm
from wandb.sdk.wandb_run import Run

from lm_saes.abstract_sae import AbstractSparseAutoEncoder
from lm_saes.config import TrainerConfig
from lm_saes.optim import SparseAdam, get_scheduler
from lm_saes.utils.distributed.ops import full_tensor
from lm_saes.utils.logging import get_distributed_logger, log_metrics
from lm_saes.utils.misc import is_primary_rank
from lm_saes.utils.tensor_dict import batch_size
from lm_saes.utils.timer import timer

logger = get_distributed_logger("trainer")


class Trainer:
    """Orchestrates sparse autoencoder training: schedules, checkpoints, logging."""

    def __init__(self, cfg: TrainerConfig):
        """Store the config; heavy state (optimizer, schedules) is built lazily in `fit`."""
        self.cfg = cfg
        # Progress counters.
        self.cur_step: int = 0
        self.cur_tokens: int = 0
        # Schedule lengths, resolved from the config in `_initialize_trainer`.
        self.total_training_steps: int = 0
        self.lr_warm_up_steps: int = 0
        self.lr_cool_down_steps: int = 0
        self.k_warmup_steps: int = 0
        self.k_cold_booting_steps: int = 0
        self.l1_coefficient_warmup_steps: int = 0
        # Token counts at which intermediate checkpoints are written.
        self.checkpoint_thresholds: list[int] = []
        # Created in `_initialize_optimizer`; wandb run attached in `_initialize_trainer`.
        self.optimizer: Optimizer | None = None
        self.scheduler: lr_scheduler.LRScheduler | None = None
        self.wandb_logger: Run | None = None

    def save_checkpoint(self, sae: AbstractSparseAutoEncoder, checkpoint_path: Path | str) -> None:
        """
        Save a complete checkpoint including model, optimizer, scheduler, and
        trainer state.

        Layout: ``<checkpoint_path>/checkpoints/step_<cur_step>/`` containing the
        hyperparameters, the model weights (safetensors for single-device runs,
        DCP for distributed ones), ``trainer.pt``, and the optimizer/scheduler
        state in the matching format.

        Args:
            sae: The sparse autoencoder model to save
            checkpoint_path: Base experiment path; the checkpoint is written to a
                step-specific subdirectory beneath it.
        """
        checkpoint_dir = Path(checkpoint_path) / "checkpoints" / f"step_{self.cur_step}"
        # mkdir with exist_ok avoids the check-then-create race of
        # os.path.exists + os.makedirs, and matches the file's pathlib style.
        checkpoint_dir.mkdir(parents=True, exist_ok=True)

        sae.cfg.save_hyperparameters(checkpoint_dir)
        # Model weights: safetensors on a single device, sharded DCP otherwise.
        if sae.device_mesh is None:
            sae.save_checkpoint(checkpoint_dir / "sae_weights.safetensors")
        else:
            sae.save_checkpoint(checkpoint_dir / "sae_weights.dcp")

        if is_primary_rank(sae.device_mesh):
            # Bookkeeping needed to resume training exactly where it stopped.
            trainer_state = {
                "cur_step": self.cur_step,
                "cur_tokens": self.cur_tokens,
                "total_training_steps": self.total_training_steps,
                "lr_warm_up_steps": self.lr_warm_up_steps,
                "lr_cool_down_steps": self.lr_cool_down_steps,
                "k_warmup_steps": self.k_warmup_steps,
                "k_cold_booting_steps": self.k_cold_booting_steps,
                "l1_coefficient_warmup_steps": self.l1_coefficient_warmup_steps,
                "checkpoint_thresholds": self.checkpoint_thresholds,
                "cfg": self.cfg,
            }
            torch.save(trainer_state, checkpoint_dir / "trainer.pt")

        def _save_component(state: dict, name: str) -> None:
            # Single-device: plain torch.save on the primary rank only.
            # Distributed: dcp.save is a collective and must run on every rank.
            if sae.device_mesh is None:
                if is_primary_rank(sae.device_mesh):
                    torch.save(state, checkpoint_dir / f"{name}.pt")
            else:
                fs_writer = FileSystemWriter(checkpoint_dir / f"{name}.dcp")
                dcp.save(state, storage_writer=fs_writer)

        # Optimizer / scheduler state - handle distributed tensors.
        if self.optimizer is not None:
            _save_component(self.optimizer.state_dict(), "optimizer")
        if self.scheduler is not None:
            _save_component(self.scheduler.state_dict(), "scheduler")

        # Report the directory actually written, not just the base path.
        logger.info(f"Checkpoint saved to {checkpoint_dir}")

    @classmethod
    def from_checkpoint(
        cls,
        sae: AbstractSparseAutoEncoder,
        checkpoint_path: str,
    ) -> "Trainer":
        """
        Load a complete checkpoint including model, optimizer, scheduler, and
        trainer state.

        Args:
            sae: The sparse autoencoder whose optimizer/scheduler are rebuilt; its
                ``device_mesh`` decides whether plain ``.pt`` or DCP files are read.
            checkpoint_path: Directory where the checkpoint was saved.

        Returns:
            Trainer: A new trainer instance with loaded state

        Raises:
            ValueError: If the trainer state file or its embedded config is missing.
        """
        checkpoint_dir = Path(checkpoint_path)
        trainer_path = checkpoint_dir / "trainer.pt"
        if not trainer_path.exists():
            raise ValueError(f"Trainer checkpoint not found at {trainer_path}")

        # weights_only=False is required because the config object is pickled into
        # the checkpoint; only load checkpoints from trusted sources.
        trainer_state = torch.load(trainer_path, map_location="cpu", weights_only=False)
        cfg = trainer_state.get("cfg")
        if cfg is None:
            raise ValueError("Checkpoint does not contain trainer config")

        # Create trainer instance with loaded config
        trainer = cls(cfg)
        trainer.cfg.from_pretrained_path = checkpoint_path

        # Restore counters and schedule lengths so training resumes mid-schedule.
        for key in (
            "cur_step",
            "cur_tokens",
            "total_training_steps",
            "lr_warm_up_steps",
            "lr_cool_down_steps",
            "k_warmup_steps",
            "k_cold_booting_steps",
            "l1_coefficient_warmup_steps",
            "checkpoint_thresholds",
        ):
            setattr(trainer, key, trainer_state[key])

        logger.info(f"Loaded trainer state from step {trainer.cur_step}")

        trainer._initialize_optimizer(sae)
        assert trainer.optimizer is not None and trainer.scheduler is not None, (
            "Optimizer and scheduler should be already initialized"
        )

        def _load_component(component, name: str) -> None:
            # Single-device runs read a plain torch file; distributed runs load the
            # DCP shards into the freshly-initialized state dict, then apply it.
            if sae.device_mesh is None:
                state = torch.load(checkpoint_dir / f"{name}.pt", map_location="cpu")
            else:
                fs_reader = FileSystemReader(str(checkpoint_dir / f"{name}.dcp"))
                state = component.state_dict()
                dcp.load(state, storage_reader=fs_reader)
            component.load_state_dict(state)
            logger.info(f"Loaded {name} state")

        _load_component(trainer.optimizer, "optimizer")
        _load_component(trainer.scheduler, "scheduler")

        logger.info(f"Checkpoint loaded from {checkpoint_path}")
        return trainer

    @timer.time("initialize_trainer")
    def _initialize_trainer(
        self,
        sae: AbstractSparseAutoEncoder,
        activation_stream: Iterable[dict[str, Tensor]],
        wandb_logger: Run | None = None,
    ):
        """Resolve step counts, warmup schedule lengths and checkpoint thresholds.

        The batch size is probed from the stream to convert the token budget into
        a step budget. NOTE(review): this peek consumes one batch when
        ``activation_stream`` is a one-shot iterator — confirm callers pass a
        re-iterable stream.

        Args:
            sae: The model being trained (currently unused here).
            activation_stream: Stream of activation batches; only peeked for batch size.
            wandb_logger: Optional Weights & Biases run used for metric logging.
        """
        bs = batch_size(next(iter(activation_stream)))
        self.total_training_steps = self.cfg.total_training_tokens // bs

        def calculate_warmup_steps(warmup_steps: float | int) -> int:
            # Floats are fractions of the whole run; ints are absolute step counts.
            if isinstance(warmup_steps, float):
                # Raise instead of assert: validation must survive `python -O`.
                if not 0.0 <= warmup_steps <= 1.0:
                    raise ValueError(f"Fractional warmup steps must lie in [0, 1], got {warmup_steps}")
                return int(warmup_steps * self.total_training_steps)
            return warmup_steps

        self.lr_warm_up_steps = calculate_warmup_steps(self.cfg.lr_warm_up_steps)
        self.lr_cool_down_steps = calculate_warmup_steps(self.cfg.lr_cool_down_steps)
        self.k_warmup_steps = calculate_warmup_steps(self.cfg.k_warmup_steps)
        self.k_cold_booting_steps = calculate_warmup_steps(self.cfg.k_cold_booting_steps)
        self.l1_coefficient_warmup_steps = calculate_warmup_steps(self.cfg.l1_coefficient_warmup_steps)
        if self.cfg.n_checkpoints > 0:
            if self.cfg.check_point_save_mode == "linear":
                # Evenly spaced token counts; drop the leading 0 so no checkpoint
                # is written before any training has happened.
                self.checkpoint_thresholds = list(
                    range(0, self.cfg.total_training_tokens, self.cfg.total_training_tokens // self.cfg.n_checkpoints)
                )[1:]
            elif self.cfg.check_point_save_mode == "log":
                # Geometrically spaced step counts, converted back to token counts.
                self.checkpoint_thresholds = [
                    math.ceil(2 ** (i / self.cfg.n_checkpoints * math.log2(self.total_training_steps))) * bs
                    for i in range(1, self.cfg.n_checkpoints)
                ]
        self.wandb_logger = wandb_logger

    @timer.time("initialize_optimizer")
    def _initialize_optimizer(self, sae: AbstractSparseAutoEncoder):
        """Build the optimizer (Adam or SparseAdam) and LR scheduler for `sae`."""
        assert isinstance(self.cfg.lr, float)

        def _apply_lr(parameters: dict[str, Any]):
            # The jumprelu threshold group trains with a scaled learning rate.
            assert isinstance(self.cfg.lr, float)
            if parameters["name"] != "jumprelu":
                return parameters
            return {**parameters, "lr": self.cfg.jumprelu_lr_factor * self.cfg.lr}

        params = [_apply_lr(group) for group in sae.get_parameters()]

        def _format_parameters(parameters: dict[str, Any]) -> str:
            # One header line per group, one indented line per tensor.
            lines = [f"{parameters['name']}:"]
            for i, param in enumerate(parameters["params"]):
                state = "trainable" if param.requires_grad else "frozen"
                lines.append(f"    [{i}] shape={list(param.shape)}, dtype={param.dtype}, {state}")
            if "lr" in parameters:
                lines.append(f"    lr={parameters['lr']}")
            return "\n".join(lines)

        param_str = "\n".join(_format_parameters(group) for group in params)
        logger.info(f"\nParameter Groups: \n{param_str}\n")

        optim_cls = {
            "adam": Adam,
            "sparseadam": SparseAdam,
        }[self.cfg.optimizer_class]

        optimizer_kwargs: dict[str, Any] = {
            "params": params,
            "lr": self.cfg.lr,
            "betas": self.cfg.betas,
        }
        # only adam optimizer supports foreach parameter
        if self.cfg.optimizer_class == "adam":
            optimizer_kwargs["foreach"] = self.cfg.optimizer_foreach

        self.optimizer = optim_cls(**optimizer_kwargs)
        self.scheduler = get_scheduler(
            scheduler_name=self.cfg.lr_scheduler_name,
            optimizer=self.optimizer,
            warm_up_steps=self.lr_warm_up_steps,
            cool_down_steps=self.lr_cool_down_steps,
            training_steps=self.total_training_steps,
            lr_end_ratio=self.cfg.lr_end_ratio,
        )

    @timer.time("training_step")
    def _training_step(
        self,
        sae: AbstractSparseAutoEncoder,
        batch: dict[str, Tensor],
    ) -> dict[str, Tensor]:
        """Run one forward/loss pass, applying the k and l1 warmup schedules.

        Args:
            sae: The model to compute the loss for.
            batch: One batch of (already normalized) activations.

        Returns:
            The loss plus loss components and auxiliary data as one flat dict.
        """
        # Anneal k from initial_k down to top_k for topk activation functions.
        if "topk" in sae.cfg.act_fn and self.k_warmup_steps > 0:
            assert self.cfg.initial_k is not None, "initial_k must be provided"
            assert self.cfg.initial_k >= sae.cfg.top_k, "initial_k must be greater than or equal to top_k"
            if self.cur_step < self.k_cold_booting_steps:
                # Cold-boot phase: hold k at its initial value.
                sae.set_current_k(int(self.cfg.initial_k))
            else:
                warmup_progress = (self.cur_step - self.k_cold_booting_steps) / self.k_warmup_steps
                warmup_progress = min(1.0, warmup_progress)

                if self.cfg.k_schedule_type == "exponential":
                    # Normalized exponential decay: 0 at progress 0, 1 at progress 1.
                    exp_factor = self.cfg.k_exponential_factor
                    decay_factor = (1.0 - math.exp(-exp_factor * warmup_progress)) / (1.0 - math.exp(-exp_factor))
                    current_k = self.cfg.initial_k - (self.cfg.initial_k - sae.cfg.top_k) * decay_factor
                else:
                    # "linear" and any unrecognized schedule type both fall back to
                    # linear interpolation (the original branches were duplicates).
                    current_k = self.cfg.initial_k + (sae.cfg.top_k - self.cfg.initial_k) * warmup_progress

                sae.set_current_k(
                    max(
                        sae.cfg.top_k,
                        math.ceil(current_k),
                    )
                )

        # Warm the l1 coefficient up linearly. Guard the division: with
        # l1_coefficient_warmup_steps == 0 the original expression raised
        # ZeroDivisionError; in that case apply the full coefficient immediately.
        if self.cfg.l1_coefficient is None:
            l1_coefficient = 1.0
        elif self.l1_coefficient_warmup_steps > 0:
            l1_coefficient = min(1.0, self.cur_step / self.l1_coefficient_warmup_steps) * self.cfg.l1_coefficient
        else:
            l1_coefficient = self.cfg.l1_coefficient

        lp_coefficient = self.cfg.lp_coefficient if self.cfg.lp_coefficient is not None else 0.0

        loss, (loss_data, aux_data) = sae.compute_loss(
            batch,
            sparsity_loss_type=self.cfg.sparsity_loss_type,
            tanh_stretch_coefficient=self.cfg.tanh_stretch_coefficient,
            p=self.cfg.p,
            use_batch_norm_mse=self.cfg.use_batch_norm_mse,
            return_aux_data=True,
            l1_coefficient=l1_coefficient,
            lp_coefficient=lp_coefficient,
            frequency_scale=self.cfg.frequency_scale,
        )

        # Flatten loss, coefficients and auxiliary data into a single dict for logging.
        loss_dict = (
            {
                "loss": loss,
                "batch_size": batch_size(batch),
                "l1_coefficient": l1_coefficient,
                "lp_coefficient": lp_coefficient,
            }
            | loss_data
            | aux_data
        )
        return loss_dict

    @torch.no_grad()
    @timer.time("log")
    def _log(self, sae: AbstractSparseAutoEncoder, log_info: dict, batch: dict[str, Tensor]):
        """Log training metrics and sparsity statistics.

        Delegates model-specific logging to the model's methods.

        Mutates ``log_info`` in place: activation-frequency scores and token counts
        accumulate across calls and are reset after each sparsity-logging window.
        """
        assert self.optimizer is not None, "Optimizer must be initialized"
        label = sae.prepare_label(batch)

        # Prepare logging data (model-specific transformations)
        log_info, label = sae.prepare_logging_data(log_info.copy(), label)

        # Compute activation frequency scores
        act_freq_scores = sae.compute_activation_frequency_scores(log_info["feature_acts"])
        # Gather distributed shards into a regular tensor before accumulating.
        act_freq_scores = full_tensor(act_freq_scores)

        log_info["act_freq_scores"] += act_freq_scores
        log_info["n_frac_active_tokens"] += log_info["batch_size"]

        # Log sparsity metrics periodically
        if (self.cur_step + 1) % self.cfg.feature_sampling_window == 0:
            # Per-feature firing frequency over the window that just ended.
            feature_sparsity = log_info["act_freq_scores"] / log_info["n_frac_active_tokens"]
            wandb_log_dict = sae.compute_sparsity_metrics(feature_sparsity)

            if is_primary_rank(sae.device_mesh):
                log_metrics(logger.logger, wandb_log_dict, step=self.cur_step + 1, title="Sparsity Metrics")
            if self.wandb_logger is not None:
                self.wandb_logger.log(wandb_log_dict, step=self.cur_step + 1)
            # Reset the accumulators for the next sampling window.
            log_info["act_freq_scores"] = torch.zeros_like(log_info["act_freq_scores"])
            log_info["n_frac_active_tokens"] = torch.zeros_like(log_info["n_frac_active_tokens"])

        # Log training metrics periodically
        if (self.cur_step + 1) % self.cfg.log_frequency == 0:
            feature_acts = log_info["feature_acts"]
            reconstructed = log_info["reconstructed"]

            # Convert DTensors to regular tensors for computation
            feature_acts = full_tensor(feature_acts)
            reconstructed = full_tensor(reconstructed)
            label = full_tensor(label)

            # Compute common metrics
            # Mean activation value over features that actually fired.
            act_feature_counts = feature_acts.gt(0).float().sum()
            mean_feature_act = feature_acts.sum() / act_feature_counts
            mean_feature_act = full_tensor(mean_feature_act)

            # L0: number of active features per token.
            l0 = (feature_acts > 0).float().sum(-1)
            l0 = full_tensor(l0)

            l_rec = full_tensor(log_info["l_rec"])
            # Sparsity / Lp penalty terms are optional depending on the loss config.
            l_s = full_tensor(log_info.get("l_s", None)) if log_info.get("l_s", None) is not None else None  # pyright: ignore[reportArgumentType]
            l_p = full_tensor(log_info.get("l_p", None)) if log_info.get("l_p", None) is not None else None  # pyright: ignore[reportArgumentType]

            # Compute reconstruction metrics
            per_token_l2_loss = (reconstructed - label).pow(2).sum(dim=-1)
            total_variance = (label - label.mean(dim=0)).pow(2).sum(dim=-1)
            l2_norm_error = per_token_l2_loss.sqrt().mean()
            l2_norm_error_ratio = l2_norm_error / label.norm(p=2, dim=-1).mean()
            # Legacy variant: per-token variance ratio; new variant: ratio of batch means.
            explained_variance_legacy = 1 - per_token_l2_loss / total_variance
            l2_loss_mean = per_token_l2_loss.mean(dim=0)
            total_variance_mean = total_variance.mean(dim=0)
            explained_variance = 1 - l2_loss_mean / total_variance_mean

            # Add model-specific training metrics (may modify l0 shape)
            model_metrics = sae.compute_training_metrics(
                feature_acts=feature_acts,
                reconstructed=reconstructed,
                label=label,
                l_rec=l_rec,
                l0=l0,
                explained_variance=explained_variance,
                explained_variance_legacy=explained_variance_legacy,
            )

            # Aggregate l0 for overall metric if needed (e.g., CLT sums over layers)
            l0_for_overall = sae.aggregate_l0(l0)

            # Build base metrics dictionary
            wandb_log_dict = {
                # losses
                "losses/mse_loss": l_rec.mean().item(),
                **({"losses/sparsity_loss": l_s.mean().item()} if l_s is not None else {}),
                **({"losses/lp_loss": l_p.mean().item()} if l_p is not None else {}),
                "losses/overall_loss": full_tensor(log_info["loss"]).item(),
                # variance explained
                "metrics/explained_variance": explained_variance.mean().item(),
                "metrics/explained_variance_legacy": explained_variance_legacy.mean().item(),
                # sparsity
                "metrics/l0": l0_for_overall.mean().item(),
                "metrics/mean_feature_act": mean_feature_act.item(),
                "metrics/l2_norm_error": l2_norm_error.item(),
                "metrics/l2_norm_error_ratio": l2_norm_error_ratio.item(),
                # details
                "details/current_learning_rate": self.optimizer.param_groups[0]["lr"],
                "details/n_training_tokens": self.cur_tokens,
                "details/l1_coefficient": log_info["l1_coefficient"],
                "details/lp_coefficient": log_info["lp_coefficient"],
            }

            # Add model-specific metrics
            wandb_log_dict.update(model_metrics)

            # Add timer information
            wandb_log_dict.update(sae.log_statistics())

            if is_primary_rank(sae.device_mesh):
                log_metrics(logger.logger, wandb_log_dict, step=self.cur_step + 1, title="Training Metrics")

            if timer.enabled:
                logger.info(f"\nTimer Summary:\n{timer.summary()}\n")

            if self.wandb_logger is not None:
                self.wandb_logger.log(wandb_log_dict, step=self.cur_step + 1)

    @timer.time("save_checkpoint")
    def _maybe_save_sae_checkpoint(self, sae: AbstractSparseAutoEncoder):
        """Write an intermediate weights-only checkpoint once the next token threshold is crossed."""
        if not self.checkpoint_thresholds or self.cur_tokens < self.checkpoint_thresholds[0]:
            return
        extension = "safetensors" if sae.device_mesh is None else "dcp"
        target = os.path.join(
            self.cfg.exp_result_path,
            "checkpoints",
            f"{self.cur_step}.{extension}",
        )
        sae.save_checkpoint(target)
        # Consume the threshold we just satisfied.
        self.checkpoint_thresholds.pop(0)

    def fit(
        self,
        sae: AbstractSparseAutoEncoder,
        activation_stream: Iterable[dict[str, Tensor]],
        eval_fn: Callable[[AbstractSparseAutoEncoder], None] | None = None,
        wandb_logger: Run | None = None,
    ) -> bool | None:
        """Run the training loop until the token budget is spent or the stream ends.

        Args:
            sae: The sparse autoencoder to train.
            activation_stream: Iterable of activation batches.
            eval_fn: Optional callback invoked every ``eval_frequency`` steps.
            wandb_logger: Optional Weights & Biases run for metric logging.

        Returns:
            True if the activation stream was exhausted before the token budget
            was reached; None if training completed the full token budget.
        """
        # Reset timer at the start of training
        timer.reset()

        # When resuming (`from_pretrained_path` set), trainer/optimizer state was
        # already restored by `from_checkpoint`; only initialize for fresh runs.
        if self.cfg.from_pretrained_path is None:
            logger.info("Initializing trainer and optimizer")
            self._initialize_trainer(sae, activation_stream, wandb_logger)
            self._initialize_optimizer(sae)

        assert self.optimizer is not None and self.scheduler is not None, (
            "Optimizer and scheduler should be already initialized"
        )

        maybe_local_d_sae = sae.cfg.d_sae  # if sae.device_mesh is None else sae.cfg.d_sae // sae.device_mesh.size()
        # CLT models track activation frequencies per layer; others use a flat vector.
        if sae.cfg.sae_type == "clt":
            act_freq_scores_shape = (
                sae.cfg.n_layers,  # type: ignore
                maybe_local_d_sae,
            )
        else:
            act_freq_scores_shape = (maybe_local_d_sae,)  # type: ignore
        # Accumulators consumed and reset by `_log`.
        log_info = {
            "act_freq_scores": torch.zeros(act_freq_scores_shape, device=sae.cfg.device, dtype=sae.cfg.dtype),
            "n_frac_active_tokens": torch.tensor([0], device=sae.cfg.device, dtype=torch.int),
        }
        proc_bar = tqdm(total=self.total_training_steps, smoothing=0.001, disable=not is_primary_rank(sae.device_mesh))
        # Fast-forward the bar when resuming from a checkpoint.
        proc_bar.update(self.cur_step)

        try:
            activation_stream = iter(activation_stream)
            batch = next(activation_stream)
            while True:
                with timer.time("training_iteration"):
                    proc_bar.update(1)

                    batch = sae.normalize_activations(batch)

                    sae.train()

                    # Forward/loss under autocast; backward runs outside it below.
                    with torch.autocast(device_type=sae.cfg.device, dtype=self.cfg.amp_dtype):
                        loss_dict = self._training_step(sae, batch)

                    log_info.update(loss_dict)
                    proc_bar.set_description(
                        f"loss: {log_info['loss'].item():.2f}, learning rate: {self.optimizer.param_groups[0]['lr']:.2e}"
                    )

                    if not self.cfg.skip_metrics_calculation:
                        self._log(sae, log_info, batch)

                    # NOTE(review): the next batch is prefetched here, BEFORE
                    # backward/step run for the current one; a StopIteration here
                    # ends training without counting the current batch's tokens.
                    with timer.time("refresh_batch"):
                        del batch
                        batch = next(activation_stream)

                    with timer.time("backward"):
                        loss_dict["loss"].backward()

                    with timer.time("clip_grad_norm"):
                        # exclude the grad of the jumprelu threshold
                        assert sae.device_mesh is None or self.cfg.clip_grad_norm <= 0, (
                            "clip_grad_norm must be 0 for distributed training"
                        )
                        # With clip_grad_norm <= 0, max_norm=inf makes this a no-op
                        # clip that still records the gradient norm for logging.
                        loss_dict["grad_norm"] = torch.nn.utils.clip_grad_norm_(
                            [
                                param
                                for name, param in sae.named_parameters()
                                if param.grad is not None and "log_jumprelu_threshold" not in name
                            ],
                            max_norm=self.cfg.clip_grad_norm if self.cfg.clip_grad_norm > 0 else math.inf,
                        )

                    with timer.time("optimizer_step"):
                        self.optimizer.step()
                        self.optimizer.zero_grad()

                    if eval_fn is not None and (self.cur_step + 1) % self.cfg.eval_frequency == 0:
                        with timer.time("evaluation"):
                            eval_fn(sae)

                    self._maybe_save_sae_checkpoint(sae)
                    with timer.time("scheduler_step"):
                        self.scheduler.step()

                    self.cur_step += 1
                    # NOTE(review): this counts the size of the batch prefetched for
                    # the NEXT step, not the one just trained on — confirm intended
                    # (equivalent only when all batches have the same size).
                    self.cur_tokens += batch_size(batch)
                    if self.cur_tokens >= self.cfg.total_training_tokens:
                        break
        except StopIteration:
            logger.info("the current stream has ended")
            return True
        except Exception as e:
            logger.error(f"Training failed: {e}")
            raise e