from typing import Dict, List, Tuple

import gymnasium as gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.modelv2 import restore_original_dimensions
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.offline.json_reader import JsonReader
from ray.rllib.policy.sample_batch import convert_ma_batch_to_sample_batch
from ray.rllib.utils.annotations import override


class DistillModel(TorchModelV2, nn.Module):
    """Policy-distillation model: a teacher network guides a student network.

    ``forward()`` and ``value_function()`` delegate entirely to the student;
    ``custom_loss()`` adds an alpha-weighted KL(student || teacher) penalty on
    the action logits, with the teacher evaluated under ``torch.no_grad()``.
    """

    def __init__(
        self,
        obs_space: gym.Space,
        action_space: gym.Space,
        num_outputs: int,
        model_config: Dict,
        name: str,
    ):
        TorchModelV2.__init__(
            self, obs_space, action_space, num_outputs, model_config, name
        )
        nn.Module.__init__(self)
        # Preprocessed obs spaces keep the raw space in `original_space`.
        self.origin_space = getattr(obs_space, "original_space", obs_space)

        internal_model_config = model_config["custom_model_config"]
        # Distillation weight and its (optional) geometric decay schedule:
        # alpha decays by `alpha_decay` per training iteration, floored at
        # `alpha_end`, for the first `decay_iter` iterations.
        self.alpha = internal_model_config.get("alpha", 0.1)
        self.alpha_end = internal_model_config.get("alpha_end", 0.1)
        self.alpha_decay = internal_model_config.get("alpha_decay", 0.99)
        self.decay_iter = internal_model_config.get("decay_iter", 1000)
        self.current_iter = 0

        # Teacher (frozen at loss time via no_grad) and student sub-models,
        # each built from its own nested model config.
        self.teacher_model = ModelCatalog.get_model_v2(
            obs_space,
            action_space,
            num_outputs,
            internal_model_config["teacher_model"],
            "torch",
            "teacher_model",
        )
        self.student_model = ModelCatalog.get_model_v2(
            obs_space,
            action_space,
            num_outputs,
            internal_model_config["student_model"],
            "torch",
            "student_model",
        )

        self._value = None
        # Initialize metrics so metrics() is safe to call before the first
        # custom_loss() (previously this raised AttributeError).
        self.policy_loss_metric = 0.0
        self.kl_loss_metric = 0.0

    @override(TorchModelV2)
    def forward(
        self,
        input_dict: Dict[str, torch.Tensor],
        state: List[torch.Tensor],
        seq_lens: torch.Tensor,
    ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        """Forward pass for the model (delegates to the student).

        Args:
            input_dict (Dict[str, torch.Tensor]): The input dictionary from the environment.
            state (List[torch.Tensor]): List of RNN states (if any).
            seq_lens (torch.Tensor): Sequence lengths (if applicable).

        Returns:
            Tuple[torch.Tensor, List[torch.Tensor]]: The output logits and the updated state.
        """
        logits, state = self.student_model(input_dict, state, seq_lens)
        return logits, state

    @override(TorchModelV2)
    def custom_loss(self, policy_loss, loss_inputs):
        """Add an alpha-weighted student->teacher KL term to each policy loss.

        Args:
            policy_loss: List of per-optimizer policy loss tensors.
            loss_inputs: The train batch used to recompute logits.

        Returns:
            List of loss tensors, each augmented with ``alpha * kl_loss``.
        """
        self.current_iter += 1

        # The teacher provides targets only; keep it out of the graph.
        with torch.no_grad():
            teacher_logits, _ = self.teacher_model(loss_inputs, [], None)

        student_logits, _ = self.student_model(loss_inputs, [], None)

        # KL(teacher || student) per F.kl_div's convention: input is the
        # student log-probs, target is the teacher probs.
        kl_loss = F.kl_div(
            F.log_softmax(student_logits, dim=-1),
            F.softmax(teacher_logits, dim=-1),
            reduction="batchmean",
        )
        self.policy_loss_metric = np.mean([loss.item() for loss in policy_loss])
        self.kl_loss_metric = kl_loss.item()

        # Decay alpha geometrically toward alpha_end during the first
        # decay_iter iterations.
        # BUG FIX: the previous code multiplied alpha by
        # (1 - alpha_decay) ** current_iter on every call, double-compounding
        # the decay (alpha_t = alpha_0 * (1-d)^(t(t+1)/2)); with the default
        # alpha_decay=0.99 that collapsed alpha to alpha_end after a single
        # iteration, ignoring the configured schedule.
        if self.current_iter <= self.decay_iter:
            self.alpha = max(self.alpha_end, self.alpha * self.alpha_decay)
        else:
            self.alpha = self.alpha_end

        # Alternatively (if custom loss has its own optimizer):
        # return policy_loss + [self.alpha * kl_loss]
        return [loss_ + self.alpha * kl_loss for loss_ in policy_loss]

    @override(TorchModelV2)
    def metrics(self):
        """Report the most recent policy and distillation KL loss values."""
        return {
            "policy_loss": self.policy_loss_metric,
            "distill_kl_loss": self.kl_loss_metric,
        }

    @override(TorchModelV2)
    def value_function(self) -> torch.Tensor:
        """Computes the value function for the current state (student's head).

        Returns:
            torch.Tensor: The value estimates.
        """
        return self.student_model.value_function()


class SuperviseModel(TorchModelV2, nn.Module):
    """Model whose ``custom_loss()`` adds an imitation (behavior-cloning) term.

    Each loss call draws a batch from an offline JSON dataset, runs the inner
    model on those observations, and penalizes the negative log-likelihood of
    the logged actions, weighted by ``imitation_weight``.
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config, name):
        TorchModelV2.__init__(
            self, obs_space, action_space, num_outputs, model_config, name
        )
        nn.Module.__init__(self)
        # Preprocessed obs spaces keep the raw space in `original_space`.
        self.origin_space = getattr(obs_space, "original_space", obs_space)
        internal_model_config = model_config["custom_model_config"]

        # Offline dataset of logged (obs, action) samples for imitation.
        dataset = internal_model_config["dataset"]
        batch_size = internal_model_config.get("batch_size", 256)
        self.reader = JsonReader(dataset)
        # NOTE(review): batch_size is stashed on the reader and used below to
        # cap the slice size; JsonReader itself may ignore this attribute.
        self.reader.batch_size = batch_size

        # Weight on the imitation term added to the policy loss
        # (generalized from the previous hard-coded 10; default preserved).
        self.imitation_weight = internal_model_config.get("imitation_weight", 10.0)

        self.inner_model = ModelCatalog.get_model_v2(
            obs_space,
            action_space,
            num_outputs,
            internal_model_config,
            "torch",
            "inner_model",
        )

        # Initialize metrics so metrics() is safe to call before the first
        # custom_loss() (previously this raised AttributeError).
        self.imitation_loss_metric = 0.0
        self.policy_loss_metric = 0.0

    @override(TorchModelV2)
    def forward(self, input_dict, state, seq_lens):
        """Delegate the forward pass to the wrapped inner model."""
        return self.inner_model(input_dict, state, seq_lens)

    @override(TorchModelV2)
    def value_function(self):
        """Delegate value estimation to the wrapped inner model."""
        return self.inner_model.value_function()

    @override(TorchModelV2)
    def custom_loss(self, policy_loss, loss_inputs):
        """Add a behavior-cloning loss on an offline batch to each policy loss.

        Args:
            policy_loss: List of per-optimizer policy loss tensors.
            loss_inputs: The on-policy train batch (unused; the imitation term
                is computed from the offline reader instead).

        Returns:
            List of loss tensors, each augmented with
            ``imitation_weight * imitation_loss``.
        """
        batch = self.reader.next()
        batch = convert_ma_batch_to_sample_batch(batch)

        # Cap the offline batch at the configured size.
        if batch.count > self.reader.batch_size:
            batch = batch.slice(0, self.reader.batch_size)

        # Rebuild the (possibly nested) original obs structure on the same
        # device as the policy loss.
        obs = restore_original_dimensions(
            torch.from_numpy(batch["obs"]).float().to(policy_loss[0].device),
            self.obs_space,
            tensorlib="torch",
        )
        logits, _ = self.forward({"obs": obs}, [], None)

        # Compute the IL loss: mean negative log-prob of the logged actions
        # under the current action distribution.
        dist_cls, _ = ModelCatalog.get_action_dist(
            self.action_space,
            self.model_config,
            framework="torch",
        )
        action_dist = dist_cls(logits, self.model_config)
        imitation_loss = torch.mean(
            -action_dist.logp(
                torch.from_numpy(batch["actions"]).to(policy_loss[0].device)
            )
        )
        self.imitation_loss_metric = imitation_loss.item()
        self.policy_loss_metric = np.mean([loss.item() for loss in policy_loss])

        # Add the imitation loss to each already calculated policy loss term.
        # Alternatively (if custom loss has its own optimizer):
        # return policy_loss + [self.imitation_weight * imitation_loss]
        return [loss_ + self.imitation_weight * imitation_loss for loss_ in policy_loss]

    @override(TorchModelV2)
    def metrics(self):
        """Report the most recent policy and imitation loss values."""
        return {
            "policy_loss": self.policy_loss_metric,
            "imitation_loss": self.imitation_loss_metric,
        }


# Make both custom models available to RLlib configs under their string names.
for _model_name, _model_cls in (
    ("distill_model", DistillModel),
    ("supervise_model", SuperviseModel),
):
    ModelCatalog.register_custom_model(_model_name, _model_cls)
