from __future__ import annotations

import logging
import os

import numpy as np
from ray.rllib.algorithms import Algorithm
from ray.rllib.algorithms.callbacks import DefaultCallbacks
from ray.rllib.offline.json_reader import JsonReader
from ray.rllib.policy.sample_batch import (
    MultiAgentBatch,
    SampleBatch,
    concat_samples,
    convert_ma_batch_to_sample_batch,
)
from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2
from ray.rllib.utils.torch_utils import convert_to_torch_tensor

logger = logging.getLogger(__name__)


class CheckpointCallback(DefaultCallbacks):
    """Record the *next* checkpoint's directory name in each train result.

    The storage object derives ``checkpoint_dir_name`` from its current
    checkpoint index, so we temporarily bump the index to peek at the name
    the upcoming checkpoint will get, then restore it unchanged.
    """

    def on_train_result(self, *, algorithm: Algorithm, result: dict, **kwargs) -> None:
        storage = algorithm._storage
        if not storage:
            return
        # Peek at the name of the checkpoint that will be written next.
        storage.current_checkpoint_index += 1
        result["checkpoint_dir_name"] = storage.checkpoint_dir_name
        storage.current_checkpoint_index -= 1


def LoadCheckpointCallback(
    checkpoint_path: str,
    strict: bool = True,
    base_callback=CheckpointCallback,
):
    """Build a callback class that restores policy weights at algorithm init.

    Args:
        checkpoint_path (str): Path to a **policy checkpoint** to restore from.
        strict (bool): If True, push the full weight dict via
            ``Algorithm.set_weights``; if False, load into the local worker's
            policy models with ``strict=False`` (ignoring missing/extra keys).
        base_callback (MetricCallback): Callback class to subclass.

    Returns:
        Callback: The load checkpoint callback class.
    """

    class _LoadCheckpointCallback(base_callback):
        def on_algorithm_init(self, *, algorithm: Algorithm, **kwargs) -> None:
            # Missing checkpoint is non-fatal: warn and continue untouched.
            if not os.path.exists(checkpoint_path):
                logger.warning("Checkpoint path does not exist: %s", checkpoint_path)
                return super().on_algorithm_init(algorithm=algorithm, **kwargs)

            restored = TorchPolicyV2.from_checkpoint(checkpoint_path)
            # from_checkpoint may yield a single policy or a {policy_id: policy} map.
            if isinstance(restored, dict):
                weights = {pid: pol.get_weights() for pid, pol in restored.items()}
            else:
                weights = {"default_policy": restored.get_weights()}

            if strict:
                algorithm.set_weights(weights)
            else:
                # Non-strict: load state dicts directly so mismatched keys are
                # tolerated. NOTE(review): only the local worker is updated here.
                local_worker = algorithm.workers.local_worker()
                for pid, state in weights.items():
                    target: TorchPolicyV2 = local_worker.policy_map[pid]
                    tensors = convert_to_torch_tensor(state, device=target.device)
                    target.model.load_state_dict(tensors, strict=False)

            logger.info("Loaded checkpoint from: %s", checkpoint_path)
            super().on_algorithm_init(algorithm=algorithm, **kwargs)

    return _LoadCheckpointCallback


def MixOfflineCallback(
    dataset: "str | list[str]",
    mix_ratio_fn=None,
    base_callback=CheckpointCallback,
):
    """Utility function to create a mix offline callback.

    The produced callback replaces a fraction of each freshly collected
    (online) sample batch with timesteps read from an offline JSON dataset,
    then shuffles the result so online/offline rows are interleaved.

    Args:
        dataset (str | list[str]): Either a glob expression for files, or a list of file paths.
        mix_ratio_fn (Callable): A function mapping total sampled steps so far
            to the offline fraction in [0, 1]. Defaults to a constant 0.1.
        base_callback (MetricCallback): The base callback class.

    Returns:
        Callback: The mix offline callback class.
    """

    class _MixOfflineCallback(base_callback):
        def __init__(self, legacy_callbacks_dict=None):
            super().__init__(legacy_callbacks_dict)

            self.reader = JsonReader(dataset)
            # Default schedule: a constant 10% offline fraction.
            self.ratio_fn = mix_ratio_fn or (lambda _: 0.1)
            # Running count of online timesteps seen; feeds the ratio schedule.
            self.num_steps = 0

        def on_sample_end(self, *, worker, samples, **kwargs):
            self.num_steps += samples.count
            if isinstance(samples, SampleBatch):
                self._mix_batch(worker, samples)
            elif isinstance(samples, MultiAgentBatch):
                # Mix each per-policy batch independently.
                for sample in samples.policy_batches.values():
                    self._mix_batch(worker, sample)

        def _collect_offline(self, size: int) -> SampleBatch:
            """Read exactly `size` timesteps from the offline reader."""
            parts = []
            collected = 0
            while collected < size:
                next_batch = self.reader.next()
                if collected + next_batch.count <= size:
                    parts.append(next_batch)
                    collected += next_batch.count
                else:
                    # Truncate the final batch so the total is exactly `size`.
                    parts.append(next_batch.slice(0, size - collected))
                    collected = size
            offline = parts[0] if len(parts) == 1 else concat_samples(parts)
            return convert_ma_batch_to_sample_batch(offline)

        def _mix_batch(self, worker, batch: SampleBatch):
            """Replace a `ratio_fn`-sized prefix of `batch` with offline rows, in place."""
            mix_ratio = self.ratio_fn(self.num_steps)
            online_batch_size = len(batch)
            offline_batch_size = int(online_batch_size * mix_ratio)
            if offline_batch_size <= 0:
                return

            offline_batch = self._collect_offline(offline_batch_size)

            # Key fields that must be mixed
            essential_keys = [
                SampleBatch.OBS,
                SampleBatch.NEXT_OBS,
                SampleBatch.ACTIONS,
                SampleBatch.REWARDS,
                SampleBatch.TERMINATEDS,
                SampleBatch.TRUNCATEDS,
            ]
            for key in essential_keys:
                batch[key] = np.concatenate(
                    [
                        batch[key][offline_batch_size:],
                        offline_batch[key][:offline_batch_size],
                    ],
                    axis=0,  # along batch dimension
                )

            extra_keys = [key for key in batch.keys() if key not in essential_keys]
            for key in extra_keys:
                if key in offline_batch:
                    filler = offline_batch[key][:offline_batch_size]
                else:
                    # Column absent from the offline data: pad with zeros of the
                    # SAME trailing shape and dtype as the online column.
                    # (A bare np.zeros(offline_batch_size) would be rank-1 and
                    # break concatenation for multi-dimensional columns.)
                    filler = np.zeros_like(batch[key][:offline_batch_size])
                batch[key] = np.concatenate(
                    [batch[key][offline_batch_size:], filler],
                    axis=0,  # along batch dimension
                )

            # Shuffle so offline rows are not clustered at the end.
            shuffle_indices = np.random.permutation(batch.count)
            for key in batch.keys():
                batch[key] = batch[key][shuffle_indices]

    return _MixOfflineCallback
