import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from typing import Tuple


def get_batch(
    x: np.ndarray, batch_size: int, context_length: int, device: str
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Takes a numpy array of token IDs, batches them, and returns a pair of tensors:
    - (batch_size, context_length) — the actual batch
    - (batch_size, context_length) — the next token ID for each sample in the batch

    Args:
      x: np.ndarray — integer array of training data
      batch_size: int — number of samples per batch
      context_length: int — length of each sequence in batch
      device: str — device to load the data on

    Raises:
      ValueError: if `x` is too short to hold one full sequence plus the
        one-token-shifted target.
    """
    # Largest valid start index: we need context_length tokens plus one extra
    # token for the shifted target.
    max_start_idx = len(x) - context_length - 1

    if max_start_idx < 0:
        raise ValueError(
            f"Input array length {len(x)} is too short for context_length {context_length}"
        )

    # Sample all batch start positions at once.
    start_indices = np.random.randint(0, max_start_idx + 1, size=batch_size)

    # Vectorized gather: broadcast (batch, 1) starts against (context,) offsets
    # into a (batch, context) index grid, replacing the per-row Python loop.
    idx = start_indices[:, None] + np.arange(context_length)
    x_batch = torch.from_numpy(x[idx].astype(np.int64, copy=False))
    # Targets are the same windows shifted right by one token.
    y_batch = torch.from_numpy(x[idx + 1].astype(np.int64, copy=False))

    if device.startswith("cuda"):
        # Pinned (page-locked) host memory enables asynchronous host-to-device
        # copies, speeding up the transfer.
        x_batch = x_batch.pin_memory().to(device, non_blocking=True)
        y_batch = y_batch.pin_memory().to(device, non_blocking=True)
    else:
        # Move tensors to the requested device (e.g. "cpu").
        x_batch, y_batch = x_batch.to(device), y_batch.to(device)

    return x_batch, y_batch


class SequentialTokenDataset(Dataset):
    """
    A PyTorch Dataset for language modeling that traverses the token array
    sequentially: samples stride by `context_length` from a random base
    position, so consecutive indices yield adjacent, non-overlapping windows.
    """

    def __init__(
        self, data: np.ndarray, context_length: int, samples_per_epoch: int = None
    ):
        """
        Initialize the dataset.

        Args:
          data: np.ndarray — integer array of training data
          context_length: int — length of each sequence in batch
          samples_per_epoch: int — number of samples per epoch (if None, use all possible)

        Raises:
          ValueError: if `data` is too short to hold one sequence plus the
            one-token-shifted target.
        """
        self.data = data
        self.context_length = context_length
        # Largest valid start index: context_length tokens plus one extra token
        # for the shifted target must fit.
        self.max_start_idx = len(data) - context_length - 1

        if self.max_start_idx < 0:
            raise ValueError(
                f"Input array length {len(data)} is too short for context_length {context_length}"
            )

        # Cap at the number of available start positions, and floor at 1 so a
        # minimal input (max_start_idx == 0) still yields one sample instead of
        # an empty dataset / division-by-zero downstream.
        if samples_per_epoch is None or samples_per_epoch > self.max_start_idx:
            samples_per_epoch = self.max_start_idx
        self.samples_per_epoch = max(1, samples_per_epoch)

        # Random base for the sequential walk, chosen so the last sample's
        # start (base + (samples_per_epoch - 1) * context_length) stays in
        # bounds.  Guarded so the base can never be negative.
        max_base = (
            self.max_start_idx
            - (self.samples_per_epoch - 1) * self.context_length
        )
        self.base_position = (
            int(np.random.randint(0, max_base + 1)) if max_base > 0 else 0
        )

    def __len__(self) -> int:
        # Number of samples served per epoch.
        return self.samples_per_epoch

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Get a single input-target pair.

        Args:
          idx: int — index in the dataset

        Returns:
          tuple of (input_sequence, target_sequence), each of shape
          (context_length,) and dtype int64; the target is the input shifted
          one token to the right.
        """
        # Sequential traversal: each index steps context_length tokens forward
        # from the random base position.
        start_idx = self.base_position + idx * self.context_length

        # Clamp into the valid range [0, max_start_idx] so slices are always
        # full-length and never wrap via negative indexing.
        start_idx = max(0, min(start_idx, self.max_start_idx))

        # Input sequence (current tokens).
        x = torch.from_numpy(
            self.data[start_idx : start_idx + self.context_length].astype(np.int64)
        )
        # Target sequence (next token at every position).
        y = torch.from_numpy(
            self.data[start_idx + 1 : start_idx + self.context_length + 1].astype(
                np.int64
            )
        )
        return x, y


def create_data_loader(
    x: np.ndarray,
    batch_size: int,
    context_length: int,
    shuffle: bool = True,
    num_workers: int = 0,
    pin_memory: bool = True,
    samples_per_epoch: int = None,
) -> DataLoader:
    """
    Create a PyTorch DataLoader for efficient batch loading.

    Args:
      x: np.ndarray — integer array of training data
      batch_size: int — number of samples per batch
      context_length: int — length of each sequence in batch
      shuffle: bool — whether to shuffle the data
      num_workers: int — number of worker processes for data loading
      pin_memory: bool — whether to use pinned memory (for GPU training)
      samples_per_epoch: int — number of samples per epoch (if None, use a reasonable default)

    Returns:
      DataLoader — PyTorch DataLoader instance
    """
    # Default: a reasonable epoch size that avoids memory problems.
    if samples_per_epoch is None:
        max_possible = len(x) - context_length - 1
        # Smaller of 10000 and 1% of all possible samples, floored at 1 so
        # short inputs (max_possible < 100) do not produce an empty dataset.
        samples_per_epoch = max(1, min(10000, max_possible // 100))

    dataset = SequentialTokenDataset(x, context_length, samples_per_epoch)
    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=pin_memory,
        # Keep worker processes alive between epochs when workers are used.
        persistent_workers=num_workers > 0,
    )
