# dataset/dataset.py
import random
import torch
from torch.utils.data import Dataset
from config.config import train_cfg

class StreamTokenDataset(Dataset):
    """Memory-efficient streaming token dataset.

    Reads a plain-text file of *single-space-separated* integer token IDs in
    fixed-size chunks, optionally skipping a (random) prefix of the corpus,
    and serves non-overlapping (input, shifted-label) windows of ``seq_len``
    tokens each.
    """

    def __init__(self, file_path, seq_len, max_tokens, start_offset=None):
        """
        Args:
            file_path: path to the token-ID text file.
            seq_len: window (context) length of each sample.
            max_tokens: number of tokens to load into memory.
            start_offset: token index to start reading at; if ``None`` a
                random offset is chosen so different runs see different
                slices of the corpus.
        """
        self.file_path = file_path        # path to the token-ID text file
        self.seq_len = seq_len            # window length per sample
        self.max_tokens = max_tokens      # how many tokens to load
        self.start_offset = start_offset  # token offset to begin reading at
        self.tokens = []                  # loaded token IDs, e.g. [12, 14, 56, ..., 99]

        print(f"🔄 Loading tokens from {file_path} (max={max_tokens})...")

        # No explicit offset given: estimate the corpus size and pick a
        # random starting point.
        if self.start_offset is None:
            total_tokens = self._estimate_token_count()
            # Ensure enough tokens remain after the offset.
            max_possible_offset = max(0, total_tokens - max_tokens)
            # NOTE(review): the offset is capped at 1/10 of the safe range —
            # presumably to keep the token-by-token skip phase fast; confirm
            # this cap is intentional.
            self.start_offset = random.randint(0, max_possible_offset // 10)
            print(f"🎯 Random start offset: {self.start_offset}")

        self._load_tokens_with_offset()

        print(f"✅ Loaded {len(self.tokens)} tokens (offset={self.start_offset}).")

        # In total train_cfg.steps_per_epoch * train_cfg.batch_size *
        # model_cfg.block_size + 1 tokens are loaded; the final token is
        # reserved as the last position of the last label.
        # Number of samples = (token count - 1) // seq_len.
        self.num_chunks = (len(self.tokens) - 1) // self.seq_len

    def _estimate_token_count(self) -> int:
        """Return the total token count of the corpus (for offset sampling).

        A streaming scan of the file is too slow for multi-hundred-million
        token corpora, so a precomputed count is returned instead. Known
        corpus sizes:

            owt_tokens_IDs:       564053823
            owt_tokens_IDs_mini:  284655204
            pretrain_corpus:      300869004

        TODO: select the count based on ``self.file_path`` (or cache a real
        scan) instead of hard-coding a single corpus.
        """
        return 300869004

    def _load_tokens_with_offset(self):
        """Load up to ``max_tokens`` token IDs, skipping the first
        ``start_offset`` tokens.

        The file is read in chunks of ``train_cfg.chunk_size`` characters and
        split on single spaces; the last (possibly truncated) piece of each
        chunk is carried over into the next read.
        """
        buffer = ""          # trailing, possibly-incomplete token from the previous chunk
        tokens_loaded = 0
        tokens_skipped = 0

        with open(self.file_path, 'r', encoding='utf-8') as f:
            while tokens_loaded < self.max_tokens:
                chunk = f.read(train_cfg.chunk_size)  # read one chunk
                if not chunk:
                    break
                buffer += chunk                # accumulate into buffer
                parts = buffer.split(' ')      # split on single spaces
                # ① exact split:     "12 14 16"  -> ["12", "14", "16"]
                # ② trailing space:  "12 14 16 " -> ["12", "14", "16", ""]
                # ③ truncated token: "12 14 25"  (really 256) -> ["12", "14", "25"]

                # Everything except the last piece is a complete token.
                complete_tokens = parts[:-1]

                # The last piece may be truncated — carry it to the next chunk:
                # ① carry "16"; next chunk " 256 89..." starts with a space
                # ② carry "";   next chunk "256 89..." has no leading space
                # ③ carry "25"; next chunk "6 89..." completes the cut token
                buffer = parts[-1]

                for token_str in complete_tokens:
                    # BUGFIX: consecutive spaces yield empty pieces; the
                    # original int("") crashed during the load phase, and an
                    # empty piece wrongly consumed a skip slot during the
                    # skip phase. Ignore whitespace-only pieces entirely.
                    if not token_str.strip():
                        continue
                    if tokens_skipped < self.start_offset:
                        tokens_skipped += 1
                    else:
                        self.tokens.append(int(token_str))
                        tokens_loaded += 1
                        if tokens_loaded >= self.max_tokens:
                            break
                if tokens_loaded >= self.max_tokens:
                    break

            # At EOF the carried-over buffer, if it holds digits, is one final
            # complete token. BUGFIX: strip() guard so a whitespace-only
            # remainder (e.g. a trailing newline) does not crash int().
            if buffer.strip() and tokens_skipped >= self.start_offset and tokens_loaded < self.max_tokens:
                self.tokens.append(int(buffer.strip()))

    def __len__(self):
        """Number of non-overlapping windows available."""
        return self.num_chunks

    def __getitem__(self, idx):
        """Return the ``idx``-th (input, label) window.

        Windows do not overlap (stride == window length), maximizing corpus
        coverage per epoch; the label is the input shifted right by one.
        """
        start = idx * self.seq_len
        x = torch.tensor(self.tokens[start:start + self.seq_len], dtype=torch.long)          # input
        y = torch.tensor(self.tokens[start + 1:start + 1 + self.seq_len], dtype=torch.long)  # label, shifted by one
        return x, y