"""
数据集处理：加载语料 → 分词 → 构建GPT风格样本
"""
import json
from typing import List, Dict, Any
from torch.utils.data import Dataset
import torch

# NOTE: absolute import (changed from a relative import) so the module
# resolves correctly when the project is run from its root directory.
from src.tokenizer import JiaboTokenizer


class JiaboDataset(Dataset):
    """GPT-style unsupervised language-modeling dataset.

    Reads a raw UTF-8 text corpus, tokenizes it once, and slices the token
    stream into fixed-length windows of ``max_length`` tokens with a sliding
    window of step ``stride`` (windows overlap when ``stride < max_length``).

    Each item is a dict of ``torch.long`` tensors of shape ``(max_length,)``:
      - ``"input_ids"``: the token window, right-padded with the PAD id.
      - ``"labels"``: same tokens, but padding positions are set to
        ``IGNORE_INDEX`` (-100) so the loss skips them. The one-position
        right shift for autoregressive prediction is expected to happen
        inside the model (HuggingFace-style causal LM convention), not here.
    """

    # Default ignore value of torch.nn.CrossEntropyLoss; positions with this
    # label contribute nothing to the loss.
    IGNORE_INDEX = -100

    def __init__(
        self,
        corpus_path: str,
        tokenizer: "JiaboTokenizer",
        max_length: int = 512,
        stride: int = 256,
    ):
        """
        Args:
            corpus_path: Path to a UTF-8 plain-text corpus file.
            tokenizer: Tokenizer exposing ``encode``, ``vocab`` and
                ``pad_token`` (forward-ref annotation; duck-typed in practice).
            max_length: Number of tokens per example window.
            stride: Step between consecutive window starts; values below
                ``max_length`` produce overlapping windows.
        """
        self.corpus_path = corpus_path
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.stride = stride

        # Tokenize the whole corpus up front and materialize every example,
        # so __getitem__ is a cheap dict lookup during training.
        self.examples = self._load_and_encode()

    def _load_and_encode(self) -> List[Dict[str, torch.Tensor]]:
        """Read the corpus, tokenize it, and cut fixed-length windows."""
        with open(self.corpus_path, "r", encoding="utf-8") as f:
            text = f.read()

        tokens = self.tokenizer.encode(text)
        # Hoist the PAD-id lookup out of the loop.
        pad_id = self.tokenizer.vocab[self.tokenizer.pad_token]

        examples = []
        for start in range(0, len(tokens), self.stride):
            window = tokens[start:start + self.max_length]
            n_pad = self.max_length - len(window)

            input_ids = window + [pad_id] * n_pad
            # Mask padding in labels so no loss is computed on PAD tokens.
            labels = window + [self.IGNORE_INDEX] * n_pad

            examples.append({
                "input_ids": torch.tensor(input_ids, dtype=torch.long),
                "labels": torch.tensor(labels, dtype=torch.long),
            })

        return examples

    def __len__(self) -> int:
        return len(self.examples)

    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
        item = self.examples[idx]
        return {
            "input_ids": item["input_ids"],
            # Clone so a collator/trainer mutating labels in place cannot
            # corrupt the cached example.
            "labels": item["labels"].clone(),
        }
