# === lib/data.py ===
import random
from datasets import load_dataset
import torch

def get_ptb(nsamples: int, seed: int, seqlen: int, tokenizer) -> list:
    """Draw random calibration sequences from the PTB training split.

    The whole training corpus is tokenized once, then ``nsamples`` windows of
    ``seqlen`` tokens are sampled at uniformly random start offsets.

    Args:
        nsamples: Number of sequences to sample.
        seed: RNG seed so the sampled windows are reproducible.
        seqlen: Length of each sampled window, in tokens.
        tokenizer: HF-style tokenizer; called with ``return_tensors='pt'``.

    Returns:
        A list of ``nsamples`` tensors, each of shape ``(1, seqlen)``.

    Raises:
        ValueError: If the tokenized corpus is shorter than ``seqlen``.
    """
    random.seed(seed)  # ensure reproducible sampling
    data = load_dataset('ptb_text_only', 'penn_treebank', split='train')
    enc = tokenizer(' '.join(data['text']), return_tensors='pt')
    ids = enc.input_ids[0]
    # Last valid window start is len - seqlen (inclusive). The previous code
    # used len - seqlen - 1, which made the final window unreachable, and
    # raised an opaque error from randint when the corpus was too short.
    max_start = ids.size(0) - seqlen
    if max_start < 0:
        raise ValueError(
            f"Corpus has {ids.size(0)} tokens, fewer than seqlen={seqlen}"
        )
    samples = []
    for _ in range(nsamples):
        idx = random.randint(0, max_start)
        samples.append(ids[idx:idx + seqlen].unsqueeze(0))
    return samples

def get_wikitext2(
    nsamples: int,
    seed: int,
    seqlen: int,
    tokenizer,
    data_path: str = '/home/mht/FLAP/dataset/wikitext-raw/wiki.train.raw',
) -> list:
    """Draw random calibration sequences from a raw WikiText-2 training file.

    Args:
        nsamples: Number of sequences to sample.
        seed: RNG seed so the sampled windows are reproducible.
        seqlen: Length of each sampled window, in tokens.
        tokenizer: HF-style tokenizer; called with ``return_tensors='pt'``.
        data_path: Path to the raw ``wiki.train.raw`` text file. Defaults to
            the original hard-coded location for backward compatibility.

    Returns:
        A list of ``nsamples`` tensors, each of shape ``(1, seqlen)``.

    Raises:
        ValueError: If the tokenized corpus is shorter than ``seqlen``.
    """
    random.seed(seed)  # ensure reproducible sampling
    # Load the raw text file as a plain-text dataset.
    traindata = load_dataset(
        'text',
        data_files={'train': data_path},
        split='train'
    )
    ids = tokenizer(" ".join(traindata['text']), return_tensors='pt').input_ids[0]
    # Last valid window start is len - seqlen (inclusive); guard against a
    # corpus shorter than seqlen instead of failing inside randint.
    max_start = ids.size(0) - seqlen
    if max_start < 0:
        raise ValueError(
            f"Corpus has {ids.size(0)} tokens, fewer than seqlen={seqlen}"
        )
    samples = []
    for _ in range(nsamples):
        idx = random.randint(0, max_start)
        samples.append(ids[idx:idx + seqlen].unsqueeze(0))
    return samples

def get_loaders(name: str, nsamples: int, seed: int, seqlen: int, tokenizer) -> list:
    """Dispatch to the calibration-data loader registered under *name*.

    Args:
        name: Dataset identifier, one of ``'ptb'`` or ``'wikitext2'``.
        nsamples: Number of sequences to sample.
        seed: RNG seed forwarded to the loader.
        seqlen: Token length of each sampled sequence.
        tokenizer: Tokenizer forwarded to the loader.

    Returns:
        The list of sampled sequences produced by the chosen loader.

    Raises:
        ValueError: If *name* does not match a known dataset.
    """
    loaders = {
        'ptb': get_ptb,
        'wikitext2': get_wikitext2,
    }
    try:
        loader = loaders[name]
    except KeyError:
        raise ValueError(f"Unsupported dataset: {name}") from None
    return loader(nsamples, seed, seqlen, tokenizer)
