from datasets import load_dataset
from transformers import AutoTokenizer
from torch.utils.data import DataLoader
import torch

def load_tokenized_dataset(path="wikitext", name="wikitext-2-raw-v1",
                           pretrained_model_name_or_path="bert-base-uncased",
                           dataset_part="train", batch_size=16):
    """Load a text dataset, tokenize it, apply static MLM masking once, and
    return the tokenizer plus a DataLoader over the requested split.

    Args:
        path: ``datasets`` dataset path (e.g. "wikitext").
        name: Dataset configuration name.
        pretrained_model_name_or_path: Tokenizer checkpoint to load.
        dataset_part: Split to wrap in the DataLoader ("train", "validation", ...).
        batch_size: DataLoader batch size.

    Returns:
        ``(tokenizer, dataloader)`` where each batch is a dict of torch
        tensors containing ``input_ids``, ``attention_mask``, ``labels``
        (and ``token_type_ids`` when the tokenizer produces them).
        ``labels`` is ``-100`` at non-masked positions so the loss only
        covers masked tokens.

    Raises:
        ValueError: If the tokenizer has no mask token (MLM is impossible).
    """
    # Load the raw dataset (cached locally to avoid re-downloading).
    dataset = load_dataset(path, name, cache_dir="./cache")

    # Load the pretrained tokenizer.
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path)
    if tokenizer.mask_token_id is None:
        raise ValueError(
            f"Tokenizer {pretrained_model_name_or_path!r} has no mask token; "
            "it cannot be used for masked language modeling."
        )

    def tokenize_function(examples):
        # Fixed-length padding keeps every row the same shape so the default
        # DataLoader collate can stack them into tensors.
        return tokenizer(examples["text"], padding="max_length",
                         truncation=True, max_length=128)

    # Tokenize and drop the raw "text" column: the default collate function
    # cannot batch strings, so leaving it in would break the DataLoader.
    tokenized_datasets = dataset.map(tokenize_function, batched=True,
                                     remove_columns=["text"])

    # Only request columns the tokenizer actually produced (e.g. RoBERTa and
    # GPT-2 tokenizers emit no token_type_ids).
    model_columns = [c for c in ("input_ids", "attention_mask", "token_type_ids")
                     if c in tokenized_datasets[dataset_part].column_names]

    # Torch format must be set BEFORE the masking pass so the masking function
    # receives tensors (it relies on .clone(), masked_fill_, tensor indexing).
    tokenized_datasets.set_format(type="torch", columns=model_columns)

    def create_static_masked_inputs(examples, mlm_probability=0.15):
        """BERT-style MLM masking, applied once (static, not per-epoch)."""
        inputs = examples["input_ids"]
        # Clone the input IDs to use as labels before corrupting the inputs.
        labels = inputs.clone()

        # Per-token masking probability.
        probability_matrix = torch.full(labels.shape, mlm_probability)

        # Never mask special tokens ([CLS], [SEP], [PAD], ...).
        special_tokens_mask = [
            tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
            for val in labels.tolist()
        ]
        probability_matrix.masked_fill_(
            torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)

        # Sample the masked positions.
        masked_indices = torch.bernoulli(probability_matrix).bool()
        labels[~masked_indices] = -100  # loss is computed on masked positions only

        # Of the masked positions: 80% -> [MASK], 10% -> random token,
        # 10% -> left unchanged. Note: the second draw must use p=0.5
        # (half of the remaining 20%), not 0.1 — with 0.1 only ~2% of
        # masked tokens would be randomized.
        indices_replaced = (torch.bernoulli(torch.full(labels.shape, 0.8)).bool()
                            & masked_indices)
        indices_random = (torch.bernoulli(torch.full(labels.shape, 0.5)).bool()
                          & masked_indices & ~indices_replaced)

        # Replace with [MASK].
        inputs[indices_replaced] = tokenizer.mask_token_id

        # Replace with a uniformly random vocabulary token.
        random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
        inputs[indices_random] = random_words[indices_random]

        examples["input_ids"] = inputs
        examples["labels"] = labels
        return examples

    tokenized_datasets = tokenized_datasets.map(create_static_masked_inputs,
                                                batched=True)

    # Re-apply the torch format so the new "labels" column is included in
    # batches — the earlier format spec did not cover it, and without this
    # step DataLoader batches would silently lack the MLM labels.
    tokenized_datasets.set_format(type="torch",
                                  columns=model_columns + ["labels"])

    # Shuffle only the training split; evaluation order stays deterministic.
    dataloader = DataLoader(tokenized_datasets[dataset_part],
                            batch_size=batch_size,
                            shuffle=(dataset_part == "train"))

    return tokenizer, dataloader