from datasets import load_dataset
from torch.utils.data import DataLoader
import torch
import random
from transformers import AutoTokenizer

def _build_permuted_example(input_ids, cls_token_id, sep_token_id, pad_token_id):
    """Build one word-order-recovery example from a tokenized sequence.

    Shuffles at most 40% of the non-special tokens (the rest keep their
    positions, and [CLS]/[SEP]/padding never move).

    Args:
        input_ids: list of token ids, already padded to max length.
        cls_token_id / sep_token_id / pad_token_id: special ids to leave fixed.

    Returns:
        (permuted_input_ids, position_labels) where position_labels[i] is the
        index into ``input_ids`` of the token now sitting at position i of the
        permuted sequence; special/pad positions are labeled -100 (ignored by
        the loss). Sequences with fewer than 3 content tokens are returned
        unshuffled, with each content token labeled by its own position.
    """
    specials = {pad_token_id, cls_token_id, sep_token_id}
    # Positions (indices into input_ids) of the real content tokens.
    content_positions = [i for i, tok in enumerate(input_ids) if tok not in specials]
    n = len(content_positions)

    # perm[slot] = which content token (by its index in content_positions)
    # ends up in content slot `slot`. Start from the identity permutation.
    perm = list(range(n))
    if n >= 3:
        # Shuffle at most 40% of the content tokens. n * 2 // 5 (not
        # n // 5 * 2, which floors too early and under-counts, e.g. 9 -> 2).
        k = max(1, n * 2 // 5)
        chosen_slots = random.sample(range(n), k)
        sources = chosen_slots.copy()
        random.shuffle(sources)
        # `sources` is a permutation of `chosen_slots`, so `perm` stays a
        # valid permutation of range(n); unchosen slots keep their token.
        for slot, src in zip(chosen_slots, sources):
            perm[slot] = src

    # Apply the permutation in place over the content positions only; length
    # and special-token placement are preserved, so no re-padding is needed.
    permuted = list(input_ids)
    position_labels = [-100] * len(input_ids)
    for slot, src in enumerate(perm):
        dst_idx = content_positions[slot]
        src_idx = content_positions[src]
        permuted[dst_idx] = input_ids[src_idx]
        position_labels[dst_idx] = src_idx
    return permuted, position_labels


def load_tokenized_dataset(path="wikitext", name="wikitext-2-raw-v1",
                          pretrained_model_name_or_path="bert-base-uncased",
                          dataset_part="train", add_permutation=True, batch_size=16):
    """Load and tokenize a dataset, optionally adding a word-order task.

    Args:
        path / name: ``datasets.load_dataset`` identifiers.
        pretrained_model_name_or_path: tokenizer to load.
        dataset_part: which split to wrap in the returned DataLoader.
        add_permutation: if True, each example additionally gets
            ``permuted_input_ids`` (word order partially shuffled) and
            ``permutation_labels`` (original position of each shuffled token,
            -100 on special/pad positions).
        batch_size: DataLoader batch size.

    Returns:
        (tokenizer, dataloader) for the requested split.
    """
    dataset = load_dataset(path, name, cache_dir="./cache")
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path)
    # Use the tokenizer's real pad id rather than hard-coding 0 (which only
    # happens to be correct for BERT vocabularies).
    pad_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else 0

    def tokenize_function(examples):
        # Basic tokenization, padded/truncated to a fixed length of 128.
        tokenized = tokenizer(examples["text"], padding="max_length",
                              truncation=True, max_length=128)

        if add_permutation:
            # Add the word-order shuffle-and-recover task to every example.
            permuted_input_ids = []
            permutation_labels = []
            for input_ids in tokenized["input_ids"]:
                permuted, labels = _build_permuted_example(
                    input_ids, tokenizer.cls_token_id, tokenizer.sep_token_id, pad_id)
                permuted_input_ids.append(permuted)
                permutation_labels.append(labels)
            tokenized["permuted_input_ids"] = permuted_input_ids
            tokenized["permutation_labels"] = permutation_labels

        return tokenized

    tokenized_datasets = dataset.map(tokenize_function, batched=True)

    # Only request the permutation columns when they were actually produced;
    # the previous unconditional list crashed for add_permutation=False.
    columns = ['input_ids', 'attention_mask']
    if add_permutation:
        columns += ['permuted_input_ids', 'permutation_labels']
    tokenized_datasets.set_format(type='torch', columns=columns)

    dataloader = DataLoader(
        tokenized_datasets[dataset_part],
        batch_size=batch_size,
        shuffle=True
    )

    return tokenizer, dataloader
    
    
def create_masked_inputs(inputs, tokenizer, device, mlm_probability=0.15):
    """Apply BERT-style masked-language-model corruption to a batch.

    Selects ``mlm_probability`` of the non-special positions; of those,
    80% become [MASK], 10% a random vocabulary token, 10% stay unchanged.

    Args:
        inputs: LongTensor of token ids (batch, seq); modified in place.
        tokenizer: must provide ``get_special_tokens_mask``, ``mask_token``,
            ``convert_tokens_to_ids`` and ``__len__`` (vocab size).
        device: kept for interface compatibility; all sampled tensors now
            follow ``inputs``' own device so CPU/GPU batches both work.
        mlm_probability: fraction of eligible tokens selected for masking.

    Returns:
        (inputs, labels): the corrupted inputs and labels holding the original
        id at masked positions and -100 elsewhere (ignored by the loss).
    """
    # Copy the original ids to serve as MLM labels.
    labels = inputs.clone()
    dev = labels.device  # keep every sampled tensor on the batch's device

    # Per-position masking probability; special tokens get probability 0.
    probability_matrix = torch.full(labels.shape, mlm_probability, device=dev)
    special_tokens_mask = [
        tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
    ]
    probability_matrix.masked_fill_(
        torch.tensor(special_tokens_mask, dtype=torch.bool, device=dev), value=0.0
    )

    # Sample the masked positions; loss is computed only there.
    masked_indices = torch.bernoulli(probability_matrix).bool()
    labels[~masked_indices] = -100

    # 80% of masked positions -> [MASK]. Of the remaining 20%, half (p=0.5,
    # i.e. 10% overall) -> random token; the rest stay unchanged. The second
    # draw must use 0.5, not 0.1: it is conditioned on ~indices_replaced, so
    # 0.1 would yield an 80/2/18 split instead of the intended 80/10/10.
    indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8, device=dev)).bool() & masked_indices
    indices_random = (
        torch.bernoulli(torch.full(labels.shape, 0.5, device=dev)).bool()
        & masked_indices & ~indices_replaced
    )

    # Replace with [MASK].
    inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)

    # Replace with random vocabulary tokens.
    random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long, device=dev)
    inputs[indices_random] = random_words[indices_random]

    return inputs, labels