from copy import deepcopy
from random import random
from typing import List

import torch


def text_infilling(input_ids: List, attention_mask: List, args, tokenizer):
    """
    BART-style text infilling, applied in place.

    For every sentence in the batch, each non-special token starts a masked
    span with probability ``args.mask_probability``. The span length is drawn
    from a Poisson distribution with lambda = 3 and the whole span is collapsed
    into a single mask token; a drawn length of 0 instead inserts a mask token
    before the current position. Afterwards each sequence is padded/truncated
    back to ``args.max_seq_length``.

    :param input_ids: batch_size * max_seq_length lists of token ids (mutated in place)
    :param attention_mask: batch_size * max_seq_length; 1 at real tokens, 0 at padding (mutated in place)
    :param args: namespace providing ``mask_probability`` and ``max_seq_length``
    :param tokenizer: HuggingFace-style tokenizer (``mask_token``, ``pad_token``,
        ``all_special_ids``, ``get_vocab``) — assumed, confirm against caller
    """
    mask_token_id = tokenizer.get_vocab()[tokenizer.mask_token]
    padding_token_id = tokenizer.get_vocab()[tokenizer.pad_token]
    for i in range(len(input_ids)):
        j = 0
        while j < len(input_ids[i]):
            # Special tokens are never chosen as a span start; other tokens
            # start a span with mask_probability.
            if input_ids[i][j] not in tokenizer.all_special_ids and random() < args.mask_probability:
                length = int(torch.poisson(torch.tensor(3.0)))  # span length ~ Poisson(3)
                if length == 0:
                    # Length 0: insert a mask token before position j.
                    input_ids[i].insert(j, mask_token_id)
                    attention_mask[i].insert(j, 1)
                    j += 2  # skip both the inserted mask and the current token
                    continue
                else:
                    # Clamp the span end so the replacement never swallows a
                    # special token (e.g. the EOS/SEP marker) further right —
                    # the guard above only protects the span's *start*.
                    end = min(j + length, len(input_ids[i]))
                    for k in range(j + 1, end):
                        if input_ids[i][k] in tokenizer.all_special_ids:
                            end = k
                            break
                    # Collapse the whole span into a single mask token.
                    input_ids[i][j:end] = [mask_token_id]
                    attention_mask[i][j:end] = [1]
            j += 1
        # Masking can grow (insertions) or shrink (span collapse) the sequence;
        # normalize back to max_seq_length.
        seq_len = len(input_ids[i])
        if seq_len < args.max_seq_length:
            pad_n = args.max_seq_length - seq_len
            input_ids[i].extend([padding_token_id] * pad_n)
            attention_mask[i].extend([0] * pad_n)
        elif seq_len > args.max_seq_length:
            del input_ids[i][args.max_seq_length:]
            del attention_mask[i][args.max_seq_length:]


def preprocess(data, tokenizer, args):
    """
    Tokenize a batch of raw text and attach denoising labels.

    :param data: mapping with a ``"text"`` field holding the raw strings
    :param tokenizer: HuggingFace-style tokenizer callable — assumed, confirm
        against caller
    :param args: namespace providing ``max_seq_length``
    :return: the tokenized batch, with ``"labels"`` set to an independent deep
        copy of ``"input_ids"``
    """
    data = tokenizer(data["text"], padding="max_length", truncation=True, max_length=args.max_seq_length)
    # Deep copy: labels must stay intact when input_ids is later corrupted
    # in place (a shallow .copy() would share the inner per-sentence lists).
    data["labels"] = deepcopy(data["input_ids"])
    return data
