import torch
from transformers import RobertaConfig, RobertaForMaskedLM,BertTokenizer,BertConfig
import pickle, random, copy

# Fix the Python RNG so masking and negative-sampling decisions are reproducible.
random.seed(2021)
# NOTE(review): loads a word-piece vocab from a local path — confirm
# bert_base_chinese/vocab.txt exists relative to the working directory.
tokenizer = BertTokenizer.from_pretrained("bert_base_chinese/vocab.txt")
vocab_size = len(tokenizer.vocab)  # used by mask_token() for random-replacement draws

def torch_mask_tokens(inputs, tokenizer,special_tokens_mask= None,mlm_probability=0.15):
    """
    Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
    """
    labels = inputs.clone()
    # Uniform per-token selection probability, zeroed out on special tokens below.
    selection_prob = torch.full(labels.shape, mlm_probability)
    if special_tokens_mask is None:
        # Ask the tokenizer which positions are special ([CLS]/[SEP]/[PAD]/...).
        special_tokens_mask = torch.tensor(
            [
                tokenizer.get_special_tokens_mask(row, already_has_special_tokens=True)
                for row in labels.tolist()
            ],
            dtype=torch.bool,
        )
    else:
        special_tokens_mask = special_tokens_mask.bool()

    selection_prob.masked_fill_(special_tokens_mask, value=0.0)
    chosen = torch.bernoulli(selection_prob).bool()
    # Loss is computed only on the chosen positions; everything else gets -100.
    labels[~chosen] = -100

    # 80% of the chosen positions become [MASK].
    as_mask = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & chosen
    inputs[as_mask] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)

    # Half of the remaining 20% (i.e. 10% overall) become a random vocabulary token.
    as_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & chosen & ~as_mask
    random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
    inputs[as_random] = random_words[as_random]

    # The final ~10% keep their original token (but are still predicted via labels).
    return inputs, labels

def mask_token(input_ids):
    """Apply BERT-style MLM masking to a single encoded sequence.

    Takes a (1, seq_len) LongTensor of token ids and returns
    (masked_input_ids, labels), both (1, seq_len): up to 5 positions (~15% of
    the sequence) are selected for prediction; each selected position is 80%
    replaced by [MASK], 10% replaced by a random vocab token, 10% kept as-is.
    Labels hold the original id at selected positions and -100 elsewhere.
    """
    max_pred = 5  # max tokens of prediction
    input_ids = list(input_ids.squeeze(0).numpy())
    original = copy.deepcopy(input_ids)
    # NOTE(review): candidates exclude ids {0, 1, 2}, but the random-replace
    # filter below excludes {0, 100, 101, 102} ([PAD]/[UNK]/[CLS]/[SEP] in
    # standard BERT vocabs) — one of the two lists is presumably wrong for
    # this vocab; confirm against bert_base_chinese/vocab.txt.
    cand_masked_pos = [i for i, tok in enumerate(input_ids) if tok not in (0, 1, 2)]
    random.shuffle(cand_masked_pos)
    n_pred = min(max_pred, max(1, int(len(input_ids) * 0.15)))  # 15% of tokens
    masked_pos = []
    mask_id = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
    for pos in cand_masked_pos:
        if len(masked_pos) >= n_pred:  # fixed off-by-one: `>` overshot by one position
            break
        masked_pos.append(pos)
        roll = random.random()  # single draw so the 80/10/10 split is exact
        if roll < 0.8:  # 80%: replace with [MASK]
            input_ids[pos] = mask_id
        elif roll < 0.9:  # 10%: replace with a random vocabulary token
            index = random.randint(0, vocab_size - 1)
            while index in (101, 102, 100, 0):  # never inject [CLS]/[SEP]/[UNK]/[PAD]
                index = random.randint(0, vocab_size - 1)
            input_ids[pos] = index
        # else: 10% keep the original token; it still gets a label below,
        # as in standard BERT masking.
    masked_set = set(masked_pos)  # O(1) membership for the label sweep
    labels = [original[i] if i in masked_set else -100 for i in range(len(original))]
    return torch.LongTensor(input_ids).unsqueeze(0), torch.LongTensor(labels).unsqueeze(0)

class MyDataset():
    """Sentence-pair dataset for NSP-style pretraining.

    Loads two pickled, aligned lists p and h from `path`. A `probability`
    fraction of the indices become negatives: their h partner is swapped for
    a randomly chosen mismatched entry (label 0); the remaining pairs keep
    their aligned partner (label 1).
    """

    def __init__(self, path, probability):
        with open(path, 'rb') as out_file:
            p = pickle.load(out_file)
            h = pickle.load(out_file)
            print("len(p),len(h):", len(p), len(h))
            order = list(range(0, len(p)))
            random.shuffle(order)
            # First `probability` fraction of the shuffled indices become negatives.
            negatives = set(order[:int(len(order) * probability)])
            self.train_data = []
            for i in range(len(p)):
                if i in negatives:
                    # Draw a mismatched partner index (anything but i).
                    while True:
                        j = random.randint(0, len(p) - 1)
                        if j != i:
                            break
                    self.train_data.append((p[i], h[j], 0))
                else:
                    self.train_data.append((p[i], h[i], 1))

    def __getitem__(self, idx):
        return self.train_data[idx]

    def __len__(self):
        return len(self.train_data)

def truncate_seq_pair (tokens_a, tokens_b, max_num_tokens, rng):
    """Trim the longer of the two token lists, in place, until the pair fits.

    One token is removed per iteration from whichever list is currently
    longer, randomly from the front or the back to avoid positional bias.
    Returns the same two (now possibly shortened) list objects.
    """
    while len(tokens_a) + len(tokens_b) > max_num_tokens:
        victim = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        assert len(victim) >= 1
        # Coin flip: drop from the front half the time, from the back otherwise.
        if rng.random() < 0.5:
            del victim[0]
        else:
            victim.pop()
    return tokens_a, tokens_b
def ci_token(max_length, item):
    """Manually build a BERT-style paired encoding for (text, segmented list).

    item[0] is tokenized with the module tokenizer; item[1] is split on '、'
    so each segment becomes one token of the second sequence. Output mirrors
    tokenizer.__call__: a dict with 'input_ids', 'token_type_ids' and
    'attention_mask', each a (1, max_length) LongTensor laid out as
    [CLS] token1 [SEP] token2 [SEP] + padding.
    """
    encoded_dict = dict()
    token1 = tokenizer.tokenize(item[0])
    token2 = item[1].split('、')
    max_num_tokens = max_length - 3  # reserve room for [CLS] and two [SEP]
    rng = random.Random(12345)
    token1, token2 = truncate_seq_pair(token1, token2, max_num_tokens, rng)

    input_ids, token_type_ids, attention_mask = [], [], []

    def _append(token_id, segment):
        # One real (non-padding) position: id + segment id + attention 1.
        input_ids.append(token_id)
        token_type_ids.append(segment)
        attention_mask.append(1)

    _append(tokenizer.convert_tokens_to_ids('[CLS]'), 0)
    for t in token1:
        _append(tokenizer.convert_tokens_to_ids(t), 0)
    _append(tokenizer.convert_tokens_to_ids('[SEP]'), 0)
    for t in token2:
        tid = tokenizer.convert_tokens_to_ids(t)  # convert once, not twice per token
        if tid is not None:  # fixed `!= None` comparison; keeps original skip behavior
            _append(tid, 1)
    _append(tokenizer.convert_tokens_to_ids('[SEP]'), 1)

    # Pad out to max_length. NOTE(review): pad id 1 disagrees with standard
    # BERT vocabs where [PAD] = 0 — presumably correct for this custom vocab,
    # but confirm against bert_base_chinese/vocab.txt.
    while len(input_ids) < max_length:
        input_ids.append(1)
        token_type_ids.append(0)
        attention_mask.append(0)
    assert len(input_ids) == max_length
    assert len(token_type_ids) == max_length
    assert len(attention_mask) == max_length

    encoded_dict['input_ids'] = torch.LongTensor(input_ids).unsqueeze(0)
    encoded_dict['token_type_ids'] = torch.LongTensor(token_type_ids).unsqueeze(0)
    encoded_dict['attention_mask'] = torch.LongTensor(attention_mask).unsqueeze(0)
    return encoded_dict

def make_data(batch_data):
    """Collate a batch of (text_a, text_b, is_next) triples into training tensors.

    Each pair is encoded by the module tokenizer (padded/truncated to 512),
    then MLM-masked via mask_token(). Returns five LongTensors:
    input_ids, token_type_ids, attention_mask — each (batch, 512);
    labels — (batch, 512) with -100 on unmasked positions;
    isnext — (batch,) NSP labels taken from item[2].
    """
    input_ids, token_type_ids, attention_mask, labels, isnext = [], [], [], [], []
    for text_a, text_b, next_label in batch_data:
        encoded = tokenizer(text_a, text_b, return_tensors="pt",
                            padding='max_length', max_length=512, truncation=True)
        # mask_token returns the masked ids plus -100-padded MLM labels.
        encoded["input_ids"], encoded["labels"] = mask_token(encoded["input_ids"])
        input_ids.append(encoded['input_ids'])
        token_type_ids.append(encoded['token_type_ids'])
        attention_mask.append(encoded['attention_mask'])
        labels.append(encoded["labels"])
        isnext.append(next_label)

    # The tokenizer and mask_token already yield int64 tensors, so a single
    # cat per field suffices — no redundant LongTensor re-wrapping needed.
    return (
        torch.cat(input_ids, dim=0),
        torch.cat(token_type_ids, dim=0),
        torch.cat(attention_mask, dim=0),
        torch.cat(labels, dim=0),
        torch.LongTensor(isnext),
    )

class triplet_MyDataset():
    """Flat dataset that interleaves the two pickled lists: [p0, h0, p1, h1, ...]."""

    def __init__(self, path):
        with open(path, 'rb') as out_file:
            p = pickle.load(out_file)
            h = pickle.load(out_file)
        self.train_data = []
        for left, right in zip(p, h):
            self.train_data.append(left)
            self.train_data.append(right)

    def __getitem__(self, idx):
        return self.train_data[idx]

    def __len__(self):
        return len(self.train_data)


def triplet_make_data(batch_data):
    """Collate a batch of raw strings into MLM training tensors (no NSP label).

    Each string is encoded by the module tokenizer (padded/truncated to 512)
    and MLM-masked via mask_token(). Returns four LongTensors, each of shape
    (batch, 512): input_ids, token_type_ids, attention_mask, and labels
    (-100 on unmasked positions).
    """
    input_ids, token_type_ids, attention_mask, labels = [], [], [], []
    for text in batch_data:
        encoded = tokenizer(text, return_tensors="pt",
                            padding='max_length', max_length=512, truncation=True)
        # mask_token returns the masked ids plus -100-padded MLM labels.
        encoded["input_ids"], encoded["labels"] = mask_token(encoded["input_ids"])
        input_ids.append(encoded['input_ids'])
        token_type_ids.append(encoded['token_type_ids'])
        attention_mask.append(encoded['attention_mask'])
        labels.append(encoded["labels"])

    # Each entry is already a (1, 512) int64 tensor, so cat alone builds the
    # batch — the former LongTensor re-wraps were redundant copies.
    return (
        torch.cat(input_ids, dim=0),
        torch.cat(token_type_ids, dim=0),
        torch.cat(attention_mask, dim=0),
        torch.cat(labels, dim=0),
    )
