# coding: UTF-8
import torch
import copy
import os
from tqdm import tqdm
from tensorboardX import SummaryWriter
from transformers import WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.optimization import BertAdam
import time
import random
from random import *
import pickle, random, copy
from datetime import timedelta
from torch.utils.data import RandomSampler,DataLoader
import torch.nn as nn
from transformers import BertModel, BertTokenizer,BertForPreTraining,BertForMaskedLM
random.seed(2021)

class MyDataset():
    """Line-based text dataset: each non-blank-stripped line of `path` is one sample."""
    def __init__(self, path):
        # Read everything up front; a context manager guarantees the file
        # handle is closed (the original opened it and never closed it).
        self.lines = []
        with open(path, 'r', encoding='utf-8') as ff:
            for line in ff:
                self.lines.append(line.strip())

    def __getitem__(self, idx):
        return self.lines[idx]

    def __len__(self):
        return len(self.lines)

def torch_mask_tokens(inputs, special_tokens_mask=None, mlm_probability=0.15):
    """
    Build (masked_inputs, labels) for masked-language-model training.

    Of the positions selected for prediction: 80% become [MASK], 10% become
    a random vocabulary id, and the remaining 10% are left unchanged.
    Unselected positions get label -100 so the loss ignores them.
    Relies on the module-level `tokenizer`.
    """
    labels = inputs.clone()
    # Per-position selection probability, forced to 0 at special tokens.
    pick_probs = torch.full(labels.shape, mlm_probability)
    if special_tokens_mask is None:
        # Derive the special-token mask from the tokenizer when not supplied.
        rows = [
            tokenizer.get_special_tokens_mask(row, already_has_special_tokens=True)
            for row in labels.tolist()
        ]
        special_tokens_mask = torch.tensor(rows, dtype=torch.bool)
    else:
        special_tokens_mask = special_tokens_mask.bool()
    pick_probs.masked_fill_(special_tokens_mask, value=0.0)

    selected = torch.bernoulli(pick_probs).bool()
    labels[~selected] = -100  # loss is computed only on selected positions

    # 80% of the selected positions -> [MASK]
    to_mask = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & selected
    inputs[to_mask] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)

    # Half of the remainder (10% overall) -> random vocabulary token
    to_randomize = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & selected & ~to_mask
    random_ids = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
    inputs[to_randomize] = random_ids[to_randomize]

    # The final 10% of selected positions stay unchanged.
    return inputs, labels


def mask_token(input_ids):
    """
    Apply BERT-style 80/10/10 masking to a single encoded sentence.

    Args:
        input_ids: LongTensor of shape (1, seq_len).
    Returns:
        (masked_input_ids, labels): both LongTensors of shape (1, seq_len).
        `labels` holds the original token id at each chosen position and
        -100 everywhere else.

    Uses the module-level `tokenizer` and `vocab_size`.
    """
    max_pred = 5  # cap on the number of predicted positions per sentence
    input_ids = list(input_ids.squeeze(0).numpy())
    original = copy.deepcopy(input_ids)
    special_ids = [100, 101, 102, 0]  # [UNK], [CLS], [SEP], [PAD]
    cand_maked_pos = [i for i, tok in enumerate(input_ids) if tok not in special_ids]
    random.shuffle(cand_maked_pos)
    n_pred = min(max_pred, max(1, int(len(input_ids) * 0.15)))  # ~15% of tokens
    masked_pos = []
    mask_id = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
    for pos in cand_maked_pos:
        if len(masked_pos) >= n_pred:  # was `>`, which allowed n_pred + 1 picks
            break
        masked_pos.append(pos)
        # Draw ONCE so the branches really split 80/10/10.  The original drew
        # random.random() twice, yielding ~80/2/18, and never labeled the
        # "keep unchanged" tokens, so they were never predicted at all.
        r = random.random()
        if r < 0.8:      # 80%: replace with [MASK]
            input_ids[pos] = mask_id
        elif r < 0.9:    # 10%: replace with a random non-special token
            index = random.randint(0, vocab_size - 1)
            while index in special_ids:  # never inject CLS/SEP/PAD/UNK
                index = random.randint(0, vocab_size - 1)
            input_ids[pos] = index
        # else 10%: keep the token unchanged but still predict it below.
    masked_set = set(masked_pos)  # O(1) membership for the label pass
    labels = [original[i] if i in masked_set else -100 for i in range(len(original))]
    return torch.LongTensor(input_ids).unsqueeze(0), torch.LongTensor(labels).unsqueeze(0)

def make_data(batch_data):
    """
    DataLoader collate_fn: turn a batch of raw sentences into MLM tensors.

    Args:
        batch_data: list of strings, one sentence each.
    Returns:
        (input_ids, token_type_ids, attention_mask, labels): LongTensors of
        shape (batch, 40) — inputs are masked in place by torch_mask_tokens
        and labels carry -100 at unmasked positions.
    """
    input_ids, token_type_ids, attention_mask, labels = [], [], [], []
    all_special_ids = [101, 102, 0]  # [CLS], [SEP], [PAD]
    for item in batch_data:
        encoded_dict = tokenizer(item, return_tensors="pt", padding='max_length',
                                 max_length=40, truncation=True)
        # Flag special tokens so torch_mask_tokens never selects them.
        # .tolist() gives plain ints instead of 0-dim tensors for the `in` test.
        special_tokens_mask = [1 if token in all_special_ids else 0
                               for token in encoded_dict["input_ids"][0].tolist()]
        encoded_dict["input_ids"], encoded_dict["labels"] = torch_mask_tokens(
            encoded_dict["input_ids"], torch.tensor(special_tokens_mask), 0.15)
        input_ids.append(encoded_dict['input_ids'])
        token_type_ids.append(encoded_dict['token_type_ids'])
        attention_mask.append(encoded_dict['attention_mask'])
        labels.append(encoded_dict["labels"])
    # torch.cat already yields int64 tensors; the original's extra
    # torch.LongTensor(...) re-wrapping of each result was a no-op copy.
    input_ids = torch.cat(input_ids, dim=0)
    token_type_ids = torch.cat(token_type_ids, dim=0)
    attention_mask = torch.cat(attention_mask, dim=0)
    labels = torch.cat(labels, dim=0)
    return input_ids, token_type_ids, attention_mask, labels

if __name__ == '__main__':
    random.seed(2021)
    writer = SummaryWriter(log_dir='scalar')
    # Restrict the visible GPU BEFORE any CUDA query so it takes effect,
    # and keep the CPU fallback (the original computed a fallback device
    # and then unconditionally overwrote it with "cuda", crashing on
    # CPU-only machines).
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    data_path = './single_data/MLM.txt'
    tokenizer = BertTokenizer.from_pretrained('/nfs/volume-881-1/zy/bert_pretrain')
    vocab_size = len(tokenizer.vocab)
    print('vocab_size:', vocab_size)
    mlm_dataset = MyDataset(data_path)
    mlm_sampler = RandomSampler(mlm_dataset)
    mlm_dataloader = DataLoader(mlm_dataset, batch_size=64, sampler=mlm_sampler,
                                collate_fn=make_data)
    model_MLM = BertForMaskedLM.from_pretrained('/nfs/volume-881-1/zy/bert_pretrain')
    model_MLM.to(device)
    model_MLM.train()  # enable dropout etc. for training (was missing)
    num_epochs = 1
    # Standard BERT fine-tuning: no weight decay on biases / LayerNorm params.
    param_optimizer = list(model_MLM.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}]
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=5e-5,
                         warmup=0.05,
                         t_total=len(mlm_dataloader) * num_epochs)

    for i, batch in enumerate(mlm_dataloader):
        batch = tuple(t.to(device) for t in batch)
        out = model_MLM(input_ids=batch[0],
                        token_type_ids=batch[1],
                        attention_mask=batch[2],
                        labels=batch[3],
                        return_dict=True)
        loss = out.loss
        print(loss, i)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        writer.add_scalar('scalar/test', loss, i)

    # Save the fine-tuned model, its config, and the vocabulary.
    output_dir = 'saved_models/'
    os.makedirs(output_dir, exist_ok=True)  # torch.save fails if the dir is absent
    output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
    output_config_file = os.path.join(output_dir, CONFIG_NAME)
    # Unwrap DataParallel if the model was ever wrapped.
    model_to_save = model_MLM.module if hasattr(model_MLM, 'module') else model_MLM

    torch.save(model_to_save.state_dict(), output_model_file)
    model_to_save.config.to_json_file(output_config_file)
    tokenizer.save_vocabulary(output_dir)
    writer.close()