import torch
import torch.optim as optim
import torch.nn as nn
import os
import copy, random
import torch.utils.data as Data
import numpy as np
from tensorboardX import SummaryWriter
from ModelForMLM import ModelForMLM
import collections
import datetime
import numpy as np
from utils import make_AB_data, triplet_make_data, MyDataset, triplet_MyDataset, make_A_data
from tokenizers.processors import BertProcessing
from transformers import (
    RobertaConfig,
    RobertaForMaskedLM,
    BertTokenizer,
    BertConfig,
    PreTrainedTokenizerFast,
    AutoTokenizer,
    AutoModel,
    BertForPreTraining
)
from tokenizers import Tokenizer
import argparse
from optimization import BERTAdam

# Select visible GPUs before any other CUDA interaction so the setting
# actually takes effect (it is ignored once a CUDA context exists).
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2"

# The first whitespace-separated token of each line is a valid herb name.
with open("herb_vocab_6888.txt", 'r', encoding='utf-8') as f:
    valid_herbs = [line.strip().split()[0] for line in f]

device = torch.device("cuda")
n_gpu = torch.cuda.device_count()
best_score = 0  # best validation NSP accuracy so far; updated by eval()

def flat_accuracy(preds, labels):
    """Return the fraction of rows whose argmax over `preds` equals `labels`.

    Args:
        preds: float tensor [batch, num_classes] of logits (may require grad).
        labels: integer tensor [batch] of gold class indices.

    Returns:
        float accuracy in [0, 1].
    """
    predicted = np.argmax(preds.detach().cpu().numpy(), axis=1).flatten()
    gold = labels.cpu().numpy().flatten()
    return np.sum(predicted == gold) / len(gold)


def eval(model, eval_dataloader):
    """Evaluate NSP accuracy on `eval_dataloader` and checkpoint on improvement.

    Expects each batch as (input_ids, token_type_ids, attention_mask,
    mlm_labels, next_sentence_label). Averages per-batch accuracy of the
    next-sentence-prediction logits and saves the whole model object to
    'model.pkl' whenever the average beats the module-level `best_score`.

    NOTE(review): this function shadows the builtin `eval`; callers in this
    file use the name, so it is kept as-is.
    NOTE(review): `out, last = model(...)` assumes the model call returns a
    2-tuple whose first element exposes `seq_relationship_logits` — this
    matches `pre_cls_model.forward`, not a plain transformers output; TODO
    confirm against the model class actually in use.
    """
    model.eval()
    eval_accuracy, nb_eval_steps = 0, 0
    for i, batch1 in enumerate(eval_dataloader):
        # Uses the module-level `device`.
        batch1 = tuple(t.to(device) for t in batch1)
        with torch.no_grad():
            out, last = model(input_ids=batch1[0],
                              token_type_ids=batch1[1],
                              attention_mask=batch1[2],
                              labels=batch1[3],
                              next_sentence_label=batch1[4])
            # Accuracy of the NSP head for this batch (batch1[4] is the label).
            tmp_eval_accuracy = flat_accuracy(out.seq_relationship_logits, batch1[4])
            eval_accuracy += tmp_eval_accuracy
            nb_eval_steps += 1

    print("Validation Accuracy: {}".format(eval_accuracy / nb_eval_steps))
    global best_score
    if best_score < eval_accuracy / nb_eval_steps:
        best_score = eval_accuracy / nb_eval_steps
        # Persists the full model object (not just the state_dict).
        torch.save(model, 'model.pkl')

class pre_cls_model(nn.Module):
    """BertForPreTraining wrapped with an extra herb-classification head.

    A linear head maps the pooled [CLS] representation to
    `config.herb_size` logits, used for multi-label herb prediction on top
    of the standard MLM + NSP pre-training objectives.
    """

    def __init__(self, model_path, config):
        super().__init__()
        self.pretrain_bert = BertForPreTraining.from_pretrained(model_path)
        # [batch, hidden_size] -> [batch, herb_size]
        self.predict_cls = nn.Linear(config.hidden_size, config.herb_size)

    def resize_token_embeddings(self, new_num_tokens):
        """Resize the wrapped BERT's token embeddings.

        (Parameter renamed from `len`, which shadowed the builtin; the only
        visible caller passes it positionally.)
        """
        self.pretrain_bert.resize_token_embeddings(new_num_tokens)

    def forward(self, input_ids, token_type_ids, attention_mask, labels=None, next_sentence_label=None, need_cls=False):
        """Run BERT pre-training; optionally also predict herb logits.

        Returns:
            (pre_out, cls_out): the pre-training output and, when
            `need_cls` is True, [batch, herb_size] logits (else None).

        NOTE(review): unpacking the BERT call into two values assumes the
        model returns exactly (pre_training_output, pooler_output) — TODO
        confirm against the installed transformers version.
        """
        pre_out, pooler_output = self.pretrain_bert(input_ids=input_ids,
                                           token_type_ids=token_type_ids,
                                           attention_mask=attention_mask,
                                           labels=labels,
                                           next_sentence_label=next_sentence_label,
                                           return_dict=True)
        #  last.last_hidden_state: [batch_size, length, hidden_dim]
        #  pooler_output: [batch_size, hidden_dim], e.g. torch.Size([8, 768])
        cls_out = None
        if need_cls:
            cls_out = self.predict_cls(pooler_output)  # [batch, hidden] -> [batch, herb_size]
        return pre_out, cls_out


def mask_token(orginput_ids=None, pred=None):
    """Apply BERT-style dynamic masking to a batch of token-id sequences.

    For each sequence, up to 15% of eligible positions (capped at 5) are
    selected. Following the standard BERT recipe, each selected position is
    80% replaced by [MASK], 10% replaced by a random vocabulary token, and
    10% kept unchanged — in every case the MLM label holds the original id.

    Relies on the module-level `tokenizer` and `vocab_size`.

    Args:
        orginput_ids: LongTensor [batch, seq_len] of input token ids.
        pred: per-example lists of token ids to exclude from masking
              (the herbs predicted by the classification head).

    Returns:
        (new_input_ids, labels): LongTensors of the same shape as the
        input; `labels` is -100 everywhere except at masked positions.
    """
    max_pred = 5
    # Hoisted out of the loop: the [MASK] id is constant for the batch.
    mask_id = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
    batch_ids = orginput_ids.cpu().numpy().tolist()
    new_input_ids, labels = [], []
    for input_ids, excluded_preds in zip(batch_ids, pred):
        original = copy.deepcopy(input_ids)
        # Never mask [CLS]=101, [SEP]=102, [UNK]=100, [PAD]=0, or the
        # tokens this example's predictions already cover.
        exclude = [101, 102, 100, 0] + excluded_preds
        cand_maked_pos = [i for i, tok in enumerate(input_ids) if tok not in exclude]
        random.shuffle(cand_maked_pos)
        n_pred = min(max_pred, max(1, int(len(input_ids) * 0.15)))  # 15% of tokens
        masked_pos = []
        for pos in cand_maked_pos:
            # Was `> n_pred`, which selected up to n_pred + 1 positions.
            if len(masked_pos) >= n_pred:
                break
            masked_pos.append(pos)
            # A single draw gives the correct 80/10/10 split; the original
            # drew twice (`elif random.random() > 0.9`), which made random
            # replacement ~2% likely and never labeled kept tokens.
            r = random.random()
            if r < 0.8:  # 80%: replace with [MASK]
                input_ids[pos] = mask_id
            elif r < 0.9:  # 10%: replace with a random non-special token
                index = random.randint(0, vocab_size - 1)
                while index in exclude:  # can't involve 'CLS', 'SEP', 'PAD'
                    index = random.randint(0, vocab_size - 1)
                input_ids[pos] = index
            # else 10%: keep the original token; it is still predicted.
        label = [original[i] if i in masked_pos else -100 for i in range(len(original))]
        new_input_ids.append(input_ids)
        labels.append(label)
    new_input_ids = torch.from_numpy(np.array(new_input_ids))
    labels = torch.from_numpy(np.array(labels))
    return new_input_ids, labels


def train_eval():
    """Alternate MLM/NSP pre-training (AB pairs) with herb classification (A-only).

    Each step runs two backward passes over the module-level `model`:
      1. a pre-training pass on an AB batch (MLM + NSP loss);
      2. a classification pass (`need_cls=True`) on an A-only batch, trained
         with BCEWithLogitsLoss against a multi-hot herb target (batch2[3]).
    The top-k herbs predicted in pass 2 are fed back into `mask_token` on the
    next step so those tokens are excluded from masking.

    Relies on module-level globals: model, optimizer, writer, args,
    AB_dataloader, A_dataloader, n_gpu, device.

    NOTE(review): `need_cls=True` and the `(pre_out, cls)` 2-tuple return
    match `pre_cls_model.forward`, but __main__ currently instantiates a
    plain `BertForPreTraining` — TODO confirm which model this is meant to
    run with.
    NOTE(review): assumes batch1[5] holds the un-masked input ids and
    batch2[4] the per-example target herb lists — verify against
    make_AB_data / make_A_data in utils.
    """
    model.train()
    # pos_weight = torch.ones([config.herb_size]).to(device)  # per-class weights / label smoothing (disabled)
    # print(pos_weight.shape, pos_weight)
    # loss_fct = nn.BCEWithLogitsLoss(pos_weight=pos_weight)
    loss_fct = nn.BCEWithLogitsLoss()
    print("123:", len(AB_dataloader))
    train_accuracy, total_steps = 0, 0
    pred = []  # herb ids predicted on the previous step; excluded from masking
    for ep in range(args.num_train_epochs):
        # zip() stops at the shorter of the two loaders.
        for i, (batch1, batch2) in enumerate(zip(AB_dataloader, A_dataloader)):
            total_steps += 1
            if pred != []:
                # Re-mask the raw input ids, avoiding previously-predicted herbs.
                batch1 = list(batch1)
                batch1[0], batch1[3] = mask_token(batch1[5], pred)
                batch1 = tuple(batch1)
            batch1 = tuple(t.to(device) for t in batch1)
            pre_out, cls = model(input_ids=batch1[0],
                                token_type_ids=batch1[1],
                                attention_mask=batch1[2],
                                labels=batch1[3],
                                next_sentence_label=batch1[4])
            loss = pre_out.loss
            if n_gpu > 1:
                loss = loss.mean()  # DataParallel returns one loss per GPU
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if total_steps % 100 == 0:
                print("pre_out loss:", loss, total_steps)
                writer.add_scalar('show/pre_out loss', loss, total_steps)

            # Second pass: herb classification on the A-only batch (no MLM/NSP labels).
            pre_out, cls = model(input_ids=batch2[0].to(device),
                                 token_type_ids=batch2[1].to(device),
                                 attention_mask=batch2[2].to(device),
                                 labels=None,
                                 next_sentence_label=None,
                                 need_cls=True)

            # Take as many top-scoring herbs per example as that example has targets.
            pred = []
            for idx in range(len(batch2[4])):
                pred.append(torch.topk(cls[idx], len(batch2[4][idx])).indices.detach().cpu().numpy().tolist())
            print("pred:", pred)
            print("target:", batch2[4])
            # batch2[3] is the multi-hot target vector for BCEWithLogitsLoss.
            loss = loss_fct(cls, batch2[3].float().to(device))
            if n_gpu > 1:
                loss = loss.mean()
            if total_steps % 100 == 0:
                print("cls loss:", loss, total_steps)
                writer.add_scalar('show/cls loss', loss, total_steps)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    writer.close()


def train_eval_AB():
    """Pre-train on AB pairs only (MLM + NSP), then run `eval`.

    Relies on module-level globals: model, optimizer, writer, args,
    AB_dataloader, n_gpu, device, and (for the final call) eval_dataloader,
    whose construction is currently commented out in __main__.

    NOTE(review): `pre_out, cls = model(...)` assumes a 2-tuple return with
    `pre_out.loss` and `pre_out.seq_relationship_logits`, matching
    `pre_cls_model.forward` — TODO confirm against the model class in use.
    """
    model.train()
    print("123:", len(AB_dataloader))
    train_accuracy, total_steps = 0, 0
    for ep in range(args.num_train_epochs):
        for i, batch1 in enumerate(AB_dataloader):
            total_steps += 1
            batch1 = tuple(t.to(device) for t in batch1)
            pre_out, cls = model(input_ids=batch1[0],
                                token_type_ids=batch1[1],
                                attention_mask=batch1[2],
                                labels=batch1[3],
                                next_sentence_label=batch1[4])
            loss = pre_out.loss
            print("pre_out loss:", loss, total_steps)
            if n_gpu > 1:
                loss = loss.mean()  # DataParallel returns one loss per GPU
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Running NSP accuracy, averaged over all steps so far.
            acc = flat_accuracy(pre_out.seq_relationship_logits, batch1[4])
            train_accuracy += acc
            if total_steps % 100 == 0:
                print("pre_out loss:", loss, total_steps)
                print("training Accuracy: {}".format(train_accuracy / total_steps))
                writer.add_scalar('show/training Accuracy', train_accuracy / total_steps, total_steps)
                writer.add_scalar('show/pre_out loss', loss, total_steps)

    eval(model, eval_dataloader)
    writer.close()

if __name__ == '__main__':
    # CLI: model path, train/eval files, epochs and batch sizes.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name_or_path", default=None, type=str)
    parser.add_argument("--train_file", default=None, type=str)
    parser.add_argument("--eval_file", default=None, type=str)
    parser.add_argument("--num_train_epochs", default=1, type=int)
    parser.add_argument("--train_batch_size", default=8, type=int)
    parser.add_argument("--eval_batch_size", default=8, type=int)
    args = parser.parse_args()

    start_time = datetime.datetime.now()
    tokenizer = BertTokenizer.from_pretrained(args.model_name_or_path)
    print("tokenizer:", tokenizer)
    vocab_size = len(tokenizer.vocab)
    print("vocab_size:", vocab_size)

    # AB (sentence-pair) data for the MLM + NSP objective.
    AB_dataset = MyDataset(args.train_file, 0.6)
    AB_dataloader = Data.DataLoader(AB_dataset,
                                    batch_size=args.train_batch_size,
                                    collate_fn=make_AB_data)
    # eval_dataset = MyDataset(args.eval_file, 0.6)
    # eval_dataloader = Data.DataLoader(eval_dataset,
    #                                   batch_size=args.train_batch_size,
    #                                   collate_fn=make_AB_data)

    # A-only data for the herb-classification objective in train_eval().
    A_dataset = MyDataset(args.train_file, 0.6, True)
    A_dataloader = Data.DataLoader(A_dataset,
                                   batch_size=args.train_batch_size,
                                   collate_fn=make_A_data)

    config = BertConfig.from_pretrained(args.model_name_or_path)
    config.herb_size = len(valid_herbs)
    print('123:', config.herb_size)
    # NOTE(review): train_eval() passes need_cls=True, which only
    # pre_cls_model (commented out below) accepts — confirm intended model.
    model = BertForPreTraining.from_pretrained(args.model_name_or_path)
    # model = pre_cls_model(args.model_name_or_path, config)
    model.resize_token_embeddings(vocab_size)
    model.to(device)
    if n_gpu > 1:
        model = torch.nn.DataParallel(model)
    criterion = nn.CrossEntropyLoss()
    total_steps = int(len(AB_dataloader) * args.num_train_epochs)
    # total_steps = 1000
    # BERTAdam with linear warmup over the first 10% of total steps.
    optimizer = BERTAdam(params=model.parameters(),
                         lr=1e-5,
                         warmup=0.1,
                         max_grad_norm=1.0,
                         t_total=total_steps,
                         schedule='warmup_linear',
                         weight_decay_rate=0.01)

    writer = SummaryWriter(log_dir='scalar_test')
    # train_eval_AB()
    train_eval()
    print("train done!")
    end_time = datetime.datetime.now()
    during_time = end_time - start_time
    print("during_time:", during_time)









    # fill-mask 个例试验
    # model = torch.load("model.pkl")
    # model.config = BertConfig.from_pretrained("bert-base-chinese")
    # from transformers import pipeline
    # fill_mask = pipeline(
    #     "fill-mask",
    #     model=model,
    #     tokenizer=tokenizer
    # )
    # MASK_TOKEN = tokenizer.mask_token
    # print(MASK_TOKEN, tokenizer)
    # result = fill_mask("大黄芒消厚朴枳实 {}.".format(MASK_TOKEN))
    # print(result)

    # eval NSP 测试正确率
    # model = torch.load("model.pkl")
    # model.to(device)
    # eval(model, eval_dataloader)









































    # for ep in range(10):
    #     for i, (batch1,batch2) in enumerate(zip(train_dataloader,triplet_dataloader)):
    #         batch1 = tuple(t.to(device) for t in batch1)
    #         # print("batch1:",batch1)
    #         loss1=model(input_ids=batch1[0],token_type_ids=batch1[1],attention_mask=batch1[2],labels=batch1[3],isnext=batch1[4],return_dict=True)
    #         if n_gpu > 1:
    #             loss = loss1.mean()
    #         if i %30==0:
    #             writer.add_scalar('scalar/loss1', loss.item(), i)
    #         print(" loss1:", loss, i)
    #         optimizer.zero_grad()
    #         loss.backward()
    #         optimizer.step()
    #         batch2 = tuple(t.to(device) for t in batch2)
    #         # print("batch2:", batch2)
    #         loss2 = model(input_ids=batch2[0], token_type_ids=batch2[1], attention_mask=batch2[2], labels=batch2[3],return_dict=True)
    #         if n_gpu > 1:
    #             loss = loss2.mean()
    #         if i %30==0:
    #             writer.add_scalar('scalar/loss2', loss.item(), i)
    #         print(" loss2:", loss, i)
    #         optimizer.zero_grad()
    #         loss.backward()
    #         optimizer.step()
    #
    # writer.close()
    # print("train done!")
    # endtime = datetime.datetime.now()
    # duringtime = endtime - starttime
    # print("time:",duringtime.seconds)


 #修改token_type_embeddings
    # check_point = torch.load("./guwenbert-base/pytorch_model.bin")
    # dicts = collections.OrderedDict()
    # for k, value in check_point.items():
    #     if k == "roberta.embeddings.token_type_embeddings.weight":
    #         print(value.size())
    #         value = value.repeat(2, 1)
    #         print(value.size())
    #     dicts[k] = value
    # torch.save(dicts, "pytorch_model.bin")
    # print("done")

