# coding: UTF-8
from cgi import print_environ
import torch
import pickle
import numpy as np
from tqdm import tqdm
from tensorboardX import SummaryWriter
import time
from random import *
import pickle, random, copy, os
from datetime import timedelta
from torch.utils.data import RandomSampler,DataLoader,SequentialSampler
import torch.nn as nn
from transformers import BertModel, BertTokenizer,BertForMaskedLM,BertConfig
from utils import MyDataset, make_data, make_pred_data
from optimization import BERTAdam
import datetime
import collections
from sklearn import metrics
random.seed(2021)  # seeds only Python's RNG; torch/numpy RNGs are not seeded here

# Restrict the visible GPUs BEFORE the first CUDA call; setting the mask after
# CUDA has been initialised has no effect.
os.environ["CUDA_VISIBLE_DEVICES"] = '1,2,3,4,5,6'
# Fall back to CPU when CUDA is unavailable instead of failing at the first .to(device).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
print('n_gpu:', n_gpu)



# def set_seed():
#     random.seed(1)
#     np.random.seed(1)
#     torch.manual_seed(1)  # 为CPU设置随机种子
#     torch.cuda.manual_seed_all(1)  # 为多GPU设置随机种子
#     torch.cuda.manual_seed(1)  # 单GPU设置
#     torch.backends.cudnn.deterministic = True  # 保证每次结果一样
#     torch.backends.cudnn.benchmark = False
#     torch.backends.cudnn.enabled = False  # 为卷积网络设置随机种子值


def predict(model, val_loader, result_file):
    """Evaluate masked-LM predictions over val_loader and report accuracy/kappa.

    Args:
        model: BertForMaskedLM, possibly wrapped in torch.nn.DataParallel.
        val_loader: DataLoader whose batches are
            (input_ids, token_type_ids, attention_mask, labels, mask_positions)
            where mask_positions is one dict per example mapping
            {sequence position -> gold token id}
            (e.g. ``[{}, {13: 21323, 17: 21323}, ...]``).
        result_file: output path for a prediction dump; currently unused,
            kept for API compatibility with callers.

    Returns:
        float: token-level accuracy over all masked positions
        (0.0 when the loader yields no masked positions).
    """
    model.eval()
    # BUG FIX: unwrap DataParallel only when present; calling model.module on a
    # plain nn.Module raises AttributeError (happens whenever n_gpu <= 1).
    core = model.module if hasattr(model, 'module') else model
    pred_all = np.array([], dtype=int)
    pred_all_score = np.array([], dtype=float)  # logit scores are floats, not ints
    label_all = np.array([], dtype=int)
    label_all_score = np.array([], dtype=int)
    with torch.no_grad():
        for i, batch in enumerate(val_loader):
            out = core(input_ids=batch[0].to(device),
                       token_type_ids=batch[1].to(device),
                       attention_mask=batch[2].to(device),
                       labels=batch[3].to(device),
                       return_dict=True)
            logits = out.logits
            for idx in range(len(batch[4])):
                # Iterating an empty dict is a no-op, so no explicit != {} check needed.
                for k, v in batch[4][idx].items():
                    # Logit assigned to the gold token id at this masked position.
                    p_score = logits[idx][k][v].detach().cpu().numpy()
                    # Arg-max prediction over the vocabulary at this position.
                    pred = logits[idx][k].max(0)[1].detach().cpu().numpy()
                    pred_all = np.append(pred_all, pred)
                    pred_all_score = np.append(pred_all_score, p_score)
                    label_all = np.append(label_all, v)
                    label_all_score = np.append(label_all_score, np.array(1))
    if len(label_all) == 0:
        # Guard: avoid ZeroDivisionError on an empty evaluation set.
        print('predict: no masked positions found in loader')
        return 0.0
    print('pred_all_score:', pred_all_score)
    print('label_all_score:', label_all_score)
    print('pred_all:', len(pred_all), type(pred_all), pred_all, np.sum(pred_all == 1))
    print("true_all:", len(label_all), len(set(label_all.tolist())), type(label_all), label_all)
    # Hoist the list conversions: the original called .tolist() inside the
    # loops, which is accidentally quadratic.
    label_list, pred_list = label_all.tolist(), pred_all.tolist()
    # "only one" accuracy: positions come in consecutive pairs (presumably the
    # two masked characters of one drug name -- TODO confirm against collate_fn);
    # count a pair correct if the gold id matches either predicted position.
    count = 0
    for i in range(0, len(label_list) - 1, 2):
        if label_list[i] in [pred_list[i], pred_list[i + 1]]:
            count += 1
    print('only one acc:', count, count / (len(label_list) / 2))
    c = sum(1 for i, j in zip(label_list, pred_list) if i == j)
    acc = metrics.accuracy_score(label_all, pred_all)
    print('valid acc:', c, c / len(label_list), acc)
    # Cohen's kappa over all masked positions (no label-subset restriction needed).
    kappa = metrics.cohen_kappa_score(label_all, pred_all)
    print("kappa:", kappa)
    return acc

    

def train():
    """Fine-tune BertForMaskedLM on train_dataloader, validating periodically.

    Reads module-level globals: train_dataloader, val_dataloader, writer,
    device, n_gpu.  Saves the best model (by validation accuracy from
    predict()) to saved_dict_now/model.pkl.
    """
    best_score = 0
    config = BertConfig.from_pretrained('chinese_L-12_H-768_A-12')
    print('config.vocab_size:', config.vocab_size)
    model_MLM = BertForMaskedLM.from_pretrained('chinese_L-12_H-768_A-12')
    model_MLM.resize_token_embeddings(config.vocab_size)
    model_MLM.to(device)
    if n_gpu > 1:
        model_MLM = torch.nn.DataParallel(model_MLM)
    num_epochs = 2
    param_optimizer = list(model_MLM.named_parameters())
    # Standard BERT recipe: no weight decay on biases and LayerNorm parameters.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
    optimizer = BERTAdam(optimizer_grouped_parameters,
                         lr=3e-5,
                         warmup=0.01,
                         t_total=len(train_dataloader) * num_epochs)
    model_MLM.train()
    global_step = 0
    # BUG FIX: the warmup schedule above budgets num_epochs passes over the
    # data, but the original code iterated the dataloader only once.
    for epoch in range(num_epochs):
        for i, batch in enumerate(train_dataloader):
            batch = tuple(t.to(device) for t in batch)
            out = model_MLM(input_ids=batch[0],
                            token_type_ids=batch[1],
                            attention_mask=batch[2],
                            labels=batch[3],
                            return_dict=True)
            loss = out.loss
            if n_gpu > 1:
                loss = loss.mean()  # DataParallel returns one loss per GPU
            if global_step % 100 == 0:
                writer.add_scalar('scalar/loss1', loss, global_step)
            print(loss, global_step)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if global_step % 100 == 0:  # validate on the val set every 100 steps
                print('start validating...')
                acc = predict(model_MLM, val_dataloader, 'result/result_val_' + str(global_step) + '.txt')
                # BUG FIX: predict() switches the model to eval mode; restore
                # training mode (dropout etc.) before continuing.
                model_MLM.train()
                if best_score < acc:
                    best_score = acc
                    torch.save(model_MLM, 'saved_dict_now/model.pkl')
            global_step += 1

def remove_parameter(n_added=533,
                     src_path="chinese_L-12_H-768_A-12/pytorch_model.bin",
                     dst_path="chinese_L-12_H-768_A-12/pytorch_model.bin"):
    """Expand a BERT checkpoint's vocabulary-sized tensors by ``n_added`` rows.

    Appends randomly initialised rows to the word-embedding matrix, the MLM
    decoder weight, and both prediction bias vectors so the checkpoint can
    host extra tokens, then writes the result back.

    Args:
        n_added: number of vocabulary entries to append (default 533, the
            original hard-coded value).
        src_path: checkpoint file to read.
        dst_path: where to write the expanded checkpoint (defaults to in-place).

    Note: not idempotent -- calling it twice keeps growing the tensors.
    """
    check_point = torch.load(src_path)
    dicts = collections.OrderedDict()
    # One shared block of new rows for both weight matrices (as in the original
    # code, which reused the same `added_` tensor for both keys).
    # BUG FIX: detach() so the saved tensors carry no autograd history; the
    # unused `added = torch.rand(...)` dead code in the weight branches is gone.
    embedding = nn.Embedding(n_added, 768)
    added_rows = embedding(torch.arange(n_added)).detach()
    for k, value in check_point.items():
        if k in ("bert.embeddings.word_embeddings.weight",
                 "cls.predictions.decoder.weight"):
            value = torch.cat([value, added_rows], dim=0)
        elif k in ("cls.predictions.bias", "cls.predictions.decoder.bias"):
            value = torch.cat([value, torch.rand(n_added)], dim=0)
        dicts[k] = value
    torch.save(dicts, dst_path)



if __name__ == '__main__':
    # set_seed()
    remove_parameter()
    best_score = 0
    start_time = datetime.datetime.now()
    writer = SummaryWriter(log_dir='scalar')
    tokenizer = BertTokenizer.from_pretrained('chinese_L-12_H-768_A-12')
    print('vocab_size:', len(tokenizer.vocab))
    train_path, val_path, test_path = 'data/train.txt', 'data/val.txt', 'data/test.txt'
    train_dataset, val_dataset, test_dataset = MyDataset(train_path), MyDataset(val_path), MyDataset(test_path)
    print(train_dataset.__getitem__(1))
    train_sampler, val_sampler, test_sampler = RandomSampler(train_dataset), SequentialSampler(val_dataset), SequentialSampler(test_dataset)
    train_dataloader = DataLoader(train_dataset, batch_size=36, sampler=train_sampler, collate_fn=make_data)
    # BUG FIX: the val/test loaders previously wrapped train_dataset (while
    # using samplers built over val/test datasets), so validation and test
    # metrics were computed on training data.
    val_dataloader = DataLoader(val_dataset, batch_size=4, sampler=val_sampler, collate_fn=make_pred_data)
    test_dataloader = DataLoader(test_dataset, batch_size=4, sampler=test_sampler, collate_fn=make_pred_data)

    train()

    writer.close()
    end_time = datetime.datetime.now()
    during_time = end_time - start_time
    print("train done ! ! ! during_time:", during_time)

    print("loading model for predicting...")
    # NOTE(review): torch.load unpickles arbitrary code -- only load trusted files.
    M_predict = torch.load('saved_dict_now/model.pkl')
    result_file = 'result_cls.txt'
    acc = predict(M_predict, test_dataloader, result_file)
    print("test acc:", acc)
    print('predicting ending ! !')