from sklearn.metrics import precision_score
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as Data
import torch.optim as optim
from importlib import util
import copy, random,os
import numpy as np
import re
from tensorboardX import SummaryWriter
from transformers import WEIGHTS_NAME, CONFIG_NAME
import collections
import datetime
from transformers import (
    BertTokenizer,
    BertConfig,
    BertModel,
    AutoTokenizer,
    AutoModel
)
import argparse,pickle
from optimization import BERTAdam
from tensorboardX import SummaryWriter
from transformers import WEIGHTS_NAME, CONFIG_NAME
from utils import make_data,Mytest
from model import base_Model
import random
from sklearn.metrics import roc_auc_score
from sklearn import metrics
output_dir = 'saved_dict_now/'
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)

# Restrict visible GPUs BEFORE any CUDA query: CUDA_VISIBLE_DEVICES is only
# honored if it is set before the CUDA context is initialized, so it must
# precede torch.cuda.device_count() (and any other torch.cuda call).
os.environ["CUDA_VISIBLE_DEVICES"] = "2,3,4,5,6,7"
device = torch.device("cuda")
n_gpu = torch.cuda.device_count()  # number of GPUs actually visible
best_score = 0

def set_seed(seed=1):
    """Seed every RNG source (Python, NumPy, Torch CPU/GPU) for reproducibility.

    Args:
        seed: integer seed applied to all generators. Defaults to 1, matching
            the previously hard-coded value, so existing callers are unchanged.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)           # CPU generator
    torch.cuda.manual_seed(seed)      # current GPU (no-op without CUDA)
    torch.cuda.manual_seed_all(seed)  # all GPUs (no-op without CUDA)
    # Trade speed for determinism: fixed conv algorithms, no autotuning.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.enabled = False

def save_model(model, tokenizer):
    """Persist model weights, config JSON, and tokenizer vocab to output_dir."""
    # Unwrap a DataParallel wrapper (if any) so the raw module's weights are saved.
    model_to_save = getattr(model, 'module', model)
    torch.save(model_to_save.state_dict(), output_model_file)
    model_to_save.config.to_json_file(output_config_file)
    tokenizer.save_vocabulary(output_dir)
'''
make_data
batch[3] :'抓住这一环，就为提高产品质量打下了基础。'
batch[4] :[[0, 1], [2], [3], [4], [5], [6], [7], [8, 9]] 
batch[5] :[(8, 'sbj', 6), (8, 'obj', 14)]
batch[6] :[('抓住', 'obj', '环'), ('打下', 'obj', '基础')]
batch[7] :['抓住/v', '这/r', '一/m', '环/n', '，/w', '就/d', '为/p', '提高/v', '产品/n', '质量/n', '打下/v', '了/y', '基础/n', '。/w']
'''
def calculate(pred, true):
    """Return (precision, recall, F1) for binary predictions vs. gold labels.

    Every denominator is smoothed with 1e-5 so zero counts never divide by zero.
    """
    is_pred_pos = np.equal(pred, 1)
    is_gold_pos = np.equal(true, 1)
    tp = np.sum(is_pred_pos & is_gold_pos)   # true positives
    n_pred = np.sum(is_pred_pos)             # predicted positives
    n_gold = np.sum(is_gold_pos)             # gold positives
    precision = tp / (n_pred + 1e-5)
    recall = tp / (n_gold + 1e-5)
    f1 = 2 * precision * recall / (precision + recall + 1e-5)
    return precision, recall, f1
def calcu_argument(cos_score, true_):
    """Flatten per-predicate candidate scores into parallel prediction / gold
    label lists (1 = argument, 0 = not an argument) for metric computation.

    cos_score maps a predicate index to ['<word_idx>_<prob>', ...];
    true_ maps the same predicate index to ['<word_idx>_<role>', ...].
    """
    pred_all, true_all = [], []
    for key in cos_score:
        # Gold arguments for this predicate, normalized to '<word_idx>_1'.
        gold = [entry.split('_')[0] + '_1' for entry in true_[key]]
        # Threshold each candidate's probability at 0.5.
        labelled = []
        for entry in cos_score[key]:
            widx, prob = entry.split('_')[0], float(entry.split('_')[1])
            labelled.append(widx + ('_1' if prob >= 0.5 else '_0'))
        for entry in labelled:
            widx, flag = entry.split('_')
            pred_all.append(int(flag))
            true_all.append(1 if widx + '_1' in gold else 0)
    return pred_all, true_all
# 预测 & val set 计算 acc
def predict(model,test_loader,result_file):
    model.eval()
    file=open(result_file,'w',encoding='utf-8')
    pred_all, true_all = [], []
    with torch.no_grad():
        for i, batch in enumerate(test_loader):
            out = model.module.bert(input_ids=batch[0].to(device),
                                token_type_ids=batch[1].to(device),
                                attention_mask=batch[2].to(device),
                                return_dict=True)
            pooler_output = out.pooler_output #torch.Size([B, 768])
            for i in range(len(batch[3])):
                true_score = dict()
                cos_score=dict()
                true_ = dict()
                all_words, all_words_role = [], []  # 记录所有的分词索引
                for h in batch[4][i]:  # 记录每个分词的在句中的索引
                    zu = [out.last_hidden_state[i][idx+1].unsqueeze(0) for idx in h] #[(1*768),(1*768)]
                    word = torch.cat(zu,dim=0).mean(0) #torch.Size([768])
                    all_words.append(word)  # 获得每个分词对应的embedding
                dic = {}  #  {4: ['sbj_HX/Exp/2','sbj_HX/Cla/11'], 8: []}
                for item in batch[5][i]:
                    dic.setdefault(item[0],[]).append(item[1] + '/' + item[2] + '/' + str(item[3]))
                for it in dic:  # {4: ['sbj_HX/Exp/2','sbj_HX/Cla/11']}
                    center = all_words[it].unsqueeze(0)  # 核心谓词向量 [1*768]
                    center_word = batch[7][i][it]  # batch[6][i][s][0]
                    cos = dict()
                    for m in range(len(all_words)):
                        if m != it:  # 除了核心谓词以外的词
                            other_w = batch[7][i][m]
                            if other_w.split('/')[1] in ['w','u','c','a','f','m','d','y','p','q']: # 标点符号,连词等不参与候选词运算
                                continue
                            # 论元
                            zu = [center, all_words[m].unsqueeze(0)] #[(1*768),(1*768)...]
                            M_vec = torch.cat(zu,dim=1) # [1*768*2]
                            M_vec = model.module.dense(M_vec)
                            M_vec = model.module.activation(M_vec)
                            pred = model.module.cls(M_vec)  # [1*768]*[768*2]——>[1*2]
                            pred = torch.softmax(pred,dim=1)
                            cos_score.setdefault(it,[]).append(str(m) + '_' + str(float(pred[0][1].cpu().numpy())))
                            cos[float(pred[0][1].cpu().numpy())] = center_word + '_' + other_w
                    result = sorted(cos.items(), key=lambda t: t[0], reverse=True)  # list
                    for ar in dic[it]:
                        la, role, idx = ar.split('/')[0], ar.split('/')[1], int(ar.split('/')[2])
                        true_.setdefault(it,[]).append(str(idx) + '_' + role)
                        file.write(batch[7][i][it]+'_' + la + '_' + role+'_' + batch[7][i][idx] +'\n')
                        file.write('#'+''.join(x for x in batch[7][i])+'\n')  # batch[5]是原本的词token
                        for item in result:
                            file.write(item[1] + '\t' + str(item[0]) + '\n')
                        file.write('\n')
                # print("cos_score:", cos_score)
                # print("true_:", true_)
                pp, tt = calcu_argument(cos_score, true_)
                pred_all.extend(pp)
                true_all.extend(tt)
    # calculate argument:
    acc = metrics.accuracy_score(true_all, pred_all)
    print('auc: ', roc_auc_score(true_all, pred_all))
    print('acc:', acc)
    print(metrics.classification_report(true_all, pred_all,target_names = ['0','1'], digits=4)) 
    file.close()
    return acc

'''
true_score: {1: ['0_0.43950164318084717', '13_0.5050801038742065']}
cos_score: {1: ['0_0.43950164318084717', '2_0.503955066204071', '3_0.47540757060050964', '50.47132056951522827', '70.46329084038734436', '130.5050801038742065', '00.43950164318084717', '20.503955066204071', '30.47540757060050964', '50.47132056951522827', '70.46329084038734436', '130.5050801038742065']}
'''
def train(config):
    """Fine-tune the model, validating every 100 steps and checkpointing the
    best validation accuracy to saved_dict_now/model.pkl.

    Relies on module-level globals: train_loader, val_loader, device, n_gpu,
    start_time (all set in the __main__ block).

    Fixes vs. the original: the model is switched back to train mode after each
    validation (predict() leaves it in eval mode, silently disabling dropout for
    the rest of training), the SummaryWriter is always closed, and the
    empty-label case uses a guard `continue` instead of `pass/else`.
    """
    best_score = 0
    model = base_Model('chinese_wwm_pytorch', config=config)
    model.config = config
    model.to(device)
    print('n_gpu:', n_gpu)
    if n_gpu > 1:
        model = torch.nn.DataParallel(model)
    criterion = nn.CrossEntropyLoss()
    num_train_epochs = 60
    total_steps = int(len(train_loader) * num_train_epochs)
    optimizer = BERTAdam(params=model.parameters(),
                         lr=5e-5,
                         warmup=0.1,
                         max_grad_norm=1.0,
                         t_total=total_steps,
                         schedule='warmup_linear',
                         weight_decay_rate=0.01)
    writer = SummaryWriter(log_dir='scalar')
    model.train()
    try:
        for ep in range(num_train_epochs):
            for i, batch in enumerate(train_loader):
                pred, label = model(input_ids=batch[0].to(device),
                                    token_type_ids=batch[1].to(device),
                                    attention_mask=batch[2].to(device),
                                    orig=batch[3],
                                    fenci=batch[4],
                                    trip=batch[5],
                                    appen=batch[6],
                                    return_dict=True)
                if len(label) == 0:
                    continue  # batch yielded no (predicate, candidate) pairs
                loss = criterion(pred, label)
                if n_gpu > 1:
                    loss = loss.mean()  # average the per-GPU scalar losses
                if i % 1000 == 0:
                    writer.add_scalar('scalar/loss1', loss, i)
                print(loss, i)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                if i % 100 == 0:  # validate every 100 steps
                    print('start validating...')
                    # NOTE(review): the file name only encodes the step index,
                    # so later epochs overwrite earlier epochs' dumps.
                    acc = predict(model, val_loader, 'result/result_val_' + str(i) + '.txt')
                    if best_score < acc:
                        best_score = acc
                        torch.save(model, 'saved_dict_now/model.pkl')  # best checkpoint
                    model.train()  # predict() put the model in eval mode
    finally:
        writer.close()  # flush TensorBoard events even if training aborts
    end_time = datetime.datetime.now()
    during_time = end_time - start_time
    print("train done ! ! ! during_time:", during_time)

if __name__ == '__main__':
    set_seed()  # fix every RNG seed for reproducibility
    best_score = 0  # NOTE(review): dead here — train() shadows it with a local
    print('n_gpu:', n_gpu)
    start_time = datetime.datetime.now()  # read by train() to report duration
    tokenizer = BertTokenizer.from_pretrained('chinese_wwm_pytorch')
    # Tokenizer smoke test; renamed from 'input', which shadowed the builtin.
    sample_encoding = tokenizer('这是一条测试句', return_tensors="pt", padding='max_length', max_length=10, truncation=True)
    print("tokenizer:", tokenizer)
    print("vocab_size:", len(tokenizer.vocab))
    print(tokenizer.convert_tokens_to_ids(tokenizer.mask_token), tokenizer.mask_token)

    train_path = './data/shuffle_manual_role/train_data.pkl'
    val_path = './data/shuffle_manual_role/val_data.pkl'
    test_path = './data/shuffle_manual_role/test_data.pkl'
    # Alternate dataset: ./data/manual_argument/{train,val,test}_data.pkl
    train_data = Mytest(train_path)
    val_data = Mytest(val_path)
    test_data = Mytest(test_path)
    # Show one example from each split as a sanity check.
    print(train_data[1])
    print(val_data[1])
    print(test_data[1])

    train_sampler = Data.RandomSampler(train_data)
    val_sampler = Data.SequentialSampler(val_data)
    test_sampler = Data.SequentialSampler(test_data)

    train_loader = Data.DataLoader(train_data, batch_size=72, sampler=train_sampler, collate_fn=make_data)
    val_loader = Data.DataLoader(val_data, batch_size=72, sampler=val_sampler, collate_fn=make_data)
    test_loader = Data.DataLoader(test_data, batch_size=72, sampler=test_sampler, collate_fn=make_data)

    config = BertConfig.from_pretrained('chinese_wwm_pytorch')
    train(config)
    print("loading model for predicting...")
    # NOTE(review): torch.load unpickles a full model object — fine for our own
    # checkpoint, but never load untrusted .pkl files this way.
    M_predict = torch.load('saved_dict_now/model.pkl')
    result_file = 'result_cls.txt'
    acc = predict(M_predict, test_loader, result_file)
    print('predicting ending ! !')
    
    
    
    
    
    
    
    