import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as Data
from importlib import util
import os
import numpy as np
import re
import asyncio
import argparse
from utils import make_data ,Mytest
from model import base_Model
import random,json
from sklearn.metrics import precision_score
from sklearn.metrics import roc_auc_score
from sklearn import metrics

device = torch.device("cuda")
# NOTE(review): CUDA_VISIBLE_DEVICES is set after torch is imported; it still
# appears to take effect because CUDA is not initialized until device_count()
# below, but setting it before importing torch would be safer -- confirm.
os.environ["CUDA_VISIBLE_DEVICES"] = "2,3,4,5,6,7"
n_gpu = torch.cuda.device_count()

# The 27 semantic-role labels; a label's position in this list is its class id.
class_num = ['Reas', 'Cons', 'CaiLiao', 'Stat', 'Sco', 'GongJu', 'Dir',
             'YuanDian', 'Time', 'YiJu', 'Quan', 'FangShi', 'ZhongDian',
             'LinTi', 'Aft', 'dsbj', 'Prod', 'Poss', 'Loc', 'Belg', 'Datv',
             'Pat', 'Clas', 'Cont', 'dobj', 'Agt', 'Exp']
class_id = {label: idx for idx, label in enumerate(class_num)}  # label -> id
id_class = dict(enumerate(class_num))                           # id -> label

def calcu_argument(cos_score, true_):
    """Flatten per-predicate argument decisions into parallel 0/1 label lists.

    Args:
        cos_score: {predicate_idx: ['candIdx_prob_role', ...]} -- predicted
            probability (and role) for every scored candidate word.
        true_: {predicate_idx: ['argIdx_role', ...]} -- gold arguments.

    Returns:
        (pred_all, true_all): equal-length lists with one 0/1 entry per scored
        candidate; a prediction is positive when its probability >= 0.5, and
        the gold label is 1 when the candidate index is a gold argument.
    """
    pred_all, true_all = [], []
    for pred_idx, candidates in cos_score.items():
        # Gold argument indices, encoded the same way the loop below tests them.
        gold = {entry.split('_')[0] + '_1' for entry in true_[pred_idx]}
        for entry in candidates:
            fields = entry.split('_')
            cand_idx, prob = fields[0], float(fields[1])
            pred_all.append(1 if prob >= 0.5 else 0)
            true_all.append(1 if cand_idx + '_1' in gold else 0)
    return pred_all, true_all
        
def calcu_role(cos_score, true_):
    """Accumulate role predictions vs. gold into the globals pp_role/tt_role.

    For each scored candidate: a prediction above threshold contributes the
    string 'idx_role' (e.g. '0_Exp'), otherwise the int 0; the gold side
    contributes the matching 'idx_role' string when the candidate index is a
    gold argument, else 0. ``calculate()`` later scores the two aligned lists.

    Note: relies on ``pp_role`` / ``tt_role`` existing at module level (they
    are created in the ``__main__`` block).
    """
    for pred_idx in cos_score:
        # Gold arguments keyed by word index, e.g. {'0': '0_Exp', '7': '7_dobj'}.
        gold_by_idx = {entry.split('_')[0]: entry for entry in true_[pred_idx]}
        preds, golds = [], []
        for entry in cos_score[pred_idx]:
            fields = entry.split('_')
            cand_idx, prob, role = fields[0], float(fields[1]), fields[2]
            preds.append(cand_idx + '_' + role if prob >= 0.5 else 0)
            golds.append(gold_by_idx.get(cand_idx, 0))
        pp_role.extend(preds)
        tt_role.extend(golds)
    return
def calculate(pred=None, gold=None):
    """Micro precision/recall/F1 over aligned role-label lists.

    Entries equal to 0 mean "no role predicted / no gold role". A true
    positive is an exact string match on a non-zero entry, i.e. 'idx_role'
    must agree on both the word index and the role label.

    Args:
        pred: predicted labels; defaults to the module-level ``pp_role``
            (filled by ``calcu_role``), preserving the original behavior.
        gold: gold labels; defaults to the module-level ``tt_role``.

    Returns:
        (precision, Recall, F1) -- each smoothed with 1e-5 in the denominator
        to avoid division by zero when there are no positives.
    """
    if pred is None:
        pred = pp_role
    if gold is None:
        gold = tt_role
    print(len(pred))
    assert len(pred) == len(gold)
    TP, pre, tru = 0, 0, 0
    for i, j in zip(pred, gold):
        if i == j and i != 0:
            TP += 1          # exact index+role match
        if i != 0:
            pre += 1         # predicted positive
        if j != 0:
            tru += 1         # gold positive
    print('TP, pre, tru', TP, pre, tru)
    precision = TP / (pre + 1e-5)
    Recall = TP / (tru + 1e-5)
    F1 = 2 * precision * Recall / (precision + Recall + 1e-5)
    return precision, Recall, F1

def set_seed(seed=1):
    """Seed every RNG in use (python, numpy, torch CPU/GPU) for reproducibility.

    Args:
        seed: integer seed; defaults to 1 to preserve the original behavior.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)           # CPU RNG
    torch.cuda.manual_seed_all(seed)  # all GPUs
    torch.cuda.manual_seed(seed)      # current GPU (redundant with the line above)
    # Force deterministic cuDNN behavior; disabling benchmark/enabled trades
    # speed for identical results across runs.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.enabled = False

'''
make_data
batch[3] :'抓住这一环，就为提高产品质量打下了基础。'
batch[4] :[[0, 1], [2], [3], [4], [5], [6], [7], [8, 9]] 
batch[5] :[(8, 'sbj', 6), (8, 'obj', 14)]
batch[5] :[(18, 'sbj_HX', {'Agt'}, 17), (18, 'obj_HX', {'Datv'}, 19)]
batch[6] :[('抓住', 'obj', '环'), ('打下', 'obj', '基础')]
batch[7] :['抓住/v', '这/r', '一/m', '环/n', '，/w', '就/d', '为/p', '提高/v', '产品/n', '质量/n', '打下/v', '了/y', '基础/n', '。/w']
'''

def predict(model, role_model, test_loader, result_file):
    """Evaluate argument identification and role classification on test_loader.

    For each sentence, every gold predicate (from batch[5]) is paired with
    each candidate word (content words only; function-word POS tags are
    skipped). Two heads score each (predicate, candidate) pair:
      * ``model``      -- binary "is this word an argument?" (threshold 0.5)
      * ``role_model`` -- 27-way role classifier (labels in ``class_num``)
    A ranked, human-readable report is written to ``result_file``; argument
    metrics (AUC / accuracy / PRF) are printed here, while role pairs are
    accumulated into the module-level ``pp_role``/``tt_role`` by
    ``calcu_role`` and scored by ``calculate``.

    Args:
        model: argument model wrapped in DataParallel (used via ``model.module``).
        role_model: role model, used directly (NOT DataParallel-wrapped).
        test_loader: yields batches laid out as in the ``make_data`` example
            string above -- batch[0..2] are BERT input tensors, batch[4]
            groups sub-token indices per word, batch[5] holds gold
            (predicate_idx, label, role, arg_idx) tuples, batch[7] holds
            'word/POS' tokens.
        result_file: output path for the per-predicate ranking report.

    Returns:
        0 (all results are emitted via prints and the report file).
    """
    model.eval()
    file=open(result_file,'w',encoding='utf-8')
    pred_all, true_all = [], []
    with torch.no_grad():
        for i, batch in enumerate(test_loader):
            # Sentence encodings from the two BERT encoders.
            out = model.module.bert(input_ids=batch[0].to(device),
                                token_type_ids=batch[1].to(device),
                                attention_mask=batch[2].to(device),
                                return_dict=True)
            out_role = role_model.bert(input_ids=batch[0].to(device),
                                token_type_ids=batch[1].to(device),
                                attention_mask=batch[2].to(device),
                                return_dict=True)
            pooler_output = out.pooler_output  # torch.Size([B, 768]); NOTE(review): never used below
            # NOTE(review): this inner `i` shadows the batch index from
            # enumerate above; harmless only because the outer i is unused
            # after this point.
            for i in range(len(batch[3])):
                true_score = dict()  # NOTE(review): never written or read -- appears unused
                cos_score=dict()     # {pred_idx: ['candIdx_prob_role', ...]} predictions
                true_ = dict()       # {pred_idx: ['argIdx_role', ...]} gold arguments
                all_words, all_words_role = [], []  # per-word embeddings from each encoder
                for h in batch[4][i]:  # h = sub-token indices of one word
                    # idx+1: presumably offsets past the leading [CLS] token -- confirm tokenizer.
                    zu = [out.last_hidden_state[i][idx+1].unsqueeze(0) for idx in h] #[(1*768),(1*768)]
                    word = torch.cat(zu,dim=0).mean(0) #torch.Size([768])
                    all_words.append(word)  # word embedding = mean of its sub-token vectors
                for h in batch[4][i]:  # same averaging, but from the role encoder
                    zu_role = [out_role.last_hidden_state[i][idx+1].unsqueeze(0) for idx in h] #[(1*768),(1*768)]
                    word = torch.cat(zu_role,dim=0).mean(0) #torch.Size([768])
                    all_words_role.append(word)
                dic = {}  # {predicate_idx: ['label/role/arg_idx', ...]} e.g. {4: ['sbj_HX/Exp/2']}
                for item in batch[5][i]:
                    dic.setdefault(item[0],[]).append(item[1] + '/' + item[2] + '/' + str(item[3]))
                for it in dic:  # it = predicate word index
                    center = all_words[it].unsqueeze(0)  # predicate (head verb) vector [1*768]
                    center_word = batch[7][i][it]  # predicate token, 'word/POS'
                    cos = dict()  # NOTE(review): keyed by probability -- equal scores overwrite each other
                    for m in range(len(all_words)):
                        if m != it:  # every word except the predicate itself
                            other_w = batch[7][i][m]
                            # Function words (punctuation, auxiliaries, conjunctions,
                            # adjectives, numerals, adverbs, prepositions, ...) are
                            # excluded from the candidate set.
                            if other_w.split('/')[1] in ['w','u','c','a','f','m','d','y','p','q']:
                                continue
                            center_role = all_words_role[it].unsqueeze(0)
                            zu_=[center_role,all_words_role[m].unsqueeze(0)] #[(1*768),(1*768)...]
                            M_vec=torch.cat(zu_,dim=1) # [1*768*2]
                            # --- role head: which of the 27 roles fits this pair ---
                            R_vec = role_model.dense(M_vec)
                            R_vec = role_model.activation(R_vec)
                            pred = role_model.cls(R_vec)  # [1*(768*2)] -> [1*27]
                            pred = torch.softmax(pred,dim=1)  # [1*27]
                            p = torch.max(pred, 1)[1].cpu().numpy()[0]
                            p_role = id_class[p]  # argmax class id -> role label
                            # --- argument head: binary is-argument probability ---
                            zu = [center, all_words[m].unsqueeze(0)] #[(1*768),(1*768)...]
                            M_vec = torch.cat(zu,dim=1) # [1*768*2]
                            M_vec = model.module.dense(M_vec)
                            M_vec = model.module.activation(M_vec)
                            pred = model.module.cls(M_vec)  # [1*(768*2)] -> [1*2]
                            pred = torch.softmax(pred,dim=1)
                            cos_score.setdefault(it,[]).append(str(m) + '_' + str(float(pred[0][1].cpu().numpy())) + '_' + p_role)
                            cos[float(pred[0][1].cpu().numpy())] = center_word + '_' + other_w + ':     ' + p_role
                    result = sorted(cos.items(), key=lambda t: t[0], reverse=True)  # candidates, best first
                    for ar in dic[it]:  # one report section per gold argument of this predicate
                        la, role, idx = ar.split('/')[0], ar.split('/')[1], int(ar.split('/')[2])
                        true_.setdefault(it,[]).append(str(idx) + '_' + role)
                        file.write(batch[7][i][it]+'_' + la + '_' + role+'_' + batch[7][i][idx] +'\n')
                        file.write('#'+''.join(x for x in batch[7][i])+'\n')  # the sentence rebuilt from batch[7] tokens
                        for item in result:
                            file.write(item[1] + '\t' + str(item[0]) + '\n')
                        file.write('\n')
                pp, tt = calcu_argument(cos_score, true_)
                pred_all.extend(pp)
                true_all.extend(tt)

                calcu_role(cos_score, true_)
    precision, Recall, F1 = calculate()
    print("role:", precision, Recall, F1)
    # Argument metrics. NOTE(review): pred_all holds hard 0/1 decisions, so
    # roc_auc_score is computed on binarized labels rather than probabilities.
    print('auc: ', roc_auc_score(true_all, pred_all))
    print('acc:', metrics.accuracy_score(true_all, pred_all))
    print(metrics.classification_report(true_all, pred_all,target_names = ['0','1'], digits=4)) 
    file.close()
    return 0


if __name__ == '__main__':
    # Global accumulators consumed by calcu_role() / calculate().
    pp_role, tt_role = [], []
    print('n_gpu:', n_gpu)
    set_seed()
    best_score = 0
    # Alternative dataset kept for reference:
    # pred_path = './data/manual_role/test_data.pkl'
    pred_path = './data/label_data/gold_data_process/gold_manual_data1000.pkl'
    pred_data = Mytest(pred_path)
    pred_sampler = Data.SequentialSampler(pred_data)
    pred_loader = Data.DataLoader(pred_data, batch_size=2,
                                  sampler=pred_sampler, collate_fn=make_data)
    print("loading model for predicting...")
    # NOTE: torch.load unpickles arbitrary objects and can execute code --
    # only load checkpoints from a trusted source.
    M_predict = torch.load('saved_dict_now/model_ar_4.pkl')
    role_model = torch.load('saved_dict_now/model_role27_2.pkl')
    # CPU-only loading alternatives kept for reference:
    # M_predict = torch.load('saved_dict_now/model_200.pkl', map_location='cpu')
    result_file = 'predict_result/result_sum.txt'
    predict(M_predict, role_model, pred_loader, result_file)
    print('predicting ending ! !')
    
    
    
    
    
    
    
    