from sklearn.metrics import precision_score
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as Data
import torch.optim as optim
from importlib import util
import copy, random,os
import numpy as np
import re
from sklearn.metrics import classification_report
from tensorboardX import SummaryWriter
from transformers import WEIGHTS_NAME, CONFIG_NAME
import collections
import datetime
from transformers import (
    BertTokenizer,
    BertConfig,
    BertModel,
    AutoTokenizer,
    AutoModel
)
import argparse,pickle
from optimization import BERTAdam
from tensorboardX import SummaryWriter
from transformers import WEIGHTS_NAME, CONFIG_NAME
from utils import make_data,Mytest
from model import base_Model
import random
from sklearn.metrics import roc_auc_score
from sklearn import metrics

# NOTE: CUDA_VISIBLE_DEVICES must be exported BEFORE the first CUDA call
# (torch.cuda.device_count() below initializes the CUDA context); the
# original set it after, where it is silently ignored.
os.environ["CUDA_VISIBLE_DEVICES"] = "3,4"

device = torch.device("cuda")
n_gpu = torch.cuda.device_count()
best_score = 0


# Semantic-role label inventory (27 classes) and its bidirectional mappings.
class_num = ['Reas' , 'Cons' , 'CaiLiao' , 'Stat' , 'Sco' , 'GongJu' , 'Dir' , 'YuanDian' , 'Time' , 'YiJu' , 'Quan' , 'FangShi' , 'ZhongDian' , 'LinTi' , 'Aft' , 'dsbj' , 'Prod' , 'Poss' , 'Loc' , 'Belg' , 'Datv' , 'Pat' , 'Clas' , 'Cont' , 'dobj' , 'Agt' , 'Exp' ]
class_id = {label: idx for idx, label in enumerate(class_num)}   # label -> id
id_class = {idx: label for label, idx in class_id.items()}       # id -> label


def set_seed(seed=1):
    """Seed every RNG source so runs are reproducible.

    Args:
        seed: integer seed applied to Python, NumPy and PyTorch RNGs
              (defaults to 1, preserving the original behavior).
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)            # CPU RNG
    torch.cuda.manual_seed(seed)       # current GPU (no-op without CUDA)
    torch.cuda.manual_seed_all(seed)   # all GPUs
    # Force deterministic cuDNN behavior; disabling benchmark/enabled trades
    # speed for bit-identical results across runs.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.enabled = False

'''
Layout of one batch produced by make_data (example):
batch[3]: raw sentence, e.g. '抓住这一环，就为提高产品质量打下了基础。'
batch[4]: per-word subword-token index groups, e.g. [[0, 1], [2], [3], [4], [5], [6], [7], [8, 9]]
batch[5]: predicate word indices, e.g. [8, 6]
batch[6]: segmented words with POS tags, e.g. ['抓住/v', '这/r', '一/m', '环/n', '，/w', '就/d', '为/p', '提高/v', '产品/n', '质量/n', '打下/v', '了/y', '基础/n', '。/w']
'''

# Prediction, and accuracy computation on the val set
def predict(model, test_loader, result_file):
    """Run inference over ``test_loader``, write per-argument predictions to
    ``result_file``, and print accuracy plus a classification report.

    Args:
        model:       trained model exposing ``bert``, ``dense``, ``activation``
                     and ``cls`` submodules.
        test_loader: DataLoader yielding make_data batches (see layout note
                     above: batch[0..2] are input tensors, batch[4] maps words
                     to subword indices, batch[5] lists argument spans,
                     batch[6] holds gold tuples, batch[7] the word tokens).
        result_file: path of the text report written per prediction.
    """
    model.eval()
    pred_all, true_all = [], []
    # Context manager guarantees the file is closed even if inference raises
    # (the original left the handle open on error).
    with open(result_file, 'w', encoding='utf-8') as file, torch.no_grad():
        # The original enumerated batches into `i` and then shadowed it in the
        # inner loop; the batch index was never used, so it is dropped here.
        for batch in test_loader:
            out = model.bert(input_ids=batch[0].to(device),
                             token_type_ids=batch[1].to(device),
                             attention_mask=batch[2].to(device),
                             return_dict=True)
            for i in range(len(batch[3])):         # sentence index in batch
                for s in range(len(batch[5][i])):  # argument index in sentence
                    # Mean-pool the subword vectors of the span's first word;
                    # the +1 offset presumably skips the prepended [CLS]
                    # token — TODO confirm against make_data.
                    head_vecs = [out.last_hidden_state[i][idx + 1].unsqueeze(0)
                                 for idx in batch[4][i][batch[5][i][s][0]]]
                    center = torch.cat(head_vecs, dim=0).mean(0).unsqueeze(0)  # [1, 768]
                    # Same pooling for the span's last word.
                    tail_vecs = [out.last_hidden_state[i][idx + 1].unsqueeze(0)
                                 for idx in batch[4][i][batch[5][i][s][-1]]]
                    tail = torch.cat(tail_vecs, dim=0).mean(0).unsqueeze(0)    # [1, 768]
                    # Classifier head: dense -> activation -> cls over the
                    # concatenated (head, tail) representation.
                    pair = torch.cat([center, tail], dim=1)                    # [1, 768*2]
                    hidden = model.activation(model.dense(pair))
                    probs = torch.softmax(model.cls(hidden), dim=1)
                    p = int(torch.max(probs, 1)[1].cpu().numpy()[0])
                    pred_all.append(p)
                    true_all.append(class_id[batch[6][i][s][2]])  # gold role label
                    file.write(batch[6][i][s][0]+'_'+batch[6][i][s][1]+'_'+batch[6][i][s][2]+ '_'+batch[6][i][s][3] + '\n')
                    file.write('#'+''.join(x for x in batch[7][i])+'\n')  # batch[7] holds the original word tokens
                    file.write(id_class[p] + '\n')
                    file.write('\n')
    acc = metrics.accuracy_score(true_all, pred_all)
    print('acc:', acc)
    measure_result = classification_report(true_all, pred_all)
    print('measure_result = \n', measure_result)


if __name__ == '__main__':
    set_seed()  # fix all RNG sources so the run is reproducible
    best_score = 0
    print('n_gpu:', n_gpu)
    start_time = datetime.datetime.now()

    tokenizer = BertTokenizer.from_pretrained('chinese_wwm_pytorch')
    # Smoke-test tokenization; the result itself is unused (the original
    # bound it to `input`, shadowing the builtin).
    _ = tokenizer('这是一条测试句', return_tensors="pt", padding='max_length', max_length=10, truncation=True)
    print("tokenizer:", tokenizer)
    print("vocab_size:", len(tokenizer.vocab))
    print(tokenizer.convert_tokens_to_ids(tokenizer.mask_token), tokenizer.mask_token)

    # Evaluation data: keep exactly one path active.
    # pred_path='./data/role/test_data.pkl'
    pred_path='./data/label_data/gold_data_process/gold_manual_data1000.pkl'
    # pred_path='./data/shuffle_manual_role/test_data.pkl'
    # pred_path='./data/shuffle_manual_role/gold_unmanual.pkl'
    pred_data = Mytest(pred_path)
    # Sequential sampler keeps the report file in dataset order.
    pred_sampler = Data.SequentialSampler(pred_data)
    pred_loader = Data.DataLoader(pred_data, batch_size=1, sampler=pred_sampler, collate_fn=make_data)

    print("loading model for predicting...")
    # SECURITY: torch.load unpickles arbitrary objects — only load trusted
    # checkpoints. Pass map_location='cpu' when loading on a machine without
    # the GPUs the checkpoint was saved on.
    # M_predict=torch.load('saved_dict_now/model_role27_2.pkl')
    # M_predict=torch.load('saved_dict_now/model_affine_3.pkl')
    M_predict = torch.load('saved_dict_now/model_liner_3.pkl')
    # M_predict=torch.load('saved_dict_now/model_role_affine.pkl')
    result_file = 'predict_result/result_role.txt'
    predict(M_predict, pred_loader, result_file)
    print('predicting ending ! !')
    
    
    
    
    
    
    
    