import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as Data
import torch.optim as optim
from importlib import util
import copy, random,os
import numpy as np
import re
from tensorboardX import SummaryWriter
from transformers import WEIGHTS_NAME, CONFIG_NAME
import collections
import datetime
from transformers import (
    BertTokenizer,
    BertConfig,
    BertModel,
    AutoTokenizer,
    AutoModel
)
import argparse,pickle
from optimization import BERTAdam
from tensorboardX import SummaryWriter
from transformers import WEIGHTS_NAME, CONFIG_NAME
from utils import make_data,Mytest
from model import my_Model,base_Model,Model_3,get_sememe_vec
import random
# Paths where the fine-tuned model, its config and the vocab are saved.
output_dir = 'saved_dict_now/'
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)

# CUDA_VISIBLE_DEVICES must be exported BEFORE the CUDA runtime is first
# initialized (torch.cuda.device_count() below does initialize it); setting
# it afterwards is silently ignored, so it is moved ahead of any CUDA call.
os.environ["CUDA_VISIBLE_DEVICES"] = "4,5,6"
# os.environ["CUDA_VISIBLE_DEVICES"] = "7"
device = torch.device("cuda")
n_gpu = torch.cuda.device_count()
best_score = 0

def set_seed(seed=1):
    """Seed every RNG used in training so runs are reproducible.

    Args:
        seed (int): seed applied to Python's, NumPy's and torch's RNGs.
            Defaults to 1, preserving the original behaviour of callers
            that pass no argument.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)           # CPU RNG
    torch.cuda.manual_seed_all(seed)  # all GPUs (no-op without CUDA)
    torch.cuda.manual_seed(seed)      # current GPU
    # Force deterministic convolution algorithms so repeated runs match.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.enabled = False  # disable cuDNN entirely for full determinism

def save_model(model, tokenizer):
    """Persist model weights, config and tokenizer vocab to ``output_dir``."""
    # Unwrap a DataParallel wrapper (if any) so we save the raw module.
    raw_model = getattr(model, 'module', model)
    torch.save(raw_model.state_dict(), output_model_file)  # model parameters
    raw_model.config.to_json_file(output_config_file)      # model config
    tokenizer.save_vocabulary(output_dir)                  # vocab files
    

# Predict on a dataloader and compute top-3 accuracy (used for val & test).
def predict(model,test_loader,result_file):
    """Rank candidate tail words for each head predicate and measure accuracy.

    For every sentence, each non-predicate word is scored against the head
    predicate by the classifier head; the ranked lists are written to
    ``result_file`` and a triple counts as a hit when the gold tail word
    appears in the top-3 candidates.

    Args:
        model: DataParallel-wrapped model (submodules reached via
            ``model.module`` -- assumes the wrapper exists; TODO confirm
            for single-GPU runs where DataParallel is not applied).
        test_loader: yields batches built by ``make_data``; assumes
            batch[0..2] are BERT input ids / token type ids / attention
            masks, batch[3] the original tokens, batch[4] per-word
            sub-token index lists, batch[5] triple index tuples, batch[6]
            triple word tuples, batch[7] "word/POS" strings -- TODO
            confirm against make_data.
        result_file: output path for the ranked candidate lists.

    Returns:
        (count, _all, acc): top-3 hits, total triples, and their ratio.
        Raises ZeroDivisionError if the loader yields no triples.
    """
    model.eval()
    file=open(result_file,'w',encoding='utf-8')
    _all,count,c = 0,0,0
    with torch.no_grad():
        for i, batch in enumerate(test_loader):
            out = model.module.bert(input_ids=batch[0].to(device),
                                token_type_ids=batch[1].to(device),
                                attention_mask=batch[2].to(device),
                                return_dict=True)
            pooler_output = out.pooler_output #torch.Size([B, 768])
            # NOTE(review): this loop reuses the name `i`, shadowing the
            # enumerate counter above; harmless here but fragile.
            for i in range(len(batch[3])):
                all_words = []  
                for h in batch[4][i]:  # sub-token indices of each segmented word
                    # idx+1 skips the [CLS] position in last_hidden_state.
                    zu = [out.last_hidden_state[i][idx+1].unsqueeze(0) for idx in h] #[(1*200),(1*200)]
                    word = torch.cat(zu,dim=0).mean(0) #torch.Size([200])
                    all_words.append(word)  # mean-pooled embedding of each word
                for s in range(len(batch[5][i])):  # one pass per (head, rel, tail) triple
                    center = all_words[batch[5][i][s][0]].unsqueeze(0)  # head predicate vector [1*200]
                    tail = all_words[batch[5][i][s][2]].unsqueeze(0)  # gold tail vector [1*200] TODO
                    c_word=batch[6][i][s][0]
                    cos_score=dict()
                    for m in range(len(all_words)):
                        if m !=batch[5][i][s][0]:  # every word except the head predicate
                            other_w=batch[7][i][m]
                            if other_w.split('/')[1] in ['w','u','c','a','f','m','d','y','p']: # punctuation, conjunctions etc. are excluded as candidates
                                    continue
                            zu=[center,all_words[m].unsqueeze(0)] #[(1*200),(1*200)...]
                            M_vec=torch.cat(zu,dim=1) # [1*400]
                            M_vec=model.module.dense(M_vec)
                            M_vec=model.module.activation(M_vec)
                            pred=model.module.cls(M_vec)  # [1*400]*[400*2]——>[1*2]
                            pred=torch.softmax(pred,dim=1)
                            # NOTE(review): dict is keyed by the score, so
                            # candidates with identical scores overwrite each other.
                            cos_score[float(pred[0][1].cpu().numpy())]=c_word+'_'+other_w
                    result = sorted(cos_score.items(), key=lambda t: t[0], reverse=True)  # best score first
                    file.write(batch[6][i][s][0]+'_'+batch[6][i][s][1]+'_'+batch[6][i][s][2]+'\n')
                    file.write('#'+''.join(x for x in batch[3][i])+'\n')  # batch[3] holds the original word tokens
                    top5 = []
                    if len(result) > 3:
                        c += 1
                    for item in result[:3]: 
                        x = re.split('[_/]', item[1])
                        top5.append(x[0]+'_' + x[1])
                    _all += 1
                    if batch[6][i][s][0]+'_' + batch[6][i][s][2] in top5:
                        count += 1
                    for item in result:
                        file.write(item[1] + '\t' + str(item[0]) + '\n')
                    file.write('\n')
        file.close()
        acc = count / _all
        print(c/_all)  # fraction of triples that had more than 3 candidates
        return count, _all, acc

def def_vec(h_def,_word,sememe_vec):
    """Build an attention-weighted sememe context vector for a word.

    Args:
        h_def: iterable of sememe keys for the word (may be empty).
        _word: [1, d] tensor, the word's projected embedding.
        sememe_vec: dict mapping sememe key -> vector (list/array-like).

    Returns:
        [d, 1] tensor: attention-weighted combination of the word's sememe
        vectors, or simply ``_word`` transposed when no sememe vector is
        available (the fallback keeps downstream shapes consistent).
    """
    d = []
    for k in h_def:
        s_vec = sememe_vec.get(k)  # look up the sememe vector
        # `is None` instead of the original `== 0`: if the stored value is
        # an array-like, `== 0` compares elementwise and its truth value is
        # ambiguous; a missing key is the only "no vector" case here.
        if s_vec is None:
            continue
        d.append(torch.from_numpy(np.array(s_vec)).unsqueeze(0))
    if not d:
        # No sememe information: fall back to the word itself. -> [d, 1]
        att_context = _word.transpose(0, 1)
    else:
        def_ = torch.cat(d, dim=0)  # [n, d]
        # .float().cuda() instead of torch.tensor(existing_tensor): avoids
        # the "copy construct from a tensor" warning and a redundant copy.
        def_ = def_.float().cuda()
        # [1,n,d] @ [1,d,1] -> [1,n,1] -> [n,1]
        attn_weights = torch.bmm(def_.unsqueeze(0), _word.unsqueeze(-1)).squeeze(0)
        soft_attn_weights = F.softmax(attn_weights, dim=0)  # [n,1]
        # [1,d,n] @ [1,n,1] -> [1,d,1] -> [d,1]
        att_context = torch.bmm(def_.transpose(0, 1).unsqueeze(0), soft_attn_weights.unsqueeze(0)).squeeze(0)
    return att_context  # [d, 1]
    
def predict_2(model,test_loader,result_file):
    """Sememe-augmented variant of :func:`predict`.

    Same ranking/accuracy protocol as ``predict``, but the candidate score
    additionally fuses an attention-weighted sememe context (via
    ``def_vec``) for both the head predicate and the candidate word before
    classification.

    Args:
        model: DataParallel-wrapped model exposing ``bert``, ``dense``,
            ``dense_s``, ``dense_a``, ``L``, ``activation`` and ``cls``
            through ``model.module``.
        test_loader: same batch layout as in ``predict`` -- assumed, TODO
            confirm against make_data.
        result_file: output path for the ranked candidate lists.

    Returns:
        (count, _all, acc): top-3 hits, total triples, and their ratio.
    """
    sememe_vec,word2def,def2word = get_sememe_vec()
    model.eval()
    file=open(result_file,'w',encoding='utf-8')
    _all,count,c = 0,0,0
    with torch.no_grad():
        for i, batch in enumerate(test_loader):
            out = model.module.bert(input_ids=batch[0].to(device),
                                token_type_ids=batch[1].to(device),
                                attention_mask=batch[2].to(device),
                                return_dict=True)
            pooler_output = out.pooler_output #torch.Size([B, 768])
            # NOTE(review): `i` is re-bound here, shadowing the enumerate counter.
            for i in range(len(batch[3])):
                all_words = []  
                for h in batch[4][i]:  # sub-token indices of each segmented word
                    # idx+1 skips the [CLS] position in last_hidden_state.
                    zu = [out.last_hidden_state[i][idx+1].unsqueeze(0) for idx in h] #[(1*200),(1*200)]
                    word = torch.cat(zu,dim=0).mean(0) #torch.Size([200])
                    all_words.append(word)  # mean-pooled embedding of each word
                for s in range(len(batch[5][i])):  # one pass per (head, rel, tail) triple
                    center = all_words[batch[5][i][s][0]].unsqueeze(0)  # head predicate vector [1*768]
                    tail = all_words[batch[5][i][s][2]].unsqueeze(0)  # gold tail vector [1*768]
                    c_word=batch[6][i][s][0]
                    cos_score=dict()
                    for m in range(len(all_words)):
                        if m !=batch[5][i][s][0]:  # every word except the head predicate
                            other_w=batch[7][i][m]
                            o_word=batch[7][i][m].split('/')[0]
                            if other_w.split('/')[1] in ['w','u','c','a','f','m','d','y','p']: # punctuation, conjunctions etc. are excluded as candidates
                                    continue
                            # Contextual branch: concatenated BERT embeddings.
                            zu=[center,all_words[m].unsqueeze(0)] #[(1*768),(1*768)...]
                            M_vec=torch.cat(zu,dim=1) # [1*768*2]
                            M_vec=model.module.dense(M_vec)
                            context=model.module.activation(M_vec)
                            # Sememe branch: project both words, then attend over
                            # their HowNet sememe vectors via def_vec.
                            h_word = model.module.L(center)  #[1*768]*[768*200]=[1*200]
                            t_word = model.module.L(all_words[m].unsqueeze(0))  #[1*768]*[768*200]=[1*200]
                            h_context = def_vec(word2def.get(c_word,[]),h_word,sememe_vec)
                            t_context = def_vec(word2def.get(o_word,[]),t_word,sememe_vec)  # [200*1]
                            zu=[h_context.transpose(0,1),t_context.transpose(0,1)] # [(1*200),(1*200)...]
                            sememe=torch.cat(zu,dim=1) # concat -> [1*400]
                            sememe=model.module.dense_s(sememe)
                            sememe=model.module.activation(sememe)
                            # Fuse both branches, then classify.
                            zu=[context,sememe] # [(1*768*2),(1*200*2)...]
                            context=torch.cat(zu,dim=1) # concat -> [1*(768*2+200*2)]
                            context = model.module.dense_a(context)
                            pred=model.module.cls(context) # [1*(768*2+200*2)]*[(768*2+200*2)*2]——>[1*2]
                            pred=torch.softmax(pred,dim=1)
                            # NOTE(review): score-keyed dict -- equal scores overwrite.
                            cos_score[float(pred[0][1].cpu().numpy())]=c_word+'_'+other_w
                    result = sorted(cos_score.items(), key=lambda t: t[0], reverse=True)  # best score first
                    file.write(batch[6][i][s][0]+'_'+batch[6][i][s][1]+'_'+batch[6][i][s][2]+'\n')
                    file.write('#'+''.join(x for x in batch[3][i])+'\n')  # batch[3] holds the original word tokens
                    top5 = []
                    if len(result) > 3:
                        c += 1
                    for item in result[:3]: 
                        x = re.split('[_/]', item[1])
                        top5.append(x[0]+'_' + x[1])
                    _all += 1
                    if batch[6][i][s][0]+'_' + batch[6][i][s][2] in top5:
                        count += 1
                    for item in result:
                        file.write(item[1] + '\t' + str(item[0]) + '\n')
                    file.write('\n')
        file.close()
        acc = count / _all
        print(c/_all)  # fraction of triples that had more than 3 candidates
        return count, _all, acc
def train(config):
    """Fine-tune the base model and checkpoint the best validation model.

    Relies on module-level globals set in ``__main__``: ``train_loader``,
    ``val_loader``, ``device``, ``n_gpu`` and ``start_time``.

    Args:
        config: BertConfig used to construct the model.
    """
    # NOTE(review): local best_score shadows the module-level one, so the
    # global defined at the top of the file is never updated here.
    best_score = 0
    model=base_Model('bert-base-chinese',config=config)
    # model=Model_3('bert-base-chinese',config=config)
    model.config=config
    model.to(device)
    print('n_gpu:',n_gpu)
    if n_gpu > 1:
        model = torch.nn.DataParallel(model)
    criterion = nn.CrossEntropyLoss()
    num_train_epochs=2  # 5
    total_steps = int(len(train_loader) * num_train_epochs)
    optimizer = BERTAdam(params=model.parameters(),
                         lr=2e-5,
                         warmup=0.1,
                         max_grad_norm=1.0,
                         t_total=total_steps,
                         schedule='warmup_linear',
                         weight_decay_rate=0.01)
    writer = SummaryWriter(log_dir='scalar')
    model.train()
    for ep in range(num_train_epochs):
        for i, batch in enumerate(train_loader):
            pred,label=model(input_ids=batch[0].to(device),
                            token_type_ids=batch[1].to(device),
                            attention_mask=batch[2].to(device),
                            orig=batch[3],
                            fenci=batch[4],
                            trip=batch[5],
                            appen=batch[6],
                            return_dict=True )
            # Skip batches that yielded no training pairs.
            if len(label)==0:
                pass
            else:
                loss = criterion(pred,label)
                if n_gpu > 1:
                    loss = loss.mean()  # DataParallel returns one loss per GPU
                if i % 100 == 0:
                    # NOTE(review): `i` resets every epoch, so the TensorBoard
                    # curve overwrites itself across epochs; a global step
                    # counter would be more useful.
                    writer.add_scalar('scalar/loss1', loss,i)
                print(loss,i)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # Validate every 1000 steps (original comment said 10000)
                # and keep the best checkpoint by validation accuracy.
                if (i) % 1000 == 0:
                    print('start validating...')
                    count,_all,acc=predict(model, val_loader,'result/result_val_'+str(i)+'.txt')
                    print("{} : Validation top5: {}".format(int(i / 1000), count))
                    print("{} : Validation all: {}".format(int(i / 1000), _all))
                    print("{} : Validation Accuracy: {}".format(int(i / 1000), acc))
                    if best_score < acc:
                        best_score = acc
                        # Saves the entire (possibly DataParallel-wrapped) model object.
                        torch.save(model,'saved_dict_now/model.pkl')
    end_time = datetime.datetime.now()
    during_time = end_time - start_time
    print("train done！！！during_time:", during_time)

if __name__ == '__main__':
    set_seed()  # fix all RNG seeds for reproducibility
    best_score = 0
    print('n_gpu:',n_gpu)
    start_time = datetime.datetime.now()
    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
    # Smoke-test the tokenizer on a short sentence.
    # NOTE(review): `input` shadows the builtin of the same name.
    input=tokenizer('这是一条测试句', return_tensors="pt", padding='max_length', max_length=10, truncation=True)
    print("tokenizer:", tokenizer)
    print("vocab_size:", len(tokenizer.vocab))
    print(tokenizer.convert_tokens_to_ids(tokenizer.mask_token),tokenizer.mask_token)

    # Pickled datasets produced offline; Mytest wraps them as a Dataset.
    train_path='./data/final/train_data.pkl'
    val_path='./data/final/val_data.pkl'
    test_path='./data/final/test_data.pkl'
    train_data=Mytest(train_path)
    val_data=Mytest(val_path)
    test_data=Mytest(test_path)
    print(train_data.__getitem__(1))
    print(val_data.__getitem__(1))
    print(test_data.__getitem__(1))
    
    # Shuffle only the training set; keep val/test in order.
    train_sampler = Data.RandomSampler(train_data)
    val_sampler = Data.SequentialSampler(val_data)
    test_sampler = Data.SequentialSampler(test_data)
    
    train_loader = Data.DataLoader(train_data, batch_size=24,sampler=train_sampler,collate_fn=make_data)
    val_loader = Data.DataLoader(val_data, batch_size=2,sampler=val_sampler,collate_fn=make_data)
    test_loader = Data.DataLoader(test_data,batch_size=2, sampler=test_sampler,collate_fn=make_data)
    
    config = BertConfig.from_pretrained('bert-base-chinese')
    train(config)
    print("loading model for predicting...")
    # Reload the best checkpoint saved during training and evaluate on test.
    M_predict=torch.load('saved_dict_now/model.pkl')
    result_file='result_cls.txt'
    count,_all,acc=predict(M_predict,test_loader,result_file)
    print("test top5命中: {}".format(count))
    print("test all 实体对: {}".format(_all))
    print("test Accuracy: {}".format(acc))
    print('predicting ending！！')
    
    
    
    
    
    
    
    
    
    
    
    
    
    
    

'''
    # 直接使用bert编码随机初始化cls分类层预测，不经过训练
    model=BertModel.from_pretrained('chinese_L-12_H-768_A-12').to(device)
    cls=nn.Linear(768*2,2).to(device)
    model.eval()
    file=open('bert_result.txt','w',encoding='utf-8')
    _all,count = 0,0
    with torch.no_grad():
        for i, batch in enumerate(test_loader):
            out=model(input_ids=batch[0].to(device),
                        token_type_ids=batch[1].to(device),
                        attention_mask=batch[2].to(device),
                        return_dict=True)
            for i in range(len(batch[3])):
                all_words=[]  
                for h in batch[3][i]:  # 记录每个分词的在句中的索引
                    zu=[out.last_hidden_state[i][idx+1].unsqueeze(0) for idx in h] #[(1*200),(1*200)]
                    word=torch.cat(zu,dim=0).mean(0) #torch.Size([200])
                    all_words.append(word)  # 获得每个分词对应的embedding
                for s in range(len(batch[4][i])):  # 记录核心谓词idx
                    center=all_words[batch[4][i][s]].unsqueeze(0)  # 核心谓词向量 [1*200]
                    #pooler_output[i] [CLS] 句向量表示embedding
                    c_word=''.join(q for q in [batch[5][i][j] for j in batch[3][i][batch[4][i][s]]])
                    cos_score=dict()
                    for m in range(len(all_words)):
                        if m !=batch[4][i][s]:  # 除了核心谓词以外的词
                            other_w=batch[7][i][m]
                            if other_w.split('/')[1] in ['w','u','c','a','f','m','d','y','p']: # 标点符号,连词等不参与候选词运算
                                    continue
                            zu=[center,all_words[m].unsqueeze(0)] #[(1*200),(1*200)...]
                            M_vec=torch.cat(zu,dim=1) # [1*400]
                            pred=cls(M_vec)  # [1*400]*[400*2]——>[1*2]
                            cos_score[float(pred[0][1].cpu().numpy())]=c_word+'_'+other_w
                            # cos_score[M.cos(center,all_words[m].unsqueeze(0)).cpu().numpy().tolist()[0]]=c_word+'_'+other_w  # 计算cosin 相似度
                    result = sorted(cos_score.items(), key=lambda t: t[0], reverse=True)  # list
                    # for c in batch[6][i]:  # 记录原本的依存三元组
                    file.write(batch[6][i][s][0]+'_'+batch[6][i][s][1]+'_'+batch[6][i][s][2]+'\n')
                    file.write('#'+''.join(x for x in batch[5][i])+'\n')  # batch[5]是原本的词token
                    top5=[]
                    for item in result[:3]: 
                        x=re.split('[_/]',item[1])
                        top5.append(x[0]+'_'+x[1])
                    _all+=1
                    if batch[6][i][s][0]+'_'+batch[6][i][s][2] in top5:
                        count+=1
                    for item in result:
                        file.write(item[1]+'\t'+str(item[0])+'\n')
                    file.write('\n')
        file.close()
        acc=count/_all
        print("test top5命中: {}".format(count))
        print("test all 实体对: {}".format(_all))
        print("test Accuracy: {}".format(acc))
        print('predicting ending！！')

'''

       
'''
# 预测部分二： 获得两两词之后计算余弦相似度得分排序
print("loading model for predicting...")
M= my_Model('saved_dict',config=config).to(device)
M.eval()
file=open('result_cos.txt','w',encoding='utf-8')
with torch.no_grad():
    for i, batch in enumerate(test_loader):
        out=M.bert(input_ids=batch[0].to(device),
                    token_type_ids=batch[1].to(device),
                    attention_mask=batch[2].to(device),
                    return_dict=True)
        for i in range(len(batch[3])):
            all_words=[]  
            for h in batch[3][i]:  # 记录每个分词的在句中的索引
                zu=[M.L(out.last_hidden_state[i][idx+1].unsqueeze(0)) for idx in h] #[(1*200),(1*200)]
                word=torch.cat(zu,dim=0).mean(0) #torch.Size([200])
                all_words.append(word)  # 获得每个分词对应的embedding
            for h in batch[4][i]:  # 记录核心谓词idx
                center=all_words[h].unsqueeze(0)
                c_word=''.join(q for q in [batch[5][i][j] for j in batch[3][i][h]])
                cos_score=dict()
                for m in range(len(all_words)):
                    if m !=h:
                        other_w=''.join(q for q in [batch[5][i][j] for j in batch[3][i][m]])
                        if other_w in punc:  # 标点符号不参与候选词运算
                            continue
                        cos_score[M.cos(center,all_words[m].unsqueeze(0)).cpu().numpy().tolist()[0]]=c_word+'_'+other_w  # 计算cosin 相似度
                result = sorted(cos_score.items(), key=lambda t: t[0], reverse=True)  # list
                for c in batch[6][i]:  # 记录原本的依存三元组
                    file.write(c[0]+'_'+c[1]+'_'+c[2]+'\n')
                file.write('#'+''.join(x for x in batch[5][i])+'\n')  # batch[5]是原本的词token
                for item in result:  # 计算核心谓词和其他所有词的 cosin 相似度距离
                    file.write(item[1]+'\t'+str(item[0])+'\n')
                file.write('\n')
    print('predicting ending！！')
    file.close()

'''
        
        

                    




























