import torch
import torch.nn as nn
from transformers import (
    BertTokenizer,
    BertConfig,
    BertModel
)
import torch.nn.functional as F
import random,pickle,copy
import numpy as np
import os
# Limit which GPUs CUDA may see. Moved ahead of torch.device("cuda"):
# CUDA_VISIBLE_DEVICES only takes effect if it is set before the CUDA
# runtime is first initialized, so it must precede any CUDA usage.
os.environ["CUDA_VISIBLE_DEVICES"] = "4,5,6"

tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
device = torch.device("cuda")

# Fix the global RNG so negative sampling in the models is reproducible.
random.seed(2021)

def get_sememe_vec():
    """Load the sememe embedding table and word/definition mappings.

    Returns:
        sememe_vec: dict mapping sememe name -> list[float] embedding,
                    parsed from a word2vec-style text file whose first
                    line is a header (skipped).
        word2def:   dict mapping a word to its sememe definitions.
        def2word:   dict mapping a sememe definition to the words using it.
    """
    sememe_vec = {}
    with open('data/sememe-vec.txt', 'r', encoding='utf-8') as file:
        for line in file.readlines()[1:]:  # skip "count dim" header line
            parts = line.strip().split()   # split once instead of twice per line
            sememe_vec[parts[0]] = [float(x) for x in parts[1:]]
    # NOTE(review): pickle.load is fine for trusted local data files, but
    # must never be pointed at untrusted input.
    with open("data/word2def.pkl", 'rb') as file:
        word2def = pickle.load(file)
    with open("data/def2word.pkl", 'rb') as file:
        def2word = pickle.load(file)
    return sememe_vec, word2def, def2word

class my_Model(nn.Module):
    """BERT collocation scorer with sememe-attention enrichment.

    The batch is expected to interleave sentence variants: even rows
    (0, 2, ...) hold the masked sentence and odd rows (1, 3, ...) the
    original sentence of the same example, so the batch size must be
    even (the original code hard-coded 8, i.e. 4 example pairs).

    For each example the head (predicate) and tail (collocate) word
    vectors are read off the BERT token embeddings, enriched by
    attention over their sememe vectors, and scored pairwise by a
    shared 2-way classifier:
      * (head, tail)                  -> label 1 (true collocation)
      * (head, sememe-swapped tail)   -> label 0
      * (head, random other word)     -> label 0 (15% of words, 1..5)
    """

    def __init__(self, pretrained_path, config):
        super().__init__()
        self.bert = BertModel.from_pretrained(pretrained_path)
        self.L = nn.Linear(config.hidden_size, 200)  # project hidden -> sememe dim
        self.sememe_vec, self.word2def, self.def2word = get_sememe_vec()
        self.dense = nn.Linear(400, 400)
        self.activation = nn.ReLU()
        self.cls = nn.Linear(400, 2)

    @staticmethod
    def _span_mean(hidden, token_idxs):
        """Mean embedding of the tokens at `token_idxs`; +1 skips [CLS]."""
        return torch.cat([hidden[i + 1].unsqueeze(0) for i in token_idxs], dim=0).mean(0)

    def _attend(self, word_vec, def_):
        """Attention of `word_vec` [d] over sememe rows `def_` [n, d].

        Returns the attended sememe summary averaged with the word
        vector itself (residual-style), shaped [d, 1].
        """
        # [1,n,d] x [1,d,1] -> [n,1] similarity scores
        scores = torch.bmm(def_.unsqueeze(0), word_vec.unsqueeze(-1).unsqueeze(0)).squeeze(0)
        weights = F.softmax(scores, dim=0)
        # [1,d,n] x [1,n,1] -> [d,1] weighted sememe summary
        ctx = torch.bmm(def_.transpose(0, 1).unsqueeze(0), weights.unsqueeze(0)).squeeze(0)
        return torch.cat([ctx.transpose(0, 1), word_vec.unsqueeze(0)], dim=0).mean(0).unsqueeze(-1)

    def _context(self, word_vec, sememe_names):
        """Sememe-enriched representation of `word_vec` [d] as [d, 1].

        Falls back to the bare word vector when none of `sememe_names`
        has a pretrained sememe embedding.
        """
        rows = []
        for name in sememe_names:
            vec = self.sememe_vec.get(name, 0)
            if vec == 0:  # this sememe has no pretrained embedding
                continue
            rows.append(torch.from_numpy(np.array(vec)).unsqueeze(0))
        if not rows:
            return word_vec.unsqueeze(-1)
        # numpy vectors are float64; match the model's float32 on GPU
        # (replaces the deprecated torch.tensor(tensor, dtype=...) re-wrap)
        def_ = torch.cat(rows, dim=0).cuda().float()
        return self._attend(word_vec, def_)

    def _score(self, left, right):
        """Score a ([1,200], [1,200]) pair -> [1,2] logits."""
        pair = torch.cat([left, right], dim=1)  # [1,400]
        return self.cls(self.activation(self.dense(pair)))

    def forward(self, input_ids, token_type_ids, attention_mask, fenci, trip, o, appen, return_dict=None):
        out1 = self.bert(input_ids=input_ids,
                         attention_mask=attention_mask,
                         token_type_ids=token_type_ids,
                         return_dict=True)
        last_hidden_state = self.L(out1.last_hidden_state)  # [B, 512, 200]
        pred, label = [], []
        B = last_hidden_state.size(0)
        # Generalized from the original hard-coded `B == 8`: any even batch
        # laid out as (mask, origin, mask, origin, ...) is accepted.
        if B >= 2 and B % 2 == 0:
            origin = last_hidden_state[1::2]  # odd rows: original sentences
            mask = last_hidden_state[0::2]    # even rows: masked sentences
            id = 0  # index into fenci/trip/appen; advances by 2 per pair
            for a, b in zip(origin, mask):
                h, r, t = fenci[id][trip[id][0]], trip[id][1], fenci[id][trip[id][2]]
                h_word = self._span_mean(a, h)  # head (predicate) vector [200]
                t_word = self._span_mean(a, t)  # tail (collocate) vector [200]

                h_context = self._context(h_word, self.word2def.get(appen[id][0], []))  # [200,1]
                t_def = self.word2def.get(appen[id][2], [])
                t_context = self._context(t_word, t_def)  # [200,1]

                # Negative sample: a word sharing a sememe with the tail lends
                # its sememe set to the MASKED sentence's tail span embedding.
                n_word = self._span_mean(b, t)
                n_def = []
                for item in t_def:
                    candidates = self.def2word.get(item, [])
                    if candidates:
                        # BUG FIX: original indexed neg[random.randint(0, len(neg)) - 1],
                        # whose -1 aliases the last element and double-weights it;
                        # random.choice samples uniformly.
                        n_def = self.word2def.get(random.choice(candidates), [])
                    if n_def:
                        break
                mask_context = self._context(n_word, n_def)  # [200,1]

                pred.append(self._score(h_context.transpose(0, 1), t_context.transpose(0, 1)))
                label.append(1)  # true (head, tail) collocation

                pred.append(self._score(h_context.transpose(0, 1), mask_context.transpose(0, 1)))
                label.append(0)  # sememe-swapped negative

                # Random-word negatives: 15% of remaining words, clamped to [1,5].
                others = list(fenci[id])  # shallow copy suffices: only remove/shuffle
                others.remove(h)
                others.remove(t)
                n_neg = min(5, max(1, int(len(others) * 0.15)))
                random.shuffle(others)
                for span in others[:n_neg]:
                    rand_word = self._span_mean(a, span)
                    pred.append(self._score(h_context.transpose(0, 1), rand_word.unsqueeze(0)))
                    label.append(0)

                id += 2
            pred = torch.cat(pred, dim=0)  # [n_samples, 2]
            label = torch.tensor(label).cuda()
        return pred, label
        
    
class base_Model(nn.Module):
    """BERT-only baseline: score dependency pairs without sememes.

    For every dependency triple of each sentence, the mean token
    embeddings of the head and tail spans are concatenated ([1,1536]),
    passed through a feed-forward layer, and classified as a real
    collocation (label 1). 15% (clamped to 1..5) of the remaining words
    are paired with the head as random negatives (label 0).
    """

    def __init__(self, pretrained_path, config):
        super().__init__()
        self.bert = BertModel.from_pretrained(pretrained_path)
        self.dense = nn.Linear(768 * 2, 768 * 2)
        self.activation = nn.ReLU()
        self.cls = nn.Linear(768 * 2, 2)

    @staticmethod
    def _span_mean(hidden, token_idxs):
        """Mean embedding of the tokens at `token_idxs`; +1 skips [CLS]."""
        return torch.cat([hidden[i + 1].unsqueeze(0) for i in token_idxs], dim=0).mean(0)

    def _score(self, left, right):
        """Score a ([1,768], [1,768]) pair -> [1,2] logits."""
        pair = torch.cat([left, right], dim=1)  # [1,1536]
        return self.cls(self.activation(self.dense(pair)))

    def forward(self, input_ids, token_type_ids, attention_mask, orig, fenci, trip, appen, return_dict=None):
        out1 = self.bert(input_ids=input_ids,
                         attention_mask=attention_mask,
                         token_type_ids=token_type_ids,
                         return_dict=True)
        last_hidden_state = out1.last_hidden_state  # [B, 512, 768]
        pred, label = [], []
        for id in range(last_hidden_state.size(0)):
            hidden = last_hidden_state[id]
            for item in trip[id]:
                h, r, t = fenci[id][item[0]], item[1], fenci[id][item[2]]
                h_context = self._span_mean(hidden, h).unsqueeze(0)  # [1,768]
                t_context = self._span_mean(hidden, t).unsqueeze(0)  # [1,768]

                pred.append(self._score(h_context, t_context))
                label.append(1)  # true collocation

                # Random-word negatives drawn from the rest of the sentence.
                others = list(fenci[id])  # shallow copy: deepcopy was wasteful
                others.remove(h)
                others.remove(t)
                n_neg = min(5, max(1, int(len(others) * 0.15)))
                random.shuffle(others)
                for span in others[:n_neg]:
                    rand_word = self._span_mean(hidden, span).unsqueeze(0)
                    pred.append(self._score(h_context, rand_word))
                    label.append(0)
        pred = torch.cat(pred, dim=0)  # [n_samples, 2]
        label = torch.tensor(label).cuda()
        return pred, label
    
    
class Model_3(nn.Module):
    """BERT + sememe fusion collocation classifier.

    Positives concatenate (a) the feed-forward encoding of the raw
    head/tail BERT vectors ([1,1536]) with (b) the sememe-attention
    encoding of their 200-d projections ([1,400]), and classify the
    fused [1,1936] vector with `cls`. Random-word negatives use only
    the raw BERT pair and go through `cls_n` + softmax (preserved
    as-is; see the review note in forward()).
    """

    def __init__(self, pretrained_path, config):
        super().__init__()
        self.bert = BertModel.from_pretrained(pretrained_path)
        self.dense = nn.Linear(768 * 2, 768 * 2)
        self.dense_s = nn.Linear(200 * 2, 200 * 2)
        self.dense_a = nn.Linear(200 * 2 + 768 * 2, 200 * 2 + 768 * 2)
        self.activation = nn.ReLU()
        self.cls = nn.Linear(768 * 2 + 200 * 2, 2)
        self.cls_n = nn.Linear(768 * 2, 2)
        self.sememe_vec, self.word2def, self.def2word = get_sememe_vec()
        self.L = nn.Linear(config.hidden_size, 200)  # project 768 -> sememe dim

    @staticmethod
    def _span_mean(hidden, token_idxs):
        """Mean embedding of the tokens at `token_idxs`; +1 skips [CLS]."""
        return torch.cat([hidden[i + 1].unsqueeze(0) for i in token_idxs], dim=0).mean(0)

    def _sememe_context(self, word_vec, sememe_names):
        """Attention of `word_vec` [1,d] over its sememe vectors -> [d,1].

        Falls back to the plain (transposed) word vector when none of
        `sememe_names` has a pretrained sememe embedding. Unlike
        my_Model, the attended summary is NOT averaged with the word
        vector (matches the original behavior).
        """
        rows = []
        for name in sememe_names:
            vec = self.sememe_vec.get(name, 0)
            if vec == 0:  # this sememe has no pretrained embedding
                continue
            rows.append(torch.from_numpy(np.array(vec)).unsqueeze(0))
        if not rows:
            return word_vec.transpose(0, 1)
        # float64 numpy -> model float32 on GPU (replaces the deprecated
        # torch.tensor(tensor, dtype=...) re-wrap)
        def_ = torch.cat(rows, dim=0).float().cuda()
        # [1,n,d] x [1,d,1] -> [n,1] similarity scores
        scores = torch.bmm(def_.unsqueeze(0), word_vec.unsqueeze(-1)).squeeze(0)
        weights = F.softmax(scores, dim=0)
        # [1,d,n] x [1,n,1] -> [d,1] weighted sememe summary
        return torch.bmm(def_.transpose(0, 1).unsqueeze(0), weights.unsqueeze(0)).squeeze(0)

    def forward(self, input_ids, token_type_ids, attention_mask, orig, fenci, trip, appen, return_dict=None):
        out1 = self.bert(input_ids=input_ids,
                         attention_mask=attention_mask,
                         token_type_ids=token_type_ids,
                         return_dict=True)
        last_hidden_state = out1.last_hidden_state  # [B, 512, 768]
        pred, label = [], []
        for id in range(last_hidden_state.size(0)):
            hidden = last_hidden_state[id]
            for item, hz in zip(trip[id], appen[id]):
                h, r, t = fenci[id][item[0]], item[1], fenci[id][item[2]]
                h_f = self._span_mean(hidden, h).unsqueeze(0)  # head [1,768]
                t_f = self._span_mean(hidden, t).unsqueeze(0)  # tail [1,768]

                # Raw-BERT branch: concat + feed-forward -> [1,1536]
                context = self.activation(self.dense(torch.cat([h_f, t_f], dim=1)))

                # Sememe branch: 200-d projections enriched by attention.
                h_context = self._sememe_context(self.L(h_f), self.word2def.get(hz[0], []))  # [200,1]
                t_context = self._sememe_context(self.L(t_f), self.word2def.get(hz[2], []))  # [200,1]
                sememe = torch.cat([h_context.transpose(0, 1), t_context.transpose(0, 1)], dim=1)  # [1,400]
                sememe = self.activation(self.dense_s(sememe))

                # Fuse both branches; note: no activation after dense_a
                # (matches the original).
                fused = self.dense_a(torch.cat([context, sememe], dim=1))  # [1,1936]
                pred.append(self.cls(fused))
                label.append(1)  # true collocation

                # Random-word negatives paired with the head.
                others = list(fenci[id])  # shallow copy: deepcopy was wasteful
                others.remove(h)
                others.remove(t)
                n_neg = min(5, max(1, int(len(others) * 0.15)))
                random.shuffle(others)
                for span in others[:n_neg]:
                    rand_word = self._span_mean(hidden, span).unsqueeze(0)
                    neg = self.activation(self.dense(torch.cat([h_f, rand_word], dim=1)))
                    # NOTE(review): negatives are softmax probabilities while
                    # positives are raw logits, so `pred` mixes the two scales.
                    # Preserved as-is — confirm against the training loss.
                    pred.append(torch.softmax(self.cls_n(neg), dim=1))
                    label.append(0)
        pred = torch.cat(pred, dim=0)  # [n_samples, 2]
        label = torch.tensor(label).cuda()
        return pred, label
    
    
    
    
    
    
    
    
    
    
    
    
    
    
    
'''
# 把两个en词和[CLS]向量整合做注意力加权之后得到新的表示向量，送入下一个阶段计算
# print(out.pooler_output[0].unsqueeze(0).shape,h_context.transpose(0,1).shape,t_context.transpose(0,1).shape)
zu=[pooler_output[id].unsqueeze(0),h_context.transpose(0,1),t_context.transpose(0,1)] #[(1*200),(1*200)...]
M_vec=torch.cat(zu,dim=0) # [3*200]
attn_energies = self.Wa(M_vec) #[3*200]*[200*1]=[3*1]
attn_energies = F.softmax(attn_energies, dim=0) # [3*1]
context = torch.bmm(M_vec.transpose(0,1).unsqueeze(0),attn_energies.unsqueeze(0)).squeeze(0)
# [1*200*3]*[1*3*1]=[1*200*1].squeeze(0)=[200*1]
p=self.cls(context.transpose(0,1)) #[1*2]
pred.append(p)
label.append(1)  # 依存三元组注意力加权之后的分类得分，label=1
zu=[h_context.transpose(0,1),mask_context.unsqueeze(0)]  # [2*200]
negative=torch.cat(zu,dim=0).mean(0)  #[200]
neg=self.cls(negative.unsqueeze(0)) # [1*200]*[200*2]——>[1*2]
pred.append(neg)
label.append(0)  # 负例和CLS拼接之后分类得分，lable=0
'''