import torch
import torch.nn as nn
from transformers import (
    BertTokenizer,
    BertConfig,
    BertModel
)
import torch.nn.functional as F
import random,pickle,copy
import numpy as np
import os
# Map dependency-relation tags to class indices; class 0 is reserved for
# randomly sampled negative pairs (see forward()).
class_num = {'sbj_HX':1,'obj_HX':2,'_BY':3,'mod_BY':3}

# Fixed seed so negative sampling (random.shuffle in forward) is reproducible.
random.seed(2021)


class base_Model(nn.Module):
    """Score (head, tail) word pairs from dependency triples with BERT.

    For every triple (head, relation, tail) the head/tail word vectors are
    mean-pooled from BERT sub-word hidden states, concatenated, passed
    through dense+ReLU, and classified into 4 classes (relation classes
    1-3 per ``class_num``, class 0 = negative pair).  For each positive
    triple, a few random non-head/non-tail words are scored against the
    head as negatives (label 0).
    """

    def __init__(self, pretrained_path, config):
        super().__init__()
        # NOTE(review): `config` is accepted but unused here — kept for caller compatibility.
        self.bert = BertModel.from_pretrained(pretrained_path)
        self.dense = nn.Linear(768 * 2, 768 * 2)
        self.activation = nn.ReLU()
        self.cls = nn.Linear(768 * 2, 4)

    def _word_vec(self, hidden, token_ids):
        # Mean-pool the sub-word vectors of one word.  `token_ids` are 0-based
        # word-piece positions; +1 skips the leading [CLS] token.
        return torch.stack([hidden[i + 1] for i in token_ids], dim=0).mean(0)

    def _score(self, pair):
        # pair: [1, hidden*2] concatenated (head, candidate) representation -> [1, 4] logits.
        return self.cls(self.activation(self.dense(pair)))

    def forward(self, input_ids, token_type_ids, attention_mask, orig, fenci, trip, appen, return_dict=None):
        """Classify positive triples and sampled negative pairs for a batch.

        Args:
            input_ids / token_type_ids / attention_mask: standard BERT inputs.
            orig, appen, return_dict: unused — kept for caller compatibility.
            fenci: per-sample list of words; each word is a list of its
                word-piece positions (0-based, excluding [CLS]).
            trip: per-sample list of (head_word_idx, relation_tag, tail_word_idx).

        Returns:
            pred:  [n, 4] logits for all positive + negative pairs.
            label: [n] long tensor of class indices, on the same device as
                ``pred`` (positives from ``class_num``, negatives 0).
        """
        out = self.bert(input_ids=input_ids,
                        attention_mask=attention_mask,
                        token_type_ids=token_type_ids,
                        return_dict=True)
        hidden = out.last_hidden_state  # [B, seq_len, 768]
        pred, label = [], []
        for b in range(hidden.size(0)):  # renamed from `id` (shadowed the builtin)
            for item in trip[b]:
                rel = item[1]
                head_tokens, tail_tokens = fenci[b][item[0]], fenci[b][item[2]]
                head_vec = self._word_vec(hidden[b], head_tokens).unsqueeze(0)  # [1, 768]
                tail_vec = self._word_vec(hidden[b], tail_tokens).unsqueeze(0)  # [1, 768]
                pred.append(self._score(torch.cat([head_vec, tail_vec], dim=1)))
                label.append(class_num[rel])
                # Negative sampling: pair the head with ~15% (clamped to 1..5)
                # of the remaining words; those pairs get class 0.
                others = copy.deepcopy(fenci[b])
                others.remove(head_tokens)
                others.remove(tail_tokens)
                n_neg = min(5, max(1, int(len(others) * 0.15)))
                random.shuffle(others)
                # BUGFIX: loop variable renamed (was `r`, shadowing the relation tag).
                for neg_tokens in others[:n_neg]:
                    neg_vec = self._word_vec(hidden[b], neg_tokens).unsqueeze(0)
                    pred.append(self._score(torch.cat([head_vec, neg_vec], dim=1)))
                    label.append(0)
        if not pred:
            # BUGFIX: a batch with no triples used to crash in torch.cat([]).
            empty_logits = hidden.new_zeros((0, self.cls.out_features))
            return empty_logits, torch.zeros(0, dtype=torch.long, device=hidden.device)
        pred = torch.cat(pred, dim=0)  # [n, 4]
        # BUGFIX: was `torch.tensor(label).cuda()`, which broke CPU runs;
        # follow the logits' device instead of hard-coding CUDA.
        label = torch.tensor(label, device=pred.device)
        return pred, label
    
    

    
    
    
    
    
    
    
# NOTE(review): dead experimental code, kept as a module-level string so it
# never executes — an earlier variant that attention-weighted the [CLS]
# vector with the head/tail vectors before classifying.  It references
# self.Wa, which the current base_Model does not define, so it cannot be
# re-enabled as-is.  Preserved verbatim for reference.
'''
# 把两个en词和[CLS]向量整合做注意力加权之后得到新的表示向量，送入下一个阶段计算
# print(out.pooler_output[0].unsqueeze(0).shape,h_context.transpose(0,1).shape,t_context.transpose(0,1).shape)
zu=[pooler_output[id].unsqueeze(0),h_context.transpose(0,1),t_context.transpose(0,1)] #[(1*200),(1*200)...]
M_vec=torch.cat(zu,dim=0) # [3*200]
attn_energies = self.Wa(M_vec) #[3*200]*[200*1]=[3*1]
attn_energies = F.softmax(attn_energies, dim=0) # [3*1]
context = torch.bmm(M_vec.transpose(0,1).unsqueeze(0),attn_energies.unsqueeze(0)).squeeze(0)
# [1*200*3]*[1*3*1]=[1*200*1].squeeze(0)=[200*1]
p=self.cls(context.transpose(0,1)) #[1*2]
pred.append(p)
label.append(1)  # 依存三元组注意力加权之后的分类得分，label=1
zu=[h_context.transpose(0,1),mask_context.unsqueeze(0)]  # [2*200]
negative=torch.cat(zu,dim=0).mean(0)  #[200]
neg=self.cls(negative.unsqueeze(0)) # [1*200]*[200*2]——>[1*2]
pred.append(neg)
label.append(0)  # 负例和CLS拼接之后分类得分，lable=0
'''