import torch
import torch.nn as nn
from transformers import (
    BertTokenizer,
    BertConfig,
    BertModel
)
import torch.nn.functional as F
import random,pickle,copy
import numpy as np
import os
# Restrict the visible GPUs *before* any CUDA API is touched:
# CUDA_VISIBLE_DEVICES is only read when the CUDA context is initialised,
# so it must be exported before the first CUDA call.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
device = torch.device("cuda")

random.seed(2021)  # fix the Python RNG so negative sampling is reproducible
class base_Model(nn.Module):
    """Binary classifier over (predicate, argument) word pairs on top of BERT.

    For every gold dependency triple in ``trip`` the (head, tail) word pair is
    scored as a positive example (label 1).  Up to 5 randomly sampled
    non-argument words (~15% of the remaining words) paired with the same head
    serve as negatives (label 0).
    """

    def __init__(self, pretrained_path, config):
        super().__init__()
        # NOTE(review): `config` is accepted for interface compatibility but
        # is not used here — confirm against callers before removing it.
        self.bert = BertModel.from_pretrained(pretrained_path)
        self.dense = nn.Linear(768 * 2, 768 * 2)
        self.activation = nn.ReLU()
        self.cls = nn.Linear(768 * 2, 2)  # binary: real pair vs. random pair

    def _word_vec(self, hidden, word_idxs):
        """Mean-pool one word's sub-token vectors into a single [768] vector.

        The ``+ 1`` offset skips the leading [CLS] position in BERT's output,
        so ``word_idxs`` are 0-based indices into the original text tokens.
        """
        return torch.stack([hidden[i + 1] for i in word_idxs], dim=0).mean(0)

    def forward(self, input_ids, token_type_ids, attention_mask, orig, fenci, trip, appen, return_dict=None):
        """Score positive and sampled-negative word pairs for each sentence.

        Args:
            input_ids / token_type_ids / attention_mask: BERT inputs, [B, 512].
            orig: original sentence strings (unused; kept for logging parity).
            fenci: per sentence, list of words, each a list of sub-token indices,
                e.g. [[[0, 1], [2], [3, 4], ...], ...].
            trip: per sentence, list of (head_word_idx, dep_tag, role, tail_word_idx).
            appen: surface-form triples (unused here).
            return_dict: unused; kept for interface compatibility.

        Returns:
            (pred, label): pred is a [n, 2] score tensor and label a [n] tensor
            on the same device, or ([], []) when the batch contains no triples.
        """
        out = self.bert(input_ids=input_ids,
                        attention_mask=attention_mask,
                        token_type_ids=token_type_ids,
                        return_dict=True)
        last_hidden_state = out.last_hidden_state  # [B, seq_len, 768]
        pred, label = [], []
        for sent_id in range(last_hidden_state.size(0)):
            hidden = last_hidden_state[sent_id]
            for item in trip[sent_id]:
                h = fenci[sent_id][item[0]]  # head word sub-token indices
                t = fenci[sent_id][item[3]]  # tail word sub-token indices
                h_context = self._word_vec(hidden, h).unsqueeze(0)  # [1, 768]
                t_context = self._word_vec(hidden, t).unsqueeze(0)  # [1, 768]
                # Concatenate head and tail, then classify the pair.
                pair = torch.cat([h_context, t_context], dim=1)  # [1, 1536]
                pred.append(self.cls(self.activation(self.dense(pair))))
                label.append(1)  # gold triple => positive
                # Negative sampling: words other than head/tail of this triple.
                candidates = copy.deepcopy(fenci[sent_id])
                for word in (h, t):
                    # Guard with `in`: when head == tail the second remove()
                    # would otherwise raise ValueError.
                    if word in candidates:
                        candidates.remove(word)
                # Sample ~15% of the remaining words, clipped to [1, 5].
                n_neg = min(5, max(1, int(len(candidates) * 0.15)))
                random.shuffle(candidates)
                for neg in candidates[:n_neg]:
                    if not neg:  # skip empty sub-token lists
                        continue
                    neg_vec = self._word_vec(hidden, neg).unsqueeze(0)
                    pair = torch.cat([h_context, neg_vec], dim=1)  # [1, 1536]
                    pred.append(self.cls(self.activation(self.dense(pair))))
                    label.append(0)  # random pair => negative
        if len(pred) > 0:
            pred = torch.cat(pred, dim=0)  # [n, 2]
            # Put labels on the model's device rather than hard-coding .cuda().
            label = torch.tensor(label, device=pred.device)
        return pred, label
# class base_Model(nn.Module):
#     def __init__(self, pretrained_path,config):
#         super().__init__()
#         self.bert  = BertModel.from_pretrained(pretrained_path)
#         self.dense=nn.Linear(768*2,768*2)
#         self.activation = nn.ReLU()
#         self.cls=nn.Linear(768*2,2)
#     def forward(self, input_ids, token_type_ids, attention_mask,orig,fenci,trip,appen,return_dict=None):
#         out1 = self.bert(input_ids=input_ids, 
#                         attention_mask=attention_mask, 
#                         token_type_ids=token_type_ids,
#                         return_dict=True)#[B*512*768],[2*768]
#         last_hidden_state=out1.last_hidden_state # torch.Size([B, 512, 768])
#         pooler_output=out1.pooler_output #torch.Size([B, 768])
#         pred,label=[],[]
#         # print('last_hidden_state.shape:',last_hidden_state.shape,last_hidden_state.size(0))
#         # print('fenci:',fenci) # [[[0, 1], [2], [3, 4], [5], [6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16], [17], [18, 19], [20]], [[0, 1], [2, 3], [4], [5], [6, 7, 8], [9, 10], [11, 12], [13], [14], [15, 16, 17], [18], [19, 20, 21, 22], [23]]]
#         # print('trip:',trip)  #[[(4, 'sbj_HX', 'Exp', 2), (4, 'obj_HX', 'Clas', 11)], [(6, 'sbj_HX', 'Exp', 4), (8, 'sbj_HX', 'Exp', 4), (8, 'obj_HX', 'Clas', 9), (11, 'sbj_HX', 'Exp', 4)]]
#         # print('orig:',orig)  # ['容量的大小即为特定海域自净能力强弱的指标。', '豆腐冷冻后，蛋白质冷冻变性而成冻豆腐，别有风味。']
#         # print('appen:',appen)  # [[('为', 'sbj_HX', 'Exp', '大小'), ('为', 'obj_HX', 'Clas', '指标')], [('变性', 'sbj_HX', 'Exp', '蛋白质'), ('成', 'sbj_HX', 'Exp', '蛋白质'), ('成', 'obj_HX', 'Clas', '冻豆腐'), ('别有风味', 'sbj_HX', 'Exp', '蛋白质')]]
#         for id in range(last_hidden_state.size(0)):  # 遍历每一条句子
#             dic = {}  # {4: ['Exp_2','Cls_11']}
#             for item in trip[id]:
#                 dic.setdefault(item[0],[]).append(item[2] + '_' + str(item[3]))
#             for item in dic:  # {4: ['Exp_2','Cla_11']}
#                 p_id = item
#                 arguments = []
#                 arguments.append(fenci[id][p_id])
#                 for key in dic[item]:
#                     role, ar_id = key.split('_')[0], int(key.split('_')[1])
#                     h, r, t = fenci[id][p_id], role, fenci[id][ar_id]
#                     arguments.append(t)
#                     # print(h,r,t)  # [21] Exp [17, 18, 19]
#                     # 取核心谓词head和搭配词tail
#                     zu = [last_hidden_state[id][idx+1].unsqueeze(0) for idx in h] #[(1*768),(1*768)]
#                     h_word = torch.cat(zu,dim=0).mean(0) #torch.Size([768])
#                     zu = [last_hidden_state[id][idx+1].unsqueeze(0) for idx in t]
#                     t_word = torch.cat(zu,dim=0).mean(0) #torch.Size([n,768])——>torch.Size([768])
#                     h_context = h_word.unsqueeze(0)  # [1*768]
#                     t_context = t_word.unsqueeze(0)  # [1*768]
                    
#                     # 把两个en词直接相加和作为表示向量，送入下一个阶段计算
#                     zu=[h_context,t_context] # [(1*768),(1*768)...]
#                     context=torch.cat(zu,dim=1) # [1*768*2] 拼接
#                     context=self.dense(context)
#                     context=self.activation(context)
#                     p=self.cls(context) # [1*(768*2)]*[(768*2)*39]——>[1*39]
#                     pred.append(p)
#                     label.append(1)  # 依存三元组注意力加权之后的分类得分，label=1
#                 f=copy.deepcopy(fenci[id])  # 取负例子
#                 # print("arguments:",arguments)
#                 # print('f:', f)
#                 for c in arguments:
#                     if c in f:
#                         f.remove(c)  
#                 n_pred=min(5, max(1, int(len(f) * 0.15)))  # 随机取15%的负例：
#                 random.shuffle(f)
#                 for r in f[:n_pred]: 
#                     zu=[last_hidden_state[id][idx+1].unsqueeze(0) for idx in r]  # [(1*768),(1*768)]
#                     if len(zu) >0:
#                         rand_word = torch.cat(zu,dim=0).mean(0)  # torch.Size([768])
#                         zu = [h_context,rand_word.unsqueeze(0)]  # [(1*768),(1*768)...]
#                         negative = torch.cat(zu,dim=1)  # [1*768*2] 拼接
#                         negative = self.dense(negative)
#                         negative = self.activation(negative)
#                         n = self.cls(negative)  # [1*(768*2)]*[(768*2)*2]——>[1*2]
#                         pred.append(n)
#                         label.append(0)  # 负例和CLS拼接之后分类得分，lable=0   
#         if len(pred)>0:
#             pred=torch.cat(pred,dim=0)  # [n*39]
#             label=torch.tensor(label).cuda()
#         # print("pred.shape, label.shape",pred.shape, label.shape)
#         return pred,label 
    
# Closed set of the 27 semantic-role labels predicted by `role_Model`.
class_num = ['Reas' , 'Cons' , 'CaiLiao' , 'Stat' , 'Sco' , 'GongJu' , 'Dir' , 'YuanDian' , 'Time' , 'YiJu' , 'Quan' , 'FangShi' , 'ZhongDian' , 'LinTi' , 'Aft' , 'dsbj' , 'Prod' , 'Poss' , 'Loc' , 'Belg' , 'Datv' , 'Pat' , 'Clas' , 'Cont' , 'dobj' , 'Agt' , 'Exp' ]
# Role name -> class index (0-26); enumerate avoids the range(len(...)) idiom.
class_id = {name: idx for idx, name in enumerate(class_num)}
    
class role_Model(nn.Module):
    """27-way semantic-role classifier for (predicate, argument) word pairs.

    Each gold triple's (head, tail) pair representation is classified into one
    of the 27 roles in the module-level ``class_id`` mapping.
    """

    def __init__(self, pretrained_path, config):
        super().__init__()
        # NOTE(review): `config` is accepted for interface compatibility but
        # is not used here — confirm against callers before removing it.
        self.bert = BertModel.from_pretrained(pretrained_path)
        self.dense = nn.Linear(768 * 2, 768 * 2)
        self.activation = nn.ReLU()
        self.cls = nn.Linear(768 * 2, 27)  # one logit per semantic role

    def forward(self, input_ids, token_type_ids, attention_mask, orig, fenci, trip, appen, return_dict=None):
        """Classify the role of every gold (head, tail) pair in the batch.

        Args:
            input_ids / token_type_ids / attention_mask: BERT inputs, [B, 512].
            orig: original sentence strings (unused; kept for logging parity).
            fenci: per sentence, list of words, each a list of sub-token indices.
            trip: per sentence, list of (head_word_idx, dep_tag, role, tail_word_idx),
                e.g. (18, 'sbj_HX', 'Agt', 17).
            appen: surface-form triples (unused here).
            return_dict: unused; kept for interface compatibility.

        Returns:
            (pred, label): pred is a [n, 27] logit tensor and label a [n]
            tensor of role indices on the same device, or the empty lists
            ([], []) when the batch contains no triples.
        """
        out = self.bert(input_ids=input_ids,
                        attention_mask=attention_mask,
                        token_type_ids=token_type_ids,
                        return_dict=True)
        last_hidden_state = out.last_hidden_state  # [B, seq_len, 768]
        pred, label = [], []
        for sent_id in range(last_hidden_state.size(0)):
            hidden = last_hidden_state[sent_id]
            for item in trip[sent_id]:
                h = fenci[sent_id][item[0]]   # head word sub-token indices
                role = item[2]                # gold role name, e.g. 'Agt'
                t = fenci[sent_id][item[-1]]  # tail word sub-token indices
                # Mean-pool each word's sub-tokens; `+ 1` skips [CLS].
                h_vec = torch.stack([hidden[i + 1] for i in h], dim=0).mean(0)
                t_vec = torch.stack([hidden[i + 1] for i in t], dim=0).mean(0)
                pair = torch.cat([h_vec.unsqueeze(0), t_vec.unsqueeze(0)], dim=1)  # [1, 1536]
                pred.append(self.cls(self.activation(self.dense(pair))))
                label.append(class_id[role])
        if len(pred) > 0:
            pred = torch.cat(pred, dim=0)  # [n, 27]
            # Put labels on the model's device rather than hard-coding .cuda().
            label = torch.tensor(label, device=pred.device)
        return pred, label
    
    
    
# class role_Model(nn.Module):
#     def __init__(self, pretrained_path,config):
#         super().__init__()
#         self.bert  = BertModel.from_pretrained(pretrained_path)
#         self.dense=nn.Linear(768*2,768*2)
#         self.activation = nn.ReLU()
#         self.cls=nn.Linear(768*2,28)
#     def forward(self, input_ids, token_type_ids, attention_mask,orig,fenci,trip,appen,return_dict=None):
#         out1 = self.bert(input_ids=input_ids, 
#                         attention_mask=attention_mask, 
#                         token_type_ids=token_type_ids,
#                         return_dict=True)  # [B*512*768],[2*768]
#         last_hidden_state=out1.last_hidden_state # torch.Size([B, 512, 768])
#         pooler_output=out1.pooler_output #torch.Size([B, 768])
#         pred,label=[],[]
#         # print('last_hidden_state.shape:',last_hidden_state.shape,last_hidden_state.size(0))
#         # print('fenci:',fenci) # [[[0, 1], [2], [3, 4], [5], [6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16], [17], [18, 19], [20]], [[0, 1], [2, 3], [4], [5], [6, 7, 8], [9, 10], [11, 12], [13], [14], [15, 16, 17], [18], [19, 20, 21, 22], [23]]]
#         # print('trip:',trip)  #[[(4, 'sbj_HX', 'Exp', 2), (4, 'obj_HX', 'Clas', 11)], [(6, 'sbj_HX', 'Exp', 4), (8, 'sbj_HX', 'Exp', 4), (8, 'obj_HX', 'Clas', 9), (11, 'sbj_HX', 'Exp', 4)]]
#         # print('orig:',orig)  # ['容量的大小即为特定海域自净能力强弱的指标。', '豆腐冷冻后，蛋白质冷冻变性而成冻豆腐，别有风味。']
#         # print('appen:',appen)  # [[('为', 'sbj_HX', 'Exp', '大小'), ('为', 'obj_HX', 'Clas', '指标')], [('变性', 'sbj_HX', 'Exp', '蛋白质'), ('成', 'sbj_HX', 'Exp', '蛋白质'), ('成', 'obj_HX', 'Clas', '冻豆腐'), ('别有风味', 'sbj_HX', 'Exp', '蛋白质')]]
#         for id in range(last_hidden_state.size(0)):  # 遍历每一条句子
#             dic = {}  # {4: ['Exp_2','Cls_11']}
#             for item in trip[id]:
#                 dic.setdefault(item[0],[]).append(item[2] + '_' + str(item[3]))
#             for item in dic:  # {4: ['Exp_2','Cla_11']}
#                 p_id = item
#                 arguments = []
#                 arguments.append(fenci[id][p_id])
#                 for key in dic[item]:
#                     role, ar_id = key.split('_')[0], int(key.split('_')[1])
#                     h, r, t = fenci[id][p_id], role, fenci[id][ar_id]
#                     arguments.append(t)
#                     # print(h,r,t)  # [21] Exp [17, 18, 19]
#                     # 取核心谓词head和搭配词tail
#                     zu=[last_hidden_state[id][idx+1].unsqueeze(0) for idx in h] #[(1*768),(1*768)]
#                     h_word=torch.cat(zu,dim=0).mean(0) #torch.Size([768])
#                     zu=[last_hidden_state[id][idx+1].unsqueeze(0) for idx in t]
#                     t_word=torch.cat(zu,dim=0).mean(0) #torch.Size([n,768])——>torch.Size([768])
#                     h_context=h_word.unsqueeze(0)  # [1*768]
#                     t_context=t_word.unsqueeze(0)  # [1*768]
                    
#                     # 把两个en词直接相加和作为表示向量，送入下一个阶段计算
#                     zu=[h_context,t_context] # [(1*768),(1*768)...]
#                     context=torch.cat(zu,dim=1) # [1*768*2] 拼接
#                     context=self.dense(context)
#                     context=self.activation(context)
#                     p=self.cls(context) # [1*(768*2)]*[(768*2)*39]——>[1*39]
#                     pred.append(p)
#                     label.append(class_id[r])  # 依存三元组注意力加权之后的分类得分，label=1
#                 f=copy.deepcopy(fenci[id])  # 取负例子
#                 # print("arguments:",arguments)
#                 # print('f:', f)
#                 for c in arguments:
#                     if c in f:
#                         f.remove(c)  
#                 n_pred=min(5, max(1, int(len(f) * 0.15)))  # 随机取15%的负例：
#                 random.shuffle(f)
#                 for r in f[:n_pred]: 
#                     zu=[last_hidden_state[id][idx+1].unsqueeze(0) for idx in r] #[(1*768),(1*768)]
#                     if len(zu) >0:
#                         rand_word=torch.cat(zu,dim=0).mean(0)  # torch.Size([768])
#                         zu=[h_context,rand_word.unsqueeze(0)]  # [(1*768),(1*768)...]
#                         negative=torch.cat(zu,dim=1)  # [1*768*2] 拼接
#                         negative=self.dense(negative)
#                         negative=self.activation(negative)
#                         n=self.cls(negative)  # [1*(768*2)]*[(768*2)*2]——>[1*2]
#                         pred.append(n)
#                         label.append(27)  # 负例和CLS拼接之后分类得分，lable=0   
#         if len(pred)>0:
#             pred=torch.cat(pred,dim=0)  # [n*39]
#             label=torch.tensor(label).cuda()
#         # print("pred.shape, label.shape",pred.shape, label.shape)
#         return pred,label  
    