import torch
import torch.nn as nn
from transformers import (
    BertTokenizer,
    BertConfig,
    PreTrainedTokenizerFast,
    AutoTokenizer,
    AutoModel,
    BertForNextSentencePrediction,
    BertModel
)
import torch.nn.functional as F
import random,pickle
import numpy as np
import copy
# Tokenizer for the Chinese BERT checkpoint; 'chinese_L-12_H-768_A-12' is a
# local path/identifier — presumably the Google Chinese BERT-base weights.
tokenizer = BertTokenizer.from_pretrained('chinese_L-12_H-768_A-12')
# Seed Python's RNG so negative sampling in my_Model.forward is reproducible.
# NOTE(review): torch and numpy RNGs are NOT seeded here — confirm whether
# full run-to-run reproducibility is expected.
random.seed(2021)

def get_sememe_vec():
    """Load sememe embeddings and the word<->sememe-definition mappings.

    Reads three fixed relative paths:
      - data/sememe-vec.txt : word2vec-style text file; the first line is a
        header and is skipped, every other line is "<sememe> <f1> <f2> ...".
      - data/word2def.pkl   : pickled mapping word -> sememe definitions.
      - data/def2word.pkl   : pickled mapping sememe definition -> words.

    Returns:
        (sememe_vec, word2def, def2word) where sememe_vec maps a sememe
        token to its embedding as a list of floats.
    """
    sememe_vec = {}
    with open('data/sememe-vec.txt', 'r', encoding='utf-8') as file:
        next(file)  # skip the "<count> <dim>" header line
        for line in file:  # stream instead of readlines(): no full-file list
            parts = line.split()  # split once (original split each line twice)
            if not parts:  # tolerate trailing blank lines instead of crashing
                continue
            sememe_vec[parts[0]] = [float(value) for value in parts[1:]]
    with open(r"data/word2def.pkl", 'rb') as file:
        word2def = pickle.load(file)
    with open(r"data/def2word.pkl", 'rb') as file:
        def2word = pickle.load(file)
    return sememe_vec, word2def, def2word

class my_Model(nn.Module):
    """BERT-based model that scores dependency/collocation pairs, enhanced
    with sememe (HowNet-style) attention, trained with positive/negative
    pair classification.

    NOTE(review): this class is mid-refactor and does NOT run as written —
    see the defect notes inside forward(). The code below is left untouched;
    only comments were added/translated.
    """
    def __init__(self, pretrained_path,config):
        # pretrained_path: path/identifier for the BERT checkpoint.
        # config: a BertConfig-like object; only hidden_size is read here.
        super().__init__()
        self.bert  = BertModel.from_pretrained(pretrained_path)
        # Projects BERT hidden states (hidden_size) down to 200-d, matching
        # the sememe embedding dimension read by get_sememe_vec().
        self.L=nn.Linear(config.hidden_size,200)
        self.W=nn.Linear(200,200)
        # NOTE(review): self.Wa and self.cos are defined but never used in
        # the visible forward() — possibly leftovers from an earlier version.
        self.Wa=nn.Linear(200,1)
        # sememe_vec: sememe -> embedding; word2def: word -> sememes;
        # def2word: sememe -> words (loaded from data/ files).
        self.sememe_vec,self.word2def,self.def2word=get_sememe_vec()
        # Binary classifier head over a 200-d pair representation.
        self.cls=nn.Linear(200,2)
        self.cos = nn.CosineSimilarity(dim=1, eps=1e-6)
        
    def forward(self, input_ids, token_type_ids, attention_mask,batch3,batch4, return_dict=None):
        """Score positive (gold triple) and negative (sememe-sampled) pairs.

        Args (assumed — TODO confirm against the caller):
            input_ids/token_type_ids/attention_mask: standard BERT batch
                tensors, presumably [B, 512].
            batch3: per-sentence word segmentations as lists of character
                index groups, e.g. [[0,1],[3],[4,5],...].
            batch4: per-sentence word/triple info (clobbered below — see note).

        NOTE(review): this method is broken as written and cannot be
        imported/executed:
          * L103-onward ('h_def=...') is indented one level deeper than the
            preceding statement with no enclosing loop -> IndentationError.
          * 'matrix' (label=matrix), 'w', 'h_word', 't_word' are undefined
            in this scope — they look like remnants of a previous per-triple
            inner loop (see the commented-out code below).
          * 'unseqeeze' (twice, in the bmm line) is a typo for 'unsqueeze'.
          * 'torch.cat(word,dim=0)' iterates a 1-D [200] tensor; the intent
            was almost certainly 'torch.cat(fenci,dim=0)'.
          * 'label' starts as a list, is rebound to 'matrix', then .append()
            is called on it later — the two versions conflict.
        """
        out1 = self.bert(input_ids=input_ids, 
                        attention_mask=attention_mask, 
                        token_type_ids=token_type_ids,
                        return_dict=True)#[B*512*768],[2*768]
        last_hidden_state=self.L(out1.last_hidden_state) # torch.Size([B, 512, 200])
        pooler_output=self.L(out1.pooler_output) #torch.Size([B, 200])
        # NOTE(review): 'id' shadows the builtin; it indexes the sentence
        # within the batch.
        id=0
        pred,label=[],[]
        for index,word,x,y,z in zip(batch3,batch4,input_ids, token_type_ids, attention_mask): # each sentence in the batch
            fenci=[]  # per-word vectors for this sentence ("fenci" = segmentation)
            for item in index: # [[0,1],[3],[4,5]...]
                # idx+1 skips the [CLS] token at position 0.
                zu=[last_hidden_state[id][idx+1].unsqueeze(0) for idx in item] #[(1*200),(1*200)]
                # Compose a word vector by mean-pooling its character vectors.
                # NOTE(review): this clobbers the loop variable 'word' from batch4.
                word=torch.cat(zu,dim=0).mean(0) #torch.Size([200])  # chars -> word vector
                fenci.append(word.unsqueeze(0))  # [(1*200),(1*200)]
            # NOTE(review): should presumably be torch.cat(fenci,dim=0) —
            # 'word' is a single [200] tensor here. TODO confirm and fix.
            sentence=torch.cat(word,dim=0)  # [n*200]
            s=self.W(sentence)
            # NOTE(review): 'unseqeeze' is a typo for 'unsqueeze' (twice).
            M=torch.bmm(s.unseqeeze(0),s.transpose(0,1).unseqeeze(0)).squeeze(0)  # [1*n*200]*[1*200*n]=[1*n*n]
            # NOTE(review): 'matrix' is undefined (NameError) and this rebinds
            # the 'label' list initialized above. The scratch string below
            # sketches how the gold n*n adjacency matrix was meant to be built.
            label=matrix
            '''
            
            li=[[0,1],[2],[3],[4,5],[6,7,8],[9,10],[11]]
            label=[(0,2),(4,6)]
            print(len(li))
            test = [[0] * len(li) for _ in range(len(li))]
            print(test)
            for i in range(len(test)):
                for j in range(len(test)):
                    for item in label:
                        h,t=item[0],item[1]
                        if (h==i and t==j) or (h==j and t==i):
                            test[i][j]=1
            print(test)

            import torch
            a=torch.tensor([[1.0,2.0,3.0],[2.0,3.0,4.0]])
            print(a.flatten())
            print(torch.from_numpy(np.array(test)).flatten())
            
            
            '''



                # mask_id=tokenizer.convert_tokens_to_ids(tokenizer.mask_token) == 103
                # input=torch.from_numpy(np.array([x.cpu()[i] if i-1 not in t else 103 for i in range(len(x))]))
                # out2 = self.bert(input_ids=input.unsqueeze(0).cuda(), 
                #                 attention_mask=z.unsqueeze(0), 
                #                 token_type_ids=y.unsqueeze(0),
                #                 return_dict=True)
                # last=self.L(out2.last_hidden_state)  # [1,512,200]
                # zu=[last[0][idx+1].unsqueeze(0) for idx in t]
                # mask_word=torch.cat(zu,dim=0).mean(0) #torch.Size([n,200])——>torch.Size([200])
               
                # construct a negative example
                # t=[i if (i-1)<=0 else i-1 for i in t]
                # zu=[last_hidden_state[id][idx+1].unsqueeze(0) for idx in t]
                # nagetive_word=torch.cat(zu,dim=0).mean(0) #torch.Size([n,200])——>torch.Size([200])
                
                # NOTE(review): from here down the indentation implies an inner
                # loop (likely "for each gold triple w with head w[0], tail
                # w[2]") that was deleted; 'w', 'h_word', 't_word' are
                # undefined. Left as-is pending reconstruction.
                h_def=self.word2def.get(w[0],[])  # sememe definitions of the head predicate
                d=[]
                for k in h_def:
                    s_vec=self.sememe_vec.get(k,0)  # sememe embedding, 0 if unknown
                    if s_vec==0:
                        pass
                    else:
                        s_vec=torch.from_numpy(np.array(s_vec))
                        d.append(s_vec.unsqueeze(0))
                if d==[]:
                    h_context=h_word.unsqueeze(-1) #[200*1]  # no sememe vectors: fall back to the head word vector itself
                else:  # otherwise attend over the sememe vectors to build a new head representation
                    def_=torch.cat(d,dim=0).cuda()  # [n*200]
                    # NOTE(review): torch.tensor() on an existing tensor copies
                    # and warns; def_.float() would be the idiomatic form.
                    def_ = torch.tensor(def_, dtype=torch.float32)
                    # print(def_.unsqueeze(0).shape, h_word.unsqueeze(-1).unsqueeze(0).shape)
                    # [1*n*200]*[1*200*1]=[1*n*1].squeeze(0)=[n*1]
                    attn_weights = torch.bmm(def_.unsqueeze(0),h_word.unsqueeze(-1).unsqueeze(0)).squeeze(0) 
                    soft_attn_weights = F.softmax(attn_weights, dim=0) #[n*1]
                    h_context = torch.bmm(def_.transpose(0,1).unsqueeze(0), soft_attn_weights.unsqueeze(0)).squeeze(0)
                    # [1*200*n]*[1*n*1]=[1*200*1].squeeze(0)=[200*1]
                    # Average the sememe-attended vector with the original head
                    # word vector as the final head representation.
                    h_context=torch.cat([h_context.transpose(0,1),h_word.unsqueeze(0)],dim=0).mean(0).unsqueeze(-1)
                    # [200*1]
                    
                
                # Same procedure for the tail/collocate word (w[2]).
                t_def=self.word2def.get(w[2],[])
                d=[]
                for k in t_def:
                    s_vec=self.sememe_vec.get(k,0)
                    if s_vec==0:
                        pass
                    else:
                        s_vec=torch.from_numpy(np.array(s_vec))
                        d.append(s_vec.unsqueeze(0))
                if d==[]:
                    t_context=t_word.unsqueeze(-1) #[200*1]
                else:
                    def_=torch.cat(d,dim=0).cuda()  # [3*200]
                    def_ = torch.tensor(def_, dtype=torch.float32)
                    attn_weights = torch.bmm(def_.unsqueeze(0),t_word.unsqueeze(-1).unsqueeze(0)).squeeze(0) 
                    soft_attn_weights = F.softmax(attn_weights, dim=0) #[n*1]
                    t_context = torch.bmm(def_.transpose(0,1).unsqueeze(0), soft_attn_weights.unsqueeze(0)).squeeze(0)
                    # [1*200*n]*[1*n*1]=[1*200*1].squeeze(0)=[200*1]
                    t_context=torch.cat([t_context.transpose(0,1),t_word.unsqueeze(0)],dim=0).mean(0).unsqueeze(-1)
                    # [200*1]
                
                # Negative sample: pick a word that shares a sememe with the
                # tail word, take ITS sememes, and attend with the (mask)
                # embedding — currently the tail embedding, see note below.
                n_def=[]
                if t_def!=[]:
                    for item in t_def:  # one of the tail word's sememes
                        neg=self.def2word.get(item,[])
                        if neg !=[]: # randomly pick a word and use its sememes
                            # NOTE(review): randint(0,len(neg)) is inclusive, so
                            # the index ranges -1..len(neg)-1; -1 wraps to the
                            # last element. Likely intended random.randint(0,
                            # len(neg)-1) or random.choice(neg). TODO confirm.
                            n_def=self.word2def.get(neg[random.randint(0,len(neg))-1],[])
                        if n_def!=[]:
                            break
                d=[]
                for k in n_def:
                    s_vec=self.sememe_vec.get(k,0)
                    if s_vec==0:
                        pass
                    else:
                        s_vec=torch.from_numpy(np.array(s_vec))
                        d.append(s_vec.unsqueeze(0))  # [1*200] [1*200]
                if d==[]:
                    mask_context=t_word.unsqueeze(-1) #[200*1]
                else:
                    def_=torch.cat(d,dim=0).cuda()  # [3*200]
                    def_ = torch.tensor(def_, dtype=torch.float32)
                    # NOTE(review): attends with t_word rather than the masked
                    # embedding (mask_word, commented out above) — confirm
                    # which is intended.
                    attn_weights = torch.bmm(def_.unsqueeze(0),t_word.unsqueeze(-1).unsqueeze(0)).squeeze(0)
                    # attn_weights = torch.bmm(def_.unsqueeze(0),mask_word.unsqueeze(-1).unsqueeze(0)).squeeze(0) 
                    soft_attn_weights = F.softmax(attn_weights, dim=0) #[n*1]
                    mask_context = torch.bmm(def_.transpose(0,1).unsqueeze(0), soft_attn_weights.unsqueeze(0)).squeeze(0)
                    # [1*200*n]*[1*n*1]=[1*200*1].squeeze(0)=[200*1]
                    # mask_context=torch.cat([mask_context.transpose(0,1),mask_word.unsqueeze(0)],dim=0).mean(0).unsqueeze(-1)
                    mask_context=torch.cat([mask_context.transpose(0,1),t_word.unsqueeze(0)],dim=0).mean(0).unsqueeze(-1)
                    # [200*1]
                          
                    
                # Sum the head and tail context vectors as the pair
                # representation and classify it.
                # print(out.pooler_output[0].unsqueeze(0).shape,h_context.transpose(0,1).shape,t_context.transpose(0,1).shape)
                zu=[h_context.transpose(0,1),t_context.transpose(0,1)] #[(1*200),(1*200)...]
                context=torch.cat(zu,dim=0).sum(0) # [2*200] -> [200], summed
                p=self.cls(context.unsqueeze(0)) #[1*200]*[200,2]——>[1*2]
                pred.append(p)
                label.append(1)  # score of the attention-weighted gold dependency triple, label=1
                
                zu=[h_context.transpose(0,1),mask_context.transpose(0,1)]  # [2*200]
                negative=torch.cat(zu,dim=0).sum(0)  #[200]
                n=self.cls(negative.unsqueeze(0)) # [1*200]*[200*2]——>[1*2]
                pred.append(n)
                label.append(0)  # score of the negative example, label=0
            id+=1
        # NOTE(review): torch.cat raises if pred is empty (e.g. no triples in
        # the batch); also 'pred' is a tensor while 'label' stays a Python
        # list — confirm the caller expects that asymmetry.
        pred=torch.cat(pred,dim=0)  # [n*2]
        return pred,label
    
    
    
    
    
    
    
    
    
    