# -*- coding: utf-8 -*-
"""
@Time ： 2022/9/27 9:09
@Auth ： xlwreally
@File ：model.py
@IDE ：PyCharm
"""
import numpy as np
import torch
from torch import nn
from transformers import BertModel


class SelfAttention(torch.nn.Module):
    """Additive (tanh) self-attention producing one weight per token.

    Scores each token of a sequence with ``tanh(X @ W) @ V``, normalizes
    the scores with softmax over the sequence dimension, and applies
    dropout to the resulting attention weights.
    """

    def __init__(self, args):
        """
        Args:
            args: namespace with
                wv_dim (int): word-vector dimension of the input embeddings.
                text_size (int): maximum sequence length (stored; unused in forward).
                f (int): hidden size of the attention score projection.
        """
        super(SelfAttention, self).__init__()
        self.wv_dim = args.wv_dim
        self.maxlen = args.text_size  # kept for external consumers; not used in forward
        self.f = args.f
        # Two-stage score projection: wv_dim -> f -> 1.
        self.W = nn.Parameter(torch.empty(size=(args.wv_dim, args.f)))
        self.V = nn.Parameter(torch.empty(size=(args.f, 1)))
        # Initialize the Parameters directly; mutating `.data` bypasses
        # autograd bookkeeping and is a deprecated idiom.
        nn.init.kaiming_uniform_(self.W)
        nn.init.kaiming_uniform_(self.V)
        self.attention_softmax = torch.nn.Softmax(dim=-1)
        self.dropout = nn.Dropout()

    def forward(self, input_embeddings):
        """Return dropout-regularized attention weights.

        Args:
            input_embeddings: tensor of shape (batch, seq_len, wv_dim).

        Returns:
            Tensor of shape (batch, seq_len); each row sums to 1
            (before dropout / in eval mode).
        """
        hidden = torch.tanh(torch.matmul(input_embeddings, self.W))  # (B, S, f)
        # squeeze(-1) drops the singleton score dimension regardless of rank.
        scores = torch.matmul(hidden, self.V).squeeze(-1)            # (B, S)
        weights = self.attention_softmax(scores)
        return self.dropout(weights)

class Fusion(torch.nn.Module):
    """Fuses a BERT [CLS] vector with an attention-pooled word vector.

    The CLS vector goes through a tanh bottleneck (bert_dim -> m -> bert_dim);
    both representations are then projected to a common size ``u``, summed,
    squashed with tanh, and regularized with dropout.
    """

    def __init__(self, args):
        """
        Args:
            args: namespace with
                bert_dim (int): dimensionality of the BERT [CLS] vector.
                m (int): bottleneck size for the CLS transformation.
                u (int): output (fused) feature size.
                wv_dim (int): dimensionality of the pooled word vector.
        """
        super(Fusion, self).__init__()
        self.dropout = nn.Dropout()
        # CLS bottleneck: bert_dim -> m (w2), then m -> bert_dim (w3).
        self.linear_w3 = nn.Linear(args.m, args.bert_dim)
        self.linear_w2 = nn.Linear(args.bert_dim, args.m)
        # Projections of both modalities into the shared space of size u.
        self.linear_w4 = nn.Linear(args.bert_dim, args.u)
        self.linear_w5 = nn.Linear(args.wv_dim, args.u)

    def forward(self, cls, h1):
        """Fuse the two representations.

        Args:
            cls: BERT [CLS] vector, shape (batch, bert_dim).
            h1: attention-pooled word vector, shape (batch, wv_dim).

        Returns:
            Fused tensor of shape (batch, u); values lie in [-1, 1]
            (before dropout / in eval mode).
        """
        r1 = self.linear_w3(torch.tanh(self.linear_w2(cls)))
        x1 = torch.tanh(torch.add(self.linear_w4(r1), self.linear_w5(h1)))
        return self.dropout(x1)


class WML(torch.nn.Module):


    def __init__(self,args):

        super(WML, self).__init__()
        self.encoder_only = False
        self.WCL_loss_size=args.WCL_loss_size
        self.sim=nn.CosineSimilarity(dim=-1, eps=1e-6)
        self.attention = SelfAttention(args)
        self.Fusion=Fusion(args)
        self.Bert = BertModel.from_pretrained(args.bert)
        self.softmax = nn.Softmax(dim=-1)
        self.linear = nn.Linear(args.u, 2, bias=True)



    def forward(self, vector,glove_embedding):
        bert_embeddings = self.Bert( input_ids=vector['input_ids'].squeeze(1),
                                     attention_mask=vector['attention_mask'].squeeze(1),
                                     token_type_ids=vector['token_type_ids'].squeeze(1))

        attention_weights = self.attention(glove_embedding)
        h=torch.matmul(attention_weights.unsqueeze(1),glove_embedding).squeeze(1)
        x1=self.Fusion(bert_embeddings[0][:, 0, :].squeeze(1),h)
        x1_linear=self.linear(x1)
        out=self.softmax(x1_linear)

        if self.encoder_only:
            return attention_weights


        return attention_weights,x1,out


    def PL_loss(self,wml_attention,ABAE_attention,neg_ABAE_attention,t_PL):
        neg_sim=self.sim(wml_attention,neg_ABAE_attention)
        pos_sim = self.sim(wml_attention, ABAE_attention)
        t_neg_sim=torch.div(neg_sim, t_PL)
        t_pos_sim = torch.div(pos_sim, t_PL)
        exp_t_neg_sim=torch.exp(t_neg_sim)
        exp_t_pos_sim=torch.exp(t_pos_sim)
        exp_t_sim_add=torch.add(exp_t_pos_sim,exp_t_neg_sim)
        inside_ln=torch.div(exp_t_pos_sim,exp_t_sim_add)
        ln=torch.log(inside_ln)
        loss=-torch.sum(ln)
        return loss

