# -*- coding: utf-8 -*-
"""
@Time ： 2024/4/21 20:32
@Auth ： fcq
@File ：BERT_ConceptNet.py
@IDE ：PyCharm
@Motto：ABC(Always Be Coding)
"""

import torch
import torch.nn as nn
import torch.nn.functional as F


class Bert_ConceptNet(nn.Module):
    """BERT sentence classifier augmented with an autoencoded ConceptNet feature.

    A 100-d graph feature is passed through a one-layer encoder; the latent
    code is (a) decoded again to produce a reconstruction (autoencoder) loss
    and (b) concatenated with the BERT [CLS] hidden state for classification.

    Forward returns ``(sent_logits, recon_loss)``.
    """

    def __init__(self, opt, bert):
        """opt must expose ``bert_dim`` and ``num_labels``; ``bert`` is a
        HuggingFace-style model whose output[0] is (batch, seq, bert_dim)."""
        super(Bert_ConceptNet, self).__init__()
        self.bert = bert
        self.opt = opt

        # Graph-feature encoder: 100-d input -> 100-d latent code.
        self.g1 = nn.Linear(100, 100)
        self.g1_drop = nn.Dropout(0.1)
        # Decoder back to the 100-d graph-feature space.
        self.d1 = nn.Linear(100, 100)
        self.d2 = nn.Linear(100, 100)
        self.d_drop = nn.Dropout(0.1)
        # Classifier over [CLS] hidden state concatenated with the latent code.
        self.sent_cls = nn.Linear(opt.bert_dim + 100, opt.num_labels)

    def encode2(self, x2):
        """Encode the 100-d graph feature into the latent space."""
        x2 = self.g1(x2)
        x2 = F.relu(x2)
        x2 = self.g1_drop(x2)
        return x2

    def decode(self, z):
        """Reconstruct the graph feature from the latent code."""
        z = self.d1(z)
        z = F.relu(z)
        z = self.d_drop(z)
        z = self.d2(z)
        return z

    def loss_ae(self, recon_x, x):
        """Mean-squared reconstruction error between ``recon_x`` and ``x``."""
        dim = x.size(1)
        # view(-1, dim) is a no-op for 2-D input; it only flattens higher ranks.
        return F.mse_loss(recon_x, x.view(-1, dim), reduction='mean')

    def forward(self, inputs):
        """Run one batch.

        inputs: (token ids, segment ids, attention mask, graph feature),
        where the graph feature is (batch, 100).
        Returns (sent_logits of shape (batch, num_labels), recon_loss scalar).
        """
        text_bert_indices, bert_segments_ids, attention_mask, graph_feature = (
            inputs[0], inputs[1], inputs[2], inputs[3])

        outputs = self.bert(input_ids=text_bert_indices, attention_mask=attention_mask,
                            token_type_ids=bert_segments_ids)
        # [CLS] token hidden state, shape (batch, bert_dim).
        hidden = outputs[0][:, 0, :]

        z2 = self.encode2(graph_feature)
        reconstruct = self.decode(z2)
        f = torch.cat((hidden, z2), dim=1)
        recon_loss = self.loss_ae(reconstruct, graph_feature)

        sent_logits = self.sent_cls(f)
        # Keep a (batch, num_labels) shape even if a batch dim was squeezed away.
        if sent_logits.dim() == 1:
            sent_logits = sent_logits.unsqueeze(0)
        return sent_logits, recon_loss

class Bert_conceptNet_module(nn.Module):
    """Ablation variant of Bert_ConceptNet: no reconstruction loss.

    The autoencoder submodules (``d1``/``d2``/``d_drop``) are still declared so
    checkpoints and module names stay compatible, but ``forward`` returns only
    the sentence logits. The dead ``decode`` call that the original performed
    and then discarded has been removed — it burned a full decoder pass per
    batch for no effect on the output.
    """

    def __init__(self, opt, bert):
        """opt must expose ``bert_dim`` and ``num_labels``; ``bert`` is a
        HuggingFace-style model whose output[0] is (batch, seq, bert_dim)."""
        super(Bert_conceptNet_module, self).__init__()
        self.bert = bert
        self.opt = opt

        # Graph-feature encoder: 100-d input -> 100-d latent code.
        self.g1 = nn.Linear(100, 100)
        self.g1_drop = nn.Dropout(0.1)
        # Decoder (declared for parity with Bert_ConceptNet; unused in forward).
        self.d1 = nn.Linear(100, 100)
        self.d2 = nn.Linear(100, 100)
        self.d_drop = nn.Dropout(0.1)
        # Classifier over [CLS] hidden state concatenated with the latent code.
        self.sent_cls = nn.Linear(opt.bert_dim + 100, opt.num_labels)

    def encode2(self, x2):
        """Encode the 100-d graph feature into the latent space."""
        x2 = self.g1(x2)
        x2 = F.relu(x2)
        x2 = self.g1_drop(x2)
        return x2

    def decode(self, z):
        """Reconstruct the graph feature from the latent code (kept for API
        parity; not called by forward in this variant)."""
        z = self.d1(z)
        z = F.relu(z)
        z = self.d_drop(z)
        z = self.d2(z)
        return z

    def loss_ae(self, recon_x, x):
        """Mean-squared reconstruction error between ``recon_x`` and ``x``."""
        dim = x.size(1)
        # view(-1, dim) is a no-op for 2-D input; it only flattens higher ranks.
        return F.mse_loss(recon_x, x.view(-1, dim), reduction='mean')

    def forward(self, inputs):
        """Run one batch.

        inputs: (token ids, segment ids, attention mask, graph feature),
        where the graph feature is (batch, 100).
        Returns sent_logits of shape (batch, num_labels).
        """
        text_bert_indices, bert_segments_ids, attention_mask, graph_feature = (
            inputs[0], inputs[1], inputs[2], inputs[3])

        outputs = self.bert(input_ids=text_bert_indices, attention_mask=attention_mask,
                            token_type_ids=bert_segments_ids)
        # [CLS] token hidden state, shape (batch, bert_dim).
        hidden = outputs[0][:, 0, :]

        z2 = self.encode2(graph_feature)
        f = torch.cat((hidden, z2), dim=1)

        sent_logits = self.sent_cls(f)
        # Keep a (batch, num_labels) shape even if a batch dim was squeezed away.
        if sent_logits.dim() == 1:
            sent_logits = sent_logits.unsqueeze(0)
        return sent_logits