# -*- coding: utf-8 -*-
"""
@Time ： 2024/3/30 21:06
@Auth ： fcq
@File ：BERT_cross_attention.py
@IDE ：PyCharm
@Motto：ABC(Always Be Coding)
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from models.model import BertAttention


class BERT_with_attention(nn.Module):
    """BERT encoder followed by a cross-attention classification head.

    Expects ``inputs`` to contain (token ids, segment ids, attention mask)
    at positions 0-2; returns the classifier logits.
    """

    def __init__(self, opt, bert, ):
        super(BERT_with_attention, self).__init__()
        self.bert = bert
        self.attention = cross_attention_classify(opt)

    def forward(self, inputs):
        # Unpack the three encoder inputs.
        token_ids, segment_ids, mask = inputs[0], inputs[1], inputs[2]
        encoder_out = self.bert(token_ids, token_type_ids=segment_ids, attention_mask=mask)
        # encoder_out[0] holds the per-token hidden states fed to the head.
        return self.attention(encoder_out[0])



class BERT_with_attention_sentiX(nn.Module):
    """BERT encoder whose token states are concatenated with externally
    supplied SentiX hidden states (along dim=1) before classification.

    Expects ``inputs`` = (token ids, segment ids, attention mask,
    sentix hidden states); returns the classifier logits.
    """

    def __init__(self, opt, bert, ):
        super(BERT_with_attention_sentiX, self).__init__()
        self.bert = bert
        self.attention = cross_attention_classify(opt)

    def forward(self, inputs):
        token_ids, segment_ids, mask, sentix_states = inputs[0], inputs[1], inputs[2], inputs[3]
        encoder_out = self.bert(token_ids, token_type_ids=segment_ids, attention_mask=mask)

        # Concatenate BERT token states with the SentiX states along dim=1
        # (presumably the sequence axis — TODO confirm against caller).
        combined = torch.cat((encoder_out[0], sentix_states), dim=1)
        return self.attention(combined)


class BERT_with_sentix_multitask(nn.Module):
    """Multi-task variant: two independent cross-attention heads share a
    single BERT+SentiX representation.

    Expects ``inputs`` = (token ids, segment ids, attention mask,
    sentix hidden states); returns a pair of logits, one per head.
    """

    def __init__(self, opt, bert, ):
        super(BERT_with_sentix_multitask, self).__init__()
        self.bert = bert
        self.attention1 = cross_attention_classify(opt)
        self.attention2 = cross_attention_classify(opt)

    def forward(self, inputs):
        token_ids, segment_ids, mask, sentix_states = inputs[0], inputs[1], inputs[2], inputs[3]
        encoder_out = self.bert(token_ids, token_type_ids=segment_ids, attention_mask=mask)

        # Shared representation: BERT token states joined with SentiX states.
        combined = torch.cat((encoder_out[0], sentix_states), dim=1)

        # Each task gets its own head over the same features.
        return self.attention1(combined), self.attention2(combined)


class cross_attention_classify(nn.Module):
    """Classification head: two stacked BertAttention layers followed by a
    linear projection of the position-0 (CLS-style) token representation.
    """

    def __init__(self, opt):
        super(cross_attention_classify, self).__init__()
        # Two attention layers applied in sequence.
        self.attention = nn.ModuleList([BertAttention(opt.bert_config) for _ in range(2)])
        # Projects bert_dim features to num_labels logits.
        self.dense = nn.Linear(opt.bert_dim, opt.num_labels)

    def forward(self, inputs):
        hidden = inputs
        for layer in self.attention:
            # Each layer returns a tuple; element 0 is the updated states.
            hidden = layer(hidden)[0]

        # Classify from the first-token representation only.
        return self.dense(hidden[:, 0, :])

class cross_attention_classify_v2(nn.Module):
    """End-to-end head: runs BERT, refines the token states through two
    BertAttention layers, then classifies from the position-0 token.

    ``forward`` expects ``inputs`` = (token ids, segment ids, attention
    mask, gate_input) and returns the classifier logits.
    """

    def __init__(self, opt, bert):
        super(cross_attention_classify_v2, self).__init__()

        self.bert = bert
        # Two attention layers applied in sequence.
        self.attention = nn.ModuleList([BertAttention(opt.bert_config) for _ in range(2)])
        # Projects bert_dim features to num_labels logits.
        self.dense = nn.Linear(opt.bert_dim, opt.num_labels)

    def forward(self, inputs):
        # gate_input is unpacked but currently unused; kept so callers that
        # pass a 4-element input keep working.
        text_bert_indices, bert_segments_ids, attention_mask, gate_input = inputs[0], inputs[1], inputs[2], inputs[3]
        pooled_output = self.bert(text_bert_indices, token_type_ids=bert_segments_ids, attention_mask=attention_mask)

        hidden_states = pooled_output[0]
        for layer_module in self.attention:
            # Element 0 of each layer's output tuple is the updated states.
            hidden_states = layer_module(hidden_states)[0]

        # BUG FIX: classify from the refined hidden states. The original
        # sliced `inputs` — a list/tuple of tensors, which does not support
        # [:, 0, :] indexing — and discarded the attention layers' output.
        feature = self.dense(hidden_states[:, 0, :])

        return feature
