# -*- coding: utf-8 -*-
"""
@Time ： 2024/4/1 9:31
@Auth ： fcq
@File ：BERT_MoE_Model.py
@IDE ：PyCharm
@Motto：ABC(Always Be Coding)
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.model import BertAttention, GateNet
from models.BERT_cross_attention import cross_attention_classify, cross_attention_classify_v2, BERT_with_attention, \
    BERT_with_attention_sentiX
from models.BERT_SPC import BERT_SPC, BERT_SPC_with_softmax


# MoEModel: topic relevance experts (topic-related vs. topic-unrelated)
# MoEModel_step2: sentiment experts
# MoEModel_step3: mixed (topic + sentiment) experts
# MoEModel_final: variant that incorporates external knowledge
# MoEModel_final_tts: variant with a teacher-student network (final model)
class MoEModel(nn.Module):
    """Two-expert MoE over shared BERT hidden states.

    A single BERT encoder produces token-level hidden states; each
    cross_attention_classify expert scores them, and a GateNet (fed a
    separate gate feature vector) produces the per-expert mixing weights.
    """

    def __init__(self, opt, bert):
        super(MoEModel, self).__init__()
        self.bert = bert
        self.opt = opt
        # Two structurally identical experts; the gate learns to arbitrate.
        self.attn_module1 = cross_attention_classify(opt)
        self.attn_module2 = cross_attention_classify(opt)

        self.gate_network = GateNet(opt.bert_dim, 2)

        self.experts = nn.ModuleList([self.attn_module1, self.attn_module2])

    def forward(self, inputs):
        """Return (stacked expert outputs, gate weights, gate-mixed logits)."""
        token_ids = inputs[0]
        segment_ids = inputs[1]
        attn_mask = inputs[2]
        gate_feats = inputs[3]

        encoder_out = self.bert(token_ids, token_type_ids=segment_ids,
                                attention_mask=attn_mask)
        seq_hidden = encoder_out[0]

        gate_weights = self.gate_network(gate_feats)

        # (batch, num_classes, num_experts) after stacking along dim 2.
        stacked = torch.stack([expert(seq_hidden) for expert in self.experts],
                              dim=2)

        broadcast_w = gate_weights.unsqueeze(1).expand_as(stacked)
        mixed = (stacked * broadcast_w).sum(dim=2)
        return stacked, gate_weights, mixed


class MoEModel_full(nn.Module):
    """MoE with two self-contained BERT_with_attention experts.

    Unlike MoEModel, each expert owns its own BERT encoder; the GateNet
    mixes the softmax-normalized expert outputs.
    """

    def __init__(self, opt, bert1, bert2):
        super(MoEModel_full, self).__init__()

        self.opt = opt
        self.attn_module1 = BERT_with_attention(opt, bert1)
        self.attn_module2 = BERT_with_attention(opt, bert2)

        self.gate_network = GateNet(opt.bert_dim, 2)

        self.experts = nn.ModuleList([self.attn_module1, self.attn_module2])

    def forward(self, inputs):
        """Return (stacked expert outputs, gate weights, gate-mixed probs)."""
        token_ids = inputs[0]
        segment_ids = inputs[1]
        attn_mask = inputs[2]
        gate_feats = inputs[3]

        gate_weights = self.gate_network(gate_feats)

        # Each expert consumes the raw (ids, segments, mask) triple.
        stacked = torch.stack(
            [expert([token_ids, segment_ids, attn_mask])
             for expert in self.experts],
            dim=2)

        broadcast_w = gate_weights.unsqueeze(1).expand_as(stacked)
        # Softmax over classes (dim 1) before mixing across experts (dim 2).
        mixed = (torch.softmax(stacked, dim=1) * broadcast_w).sum(dim=2)
        return stacked, gate_weights, mixed


class MoEModel_full_gate_bert_sep(nn.Module):
    """MoE whose gate is itself a separate BERT classifier (BERT_SPC).

    The third BERT produces 2-way gate logits that are softmaxed into
    expert-mixing weights.
    """

    def __init__(self, opt, bert1, bert2, bert3):
        super(MoEModel_full_gate_bert_sep, self).__init__()

        self.opt = opt
        self.attn_module1 = BERT_with_attention(opt, bert1)
        self.attn_module2 = BERT_with_attention(opt, bert2)
        # NOTE: mutates the shared opt object so the gate BERT_SPC is
        # constructed with exactly 2 output labels (one per expert).
        opt.num_labels = 2
        self.gate_network = BERT_SPC(opt, bert3)

        self.experts = nn.ModuleList([self.attn_module1, self.attn_module2])

    def forward(self, inputs):
        """Return (stacked expert outputs, softmax gate weights, mixed logits)."""
        token_ids = inputs[0]
        segment_ids = inputs[1]
        attn_mask = inputs[2]

        gate_logits, _ = self.gate_network(inputs)
        gate_weights = torch.softmax(gate_logits, dim=1)

        stacked = torch.stack(
            [expert([token_ids, segment_ids, attn_mask])
             for expert in self.experts],
            dim=2)

        broadcast_w = gate_weights.unsqueeze(1).expand_as(stacked)
        logits = (torch.softmax(stacked, dim=1) * broadcast_w).sum(dim=2)
        return stacked, gate_weights, logits


class MoEModel_full_gate_sentiment_aug(nn.Module):
    """MoE gated by a sentiment classifier's softmax output.

    The gate's class probabilities are projected through
    ``final_label_tensor`` (a label-to-expert mapping supplied per batch)
    to produce expert-mixing weights. ``forward`` mixes experts with the
    soft projected weights; ``eval_model`` hardens them to a one-hot
    argmax choice for inference.
    """

    def __init__(self, opt, bert1, bert2, bert3):
        super(MoEModel_full_gate_sentiment_aug, self).__init__()

        self.opt = opt
        self.attn_module1 = BERT_with_attention(opt, bert1)
        self.attn_module2 = BERT_with_attention(opt, bert2)
        # Gate: sentiment classifier whose softmaxed logits drive mixing.
        self.gate_network = BERT_SPC_with_softmax(opt, bert3)

        self.experts = nn.ModuleList(
            [self.attn_module1, self.attn_module2])

    def forward(self, inputs):
        """Soft-gated training path.

        inputs: [text_bert_indices, bert_segments_ids, attention_mask,
                 final_label_tensor]
        Returns (stacked expert outputs, gate weights, mixed logits).
        """
        text_bert_indices, bert_segments_ids, attention_mask, final_label_tensor = inputs[0], inputs[1], inputs[2], \
            inputs[3]

        pred_logits, _ = self.gate_network([inputs[0], inputs[1], inputs[2]])
        # (batch, 1, num_sentiments) @ label->expert map -> (batch, 1, num_experts)
        pred_logits = pred_logits.unsqueeze(1)
        weights = torch.matmul(pred_logits, final_label_tensor)

        outputs = torch.stack(
            [expert([inputs[0], inputs[1], inputs[2]]) for expert in self.experts], dim=2)

        weights_ = weights.expand_as(outputs)
        logits = torch.sum(torch.softmax(outputs, dim=1) * weights_, dim=2)

        return outputs, weights.squeeze(1), logits

    def eval_model(self, inputs):
        """Hard-gated inference path: route all weight to the argmax expert.

        Same inputs/returns as ``forward`` but with a one-hot gate.
        """
        text_bert_indices, bert_segments_ids, attention_mask, final_label_tensor = inputs[0], inputs[1], inputs[2], \
            inputs[3]

        pred_logits, _ = self.gate_network([inputs[0], inputs[1], inputs[2]])
        pred_logits = pred_logits.unsqueeze(1)
        weights = torch.matmul(pred_logits, final_label_tensor).squeeze(1)
        pred_weights = torch.argmax(weights, dim=1)

        # BUGFIX: the previous code did .detach().cpu() then .cuda(), which
        # crashed on CPU-only runs and forced a needless device round-trip.
        # argmax output is already non-differentiable, and F.one_hot runs on
        # whatever device the indices live on.
        weights_one_hot = F.one_hot(pred_weights, num_classes=2)

        outputs = torch.stack(
            [expert([inputs[0], inputs[1], inputs[2]]) for expert in self.experts], dim=2)

        # Cast the integer one-hot to the logits dtype before multiplying
        # instead of relying on implicit type promotion.
        weights_ = weights_one_hot.unsqueeze(1).expand_as(outputs).to(outputs.dtype)
        logits = torch.sum(torch.softmax(outputs, dim=1) * weights_, dim=2)

        # `weights` is already 2-D here; the old extra squeeze(1) was a no-op.
        return outputs, weights, logits

class MoEModel_full_gate_bert_sentiment(nn.Module):
    """MoE gated by a sentiment-aware BERT (BERT_with_attention_sentiX).

    The gate consumes the full 4-element input (including the sentiment
    hidden state); the experts see only the (ids, segments, mask) triple.
    """

    def __init__(self, opt, bert1, bert2, bert3):
        super(MoEModel_full_gate_bert_sentiment, self).__init__()

        self.opt = opt
        self.attn_module1 = BERT_with_attention(opt, bert1)
        self.attn_module2 = BERT_with_attention(opt, bert2)
        # NOTE: mutates the shared opt so the gate is built with 2 labels.
        opt.num_labels = 2
        self.gate_network = BERT_with_attention_sentiX(opt, bert3)

        self.experts = nn.ModuleList([self.attn_module1, self.attn_module2])

    def forward(self, inputs):
        """Return (stacked expert outputs, softmax gate weights, mixed logits)."""
        token_ids = inputs[0]
        segment_ids = inputs[1]
        attn_mask = inputs[2]

        gate_weights = torch.softmax(self.gate_network(inputs), dim=1)

        stacked = torch.stack(
            [expert([token_ids, segment_ids, attn_mask])
             for expert in self.experts],
            dim=2)

        broadcast_w = gate_weights.unsqueeze(1).expand_as(stacked)
        mixed = (torch.softmax(stacked, dim=1) * broadcast_w).sum(dim=2)
        return stacked, gate_weights, mixed


class MoEModel_step2_full(nn.Module):
    """Sentiment-stage MoE with two BERT_with_attention_sentiX experts.

    Each expert receives the sentiment hidden state in addition to the
    (ids, segments, mask) triple; a GateNet mixes the raw expert outputs.
    """

    def __init__(self, opt, bert1, bert2):
        super(MoEModel_step2_full, self).__init__()
        self.opt = opt
        self.attention_module1 = BERT_with_attention_sentiX(opt, bert1)
        self.attention_module2 = BERT_with_attention_sentiX(opt, bert2)

        self.gate_network = GateNet(opt.bert_dim, 2)

        self.experts = nn.ModuleList(
            [self.attention_module1, self.attention_module2])

    def forward(self, inputs):
        """Return (stacked expert outputs, gate weights, gate-mixed logits)."""
        token_ids = inputs[0]
        segment_ids = inputs[1]
        attn_mask = inputs[2]
        gate_feats = inputs[3]
        senti_hidden = inputs[4]

        gate_weights = self.gate_network(gate_feats)

        stacked = torch.stack(
            [expert([token_ids, segment_ids, attn_mask, senti_hidden])
             for expert in self.experts],
            dim=2)

        broadcast_w = gate_weights.unsqueeze(1).expand_as(stacked)
        mixed = (stacked * broadcast_w).sum(dim=2)
        return stacked, gate_weights, mixed


class MoEModel_step2(nn.Module):
    """Sentiment-stage MoE over shared BERT states.

    The sentiment features are concatenated onto the BERT hidden states
    (along dim 1) before both experts score the combined sequence.
    """

    def __init__(self, opt, bert):
        super(MoEModel_step2, self).__init__()
        self.bert = bert
        self.opt = opt
        self.attn_module1 = cross_attention_classify(opt)
        self.attn_module2 = cross_attention_classify(opt)

        self.gate_network = GateNet(opt.bert_dim, 2)

        self.experts = nn.ModuleList([self.attn_module1, self.attn_module2])

    def forward(self, inputs):
        """Return (stacked expert outputs, gate weights, gate-mixed logits)."""
        token_ids = inputs[0]
        segment_ids = inputs[1]
        attn_mask = inputs[2]
        gate_feats = inputs[3]
        senti_feats = inputs[4]

        encoder_out = self.bert(token_ids, token_type_ids=segment_ids,
                                attention_mask=attn_mask)
        seq_hidden = encoder_out[0]
        # Append sentiment features along the sequence dimension.
        combined = torch.cat([seq_hidden, senti_feats], dim=1)

        gate_weights = self.gate_network(gate_feats)

        stacked = torch.stack([expert(combined) for expert in self.experts],
                              dim=2)

        broadcast_w = gate_weights.unsqueeze(1).expand_as(stacked)
        mixed = (stacked * broadcast_w).sum(dim=2)
        return stacked, gate_weights, mixed


class MoEModel_step3(nn.Module):
    """Mixed-stage MoE: the two experts see different feature views.

    Expert 1 scores the plain BERT hidden states; expert 2 scores the
    hidden states with sentiment features concatenated (dim 1).
    """

    def __init__(self, opt, bert):
        super(MoEModel_step3, self).__init__()
        self.bert = bert
        self.opt = opt
        self.attn_module1 = cross_attention_classify(opt)
        self.attn_module2 = cross_attention_classify(opt)

        self.gate_network = GateNet(opt.bert_dim, 2)

        self.experts = nn.ModuleList([self.attn_module1, self.attn_module2])

    def forward(self, inputs):
        """Return (stacked expert outputs, gate weights, gate-mixed logits)."""
        token_ids = inputs[0]
        segment_ids = inputs[1]
        attn_mask = inputs[2]
        gate_feats = inputs[3]
        senti_feats = inputs[4]

        encoder_out = self.bert(token_ids, token_type_ids=segment_ids,
                                attention_mask=attn_mask)
        seq_hidden = encoder_out[0]
        # Per-expert inputs: plain states vs. sentiment-augmented states.
        expert_views = [seq_hidden,
                        torch.cat([seq_hidden, senti_feats], dim=1)]

        gate_weights = self.gate_network(gate_feats)

        stacked = torch.stack(
            [expert(view) for expert, view in zip(self.experts, expert_views)],
            dim=2)

        broadcast_w = gate_weights.unsqueeze(1).expand_as(stacked)
        mixed = (stacked * broadcast_w).sum(dim=2)
        return stacked, gate_weights, mixed
