import torch
import torch.nn as nn

from transformers import BertModel
from model.attention import MultiHeadAttention
from model.layernorm import LayerNorm
from MutiHeadAttCore import MultiHeadAttPredictor, LinearPredictor


class ComModel(nn.Module):
    """Joint extraction model built on a BERT encoder.

    Predicts, from a query+sentence input: two aspect spans, two opinion spans
    (conditioned on the aspect representation), a sentiment class per opinion,
    and a binary "is_on" pairing flag.
    """

    def __init__(self, args):
        """Build all sub-modules.

        Args:
            args: run configuration; must provide ``bert_type`` (subdirectory
                name of the pretrained BERT checkpoint under ``bert_model/``).
        """
        super().__init__()
        # NOTE(review): path is relative to the project root; when not launched
        # from main, a "../" relative path can resolve incorrectly — a path
        # anchored at the project root would be more robust.
        local_bert_model_path = "../bert_model/" + args.bert_type
        self.embed_bert_model = BertModel.from_pretrained(local_bert_model_path)
        self.embed_dropout = nn.Dropout(0.1)
        # Hidden size of the chosen BERT variant (768 for bert-base).
        embed_size = self.embed_bert_model.config.hidden_size

        # Projections into aspect / opinion representation spaces.
        self.a_rep = nn.Linear(embed_size, embed_size, bias=False)
        # NOTE(review): a_rep1 / a_rep2 are never used in get_aspect/forward;
        # kept only so existing checkpoints (state_dict keys) still load.
        self.a_rep1 = nn.Linear(embed_size, embed_size, bias=False)
        self.a_rep2 = nn.Linear(embed_size, embed_size, bias=False)
        self.o1_rep = nn.Linear(embed_size, embed_size, bias=False)
        self.o2_rep = nn.Linear(embed_size, embed_size, bias=False)
        self.a1_ffn = LinearPredictor()
        self.a2_ffn = LinearPredictor()
        self.o1_predictor = MultiHeadAttPredictor()
        self.o2_predictor = MultiHeadAttPredictor()

        # Sentiment branch. Fix: dimensions were hard-coded to 768, which
        # silently breaks for BERT variants whose hidden size differs from
        # bert-base; derive them from embed_size instead (identical for 768).
        self.s_att = MultiHeadAttention(2, embed_size, embed_size, embed_size, embed_size)
        self.s_att2 = MultiHeadAttention(2, embed_size, embed_size, embed_size, embed_size)
        self.s_norm = LayerNorm(embed_size)  # applied to (batch, sen_seq_len, embed_size)
        self.layer_dropout = nn.Dropout(0.1)

        # 3-way sentiment classifier head.
        self.s_ffn = nn.Sequential(
            nn.Linear(embed_size, 3)
        )

        # Binary "is_on" head over the concatenation of four pooled vectors.
        self.is_on_ffn = nn.Sequential(
            nn.Linear(4 * embed_size, 2),
        )

    def get_aspect(self, inputs, args):
        """Encode the batch with BERT and predict two aspect spans.

        Args:
            inputs: tokenized batch, forwarded as keyword args to BERT.
            args: run configuration; must provide ``sen_pre_len`` (length of
                the query prefix to strip from each sequence).

        Returns:
            Tuple ``(a1_s, a1_e, a2_s, a2_e, a_rep, sentence_part_feature)``:
            start/end logits for aspects 1 and 2, the projected aspect
            representation, and the sentence-only BERT features.
        """
        bert_feature = self.embed_bert_model(**inputs).last_hidden_state  # (batch, seq_len, hidden)
        bert_feature = self.embed_dropout(bert_feature)  # dropout keeps the shape

        # Keep only the sentence portion, dropping the query prefix tokens.
        sentence_part_feature = bert_feature[:, args.sen_pre_len:, :]

        # Aspect span predictions from the projected sentence features.
        a_rep = self.a_rep(sentence_part_feature)  # (batch, sen_seq_len, hidden)
        a1_s, a1_e = self.a1_ffn(a_rep)
        a2_s, a2_e = self.a2_ffn(a_rep)

        return a1_s, a1_e, a2_s, a2_e, a_rep, sentence_part_feature

    def forward(self, inputs, args, plus=None):
        """Full forward pass.

        Args:
            inputs: tokenized batch forwarded to the BERT encoder.
            args: run configuration; must provide ``sen_pre_len``.
            plus: optional precomputed result of :meth:`get_aspect`, supplied
                externally at test time; when ``None`` it is computed here.

        Returns:
            Dict of logits: aspect spans (``as_p``/``ae_p``/``as_p2``/``ae_p2``),
            opinion spans (``os_p``/``oe_p``/``os_p2``/``oe_p2``), sentiment
            (``s_logits``/``s_logits2``, each (batch, 3)), and pairing
            (``is_on_logits``, (batch, 2)).
        """
        if plus is None:
            a1_s, a1_e, a2_s, a2_e, a_rep, sentence_part_feature = self.get_aspect(inputs, args)
        else:
            a1_s, a1_e, a2_s, a2_e, a_rep, sentence_part_feature = plus

        # Opinion spans, conditioned on the aspect representation.
        o1_rep = self.o1_rep(sentence_part_feature)
        o2_rep = self.o2_rep(sentence_part_feature)
        o1_s, o1_e = self.o1_predictor(o1_rep, a_rep, sentence_part_feature)
        o2_s, o2_e = self.o2_predictor(o2_rep, a_rep, sentence_part_feature)

        # Sentiment: multi-head attention over (sentence, aspect, opinion),
        # residual + LayerNorm, max-pool over the sequence, 3-way classifier.
        s1_y = self.s_att(sentence_part_feature, a_rep, o1_rep)   # (batch, sen_seq_len, hidden)
        s2_y = self.s_att2(sentence_part_feature, a_rep, o2_rep)  # (batch, sen_seq_len, hidden)
        s1_x = self.s_norm(self.layer_dropout(sentence_part_feature + s1_y))
        s2_x = self.s_norm(self.layer_dropout(sentence_part_feature + s2_y))
        s1_x = torch.max(s1_x, dim=1)[0]  # (batch, hidden)
        s2_x = torch.max(s2_x, dim=1)[0]  # (batch, hidden)
        s_logits = self.s_ffn(s1_x)   # (batch, 3)
        s_logits2 = self.s_ffn(s2_x)  # (batch, 3)

        # "is_on": concatenate the first sentence token's feature with the
        # max-pooled aspect/opinion representations, then classify.
        a_x_pooling = torch.max(a_rep, dim=1)[0]    # (batch, hidden)
        o1_x_pooling = torch.max(o1_rep, dim=1)[0]  # (batch, hidden)
        o2_x_pooling = torch.max(o2_rep, dim=1)[0]  # (batch, hidden)
        is_on_logits = self.is_on_ffn(torch.cat((sentence_part_feature[:, 0, :], a_x_pooling, o1_x_pooling, o2_x_pooling), 1))
        # is_on_logits: (batch, 2)

        return {
            'as_p': a1_s,  # aspect-1 start logits; per original comments (batch, sen_seq_len, 1) — TODO confirm against LinearPredictor
            'ae_p': a1_e,  # aspect-1 end logits
            'as_p2': a2_s,  # aspect-2 start logits
            'ae_p2': a2_e,  # aspect-2 end logits
            'is_on_logits': is_on_logits,  # (batch, 2)
            'os_p': o1_s,  # opinion-1 start logits
            'oe_p': o1_e,  # opinion-1 end logits
            'os_p2': o2_s,  # opinion-2 start logits
            'oe_p2': o2_e,  # opinion-2 end logits
            's_logits': s_logits,   # (batch, 3)
            's_logits2': s_logits2  # (batch, 3)
        }
