import torch
import torch.nn as nn

from transformers import BertModel
from model.attention import MultiHeadAttention
from model.layernorm import LayerNorm


class ComModel(nn.Module):
    """Joint aspect/opinion span extraction, sentiment, and relevance model.

    A BERT encoder produces token features; three multi-head-attention
    branches then predict, per token of the sentence part of the input:
      * aspect span start/end logits   (``as_p`` / ``ae_p``)
      * opinion span start/end logits  (``os_p`` / ``oe_p``)
    plus, per example:
      * 3-way sentiment logits         (``s_logits``)
      * 2-way "is-on" relevance logits (``is_on_logits``)
    """

    def __init__(self, args):
        super().__init__()
        # Path is relative to the project root; when this module is not run
        # from main, a "../"-style relative path may resolve incorrectly —
        # a project-root-based path is the safe choice.
        local_bert_model_path = "../bert_model/" + args.bert_type
        self.embed_bert_model = BertModel.from_pretrained(local_bert_model_path)
        self.embed_dropout = nn.Dropout(0.1)
        # Hidden size of the chosen BERT (768 for bert-base, 1024 for bert-large).
        embed_size = self.embed_bert_model.config.hidden_size

        # NOTE(review): a_rep / o_rep projections are currently unused — their
        # call sites are commented out in get_aspect()/forward().  Kept so
        # existing checkpoints (state_dicts) still load; remove together with
        # a checkpoint migration if they stay unused.
        self.a_rep = nn.Linear(embed_size, embed_size, bias=False)
        self.a_ffn = nn.Sequential(
            nn.Linear(embed_size, 2, bias=False)  # per-token aspect start/end logits
        )

        self.o_rep = nn.Linear(embed_size, embed_size, bias=False)
        self.o_ffn = nn.Sequential(
            nn.Linear(embed_size, 2, bias=False)  # per-token opinion start/end logits
        )

        # Fix: use embed_size instead of the hard-coded 768 so the model also
        # works with BERT variants whose hidden size is not 768.
        # Assumes MultiHeadAttention(n_heads, q_dim, k_dim, v_dim, out_dim)
        # with all dims equal here — TODO confirm against model/attention.py.
        self.s_att = MultiHeadAttention(2, embed_size, embed_size, embed_size, embed_size)
        self.s_norm = LayerNorm(embed_size)  # (batch, sentence_seq_len, embed_size)

        # Opinion-branch attention (added later, see original "自己加的" note).
        self.o_att = MultiHeadAttention(2, embed_size, embed_size, embed_size, embed_size)
        self.o_norm = LayerNorm(embed_size)

        # Aspect-branch attention (added later as well).
        self.a_att = MultiHeadAttention(2, embed_size, embed_size, embed_size, embed_size)
        self.a_norm = LayerNorm(embed_size)

        self.layer_dropout = nn.Dropout(0.1)
        self.s_ffn = nn.Sequential(
            nn.Linear(embed_size, 3)  # 3-way sentiment logits
        )

        # [CLS] feature + pooled aspect + pooled opinion -> binary logits.
        self.is_on_ffn = nn.Sequential(
            nn.Linear(3 * embed_size, 2),
        )

    def get_aspect(self, inputs):
        """Encode the input and predict aspect-span boundary logits.

        Args:
            inputs: kwargs dict for ``BertModel`` (input_ids, attention_mask, ...).

        Returns:
            Tuple of
              as_p:  (batch, sentence_seq_len) aspect-start logits,
              ae_p:  (batch, sentence_seq_len) aspect-end logits,
              sentence_part_feature: (batch, sentence_seq_len, hidden) tokens
                  after position 0 (the query/[CLS] part is dropped),
              a_rep: (batch, sentence_seq_len, hidden) aspect representation,
              bert_feature: (batch, seq_len, hidden) full encoder output.
        """
        bert_feature = self.embed_bert_model(**inputs).last_hidden_state  # (batch, seq_len, hidden)
        bert_feature = self.embed_dropout(bert_feature)  # dropout keeps the shape

        # Drop position 0 to keep only the sentence part (excluding the query).
        sentence_part_feature = bert_feature[:, 1:, :]

        # Aspect branch: self-attention + residual + dropout + layer norm.
        # a_rep = self.a_rep(sentence_part_feature)  # older Linear variant, kept for reference
        a_rep = self.a_att(sentence_part_feature, sentence_part_feature, sentence_part_feature)
        a_rep = self.a_norm(self.layer_dropout(sentence_part_feature + a_rep))
        a_logits = self.a_ffn(a_rep)  # (batch, sentence_seq_len, 2)

        # Split the 2 channels into start/end logits and drop the trailing dim.
        as_p, ae_p = a_logits.split(1, dim=-1)
        as_p = as_p.squeeze(-1)  # (batch, sentence_seq_len): aspect-start logits
        ae_p = ae_p.squeeze(-1)  # (batch, sentence_seq_len): aspect-end logits

        return as_p, ae_p, sentence_part_feature, a_rep, bert_feature

    def forward(self, inputs, as_index, ae_index, args, plus=None):
        """Run all four heads; returns a dict of logits.

        Args:
            inputs: kwargs dict for ``BertModel``.
            as_index, ae_index: gold aspect start/end indices
                (unused in this visible code path; kept for the caller's API).
            args: run configuration (unused here; kept for the caller's API).
            plus: optional precomputed ``get_aspect`` result, supplied
                externally at test time; when None it is computed here.

        Returns:
            dict with keys ``as_p``/``ae_p``/``os_p``/``oe_p``
            ((batch, sentence_seq_len) each), ``s_logits`` ((batch, 3)) and
            ``is_on_logits`` ((batch, 2)).
        """
        if plus is None:
            as_p, ae_p, sentence_span, a_rep, bert_feature = self.get_aspect(inputs)
        else:
            as_p, ae_p, sentence_span, a_rep, bert_feature = plus

        # Opinion branch: attend from the aspect representation over the
        # sentence tokens, then residual + dropout + layer norm.
        # o_rep = self.o_rep(sentence_span)  # older Linear variant, kept for reference
        o_rep = self.o_att(a_rep, sentence_span, sentence_span)
        o_rep = self.o_norm(self.layer_dropout(sentence_span + o_rep))
        o_logits = self.o_ffn(o_rep)  # (batch, sentence_seq_len, 2)

        os_p, oe_p = o_logits.split(1, dim=-1)
        os_p = os_p.squeeze(-1)  # (batch, sentence_seq_len): opinion-start logits
        oe_p = oe_p.squeeze(-1)  # (batch, sentence_seq_len): opinion-end logits

        # Sentiment branch: attention over (sentence, aspect, opinion), then
        # residual + norm, max-pool over the sequence, and a 3-way classifier.
        s_y = self.s_att(sentence_span, a_rep, o_rep)  # (batch, sentence_seq_len, hidden)
        s_x = self.s_norm(self.layer_dropout(sentence_span + s_y))
        s_x = torch.max(s_x, dim=1)[0]  # (batch, hidden) max-pool over tokens
        s_logits = self.s_ffn(s_x)  # (batch, 3)

        # Is-on branch: concatenate the [CLS] feature with max-pooled aspect
        # and opinion representations -> (batch, 3 * hidden) -> (batch, 2).
        a_x_pooling = torch.max(a_rep, dim=1)[0]  # (batch, hidden)
        o_x_pooling = torch.max(o_rep, dim=1)[0]  # (batch, hidden)
        is_on_logits = self.is_on_ffn(torch.cat((bert_feature[:, 0, :], a_x_pooling, o_x_pooling), 1))

        return {
            'as_p': as_p,  # (batch, sentence_seq_len)
            'ae_p': ae_p,  # (batch, sentence_seq_len)
            'is_on_logits': is_on_logits,  # (batch, 2)
            'os_p': os_p,  # (batch, sentence_seq_len)
            'oe_p': oe_p,  # (batch, sentence_seq_len)
            's_logits': s_logits  # (batch, 3)
        }
