'''
Model pipeline: BERT encoder -> target/context attention -> LSTM -> polarity classifier.
'''

import torch
import torch.nn as nn
from layers.squeeze_embedding import SqueezeEmbedding
from layers.attention import Attention
from layers.dynamic_rnn import DynamicLSTM

class BERT_LSTM(nn.Module):
    """BERT encoder -> target/context attention -> LSTM -> polarity classifier.

    Encodes the context sentence and the target (aspect) phrase with a shared
    BERT model, attends over the context with respect to the target tokens,
    summarizes the attended sequence with an LSTM, and maps the final hidden
    state to sentiment-polarity probabilities.
    """

    def __init__(self, bert, opt):
        """
        Args:
            bert: pretrained BERT module; called as ``bert(ids)`` and expected
                to return a tuple whose first element is the sequence output.
            opt: option namespace providing ``dropout``, ``bert_dim``,
                ``hidden_dim`` and ``polarities_dim``.
        """
        super(BERT_LSTM, self).__init__()
        self.opt = opt
        self.bert = bert
        # Trims each batch down to its longest non-padded sequence.
        self.squeeze_embedding = SqueezeEmbedding()
        self.dropout = nn.Dropout(opt.dropout)

        # Multi-head attention with an MLP score function, projecting the
        # bert_dim inputs down to hidden_dim.
        self.attention = Attention(opt.bert_dim, out_dim=opt.hidden_dim,
                                   n_head=8, score_function='mlp',
                                   dropout=opt.dropout)
        self.lstm = DynamicLSTM(opt.hidden_dim, opt.hidden_dim,
                                num_layers=1, batch_first=True)

        # Classification head: one score per sentiment polarity.
        self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)

        self.softmax = nn.Softmax(dim=1)

    def forward(self, inputs):
        """Run the full pipeline on a batch.

        Args:
            inputs: sequence whose first two elements are the padded context
                token ids and the padded target token ids, each of shape
                (batch, max_seq_len); padding uses token id 0.

        Returns:
            Softmax-normalized polarity scores of shape
            (batch, opt.polarities_dim).
        """
        ctx_ids, tgt_ids = inputs[0], inputs[1]
        # Non-zero ids are real tokens, so summing the mask gives per-sample
        # lengths (the target includes its CLS/SEP tokens).
        ctx_len = torch.sum(ctx_ids != 0, dim=-1)
        tgt_len = torch.sum(tgt_ids != 0, dim=-1)

        # Encode the context: drop excess padding, run BERT, apply dropout.
        ctx_ids = self.squeeze_embedding(ctx_ids, ctx_len)
        ctx_emb, _ = self.bert(ctx_ids)
        ctx_emb = self.dropout(ctx_emb)

        # Encode the target phrase the same way.
        tgt_ids = self.squeeze_embedding(tgt_ids, tgt_len)
        tgt_emb, _ = self.bert(tgt_ids)
        tgt_emb = self.dropout(tgt_emb)

        # Attention between target and context token embeddings.
        attended, _ = self.attention(tgt_emb, ctx_emb)
        # DynamicLSTM returns (output, (h_n, c_n)); only the final hidden
        # state is used.
        # NOTE(review): ctx_len is passed as the sequence lengths here, which
        # assumes `attended` has the context's sequence length — confirm
        # against the Attention implementation's output shape.
        _, (h_n, _) = self.lstm(attended, ctx_len)

        # h_n[0]: hidden state of the single LSTM layer -> (batch, hidden_dim).
        logits = self.dense(h_n[0])
        return self.softmax(logits)