import torch
from torch import nn
from model.Embedding import TokenEmbedding, PositionalEncoding
from model.MyTransformer import MyTransformerEncoderLayer, MyTransformerEncoder


class Classification(nn.Module):
    """Transformer-encoder based text classifier.

    Tokens are embedded, positionally encoded, passed through a stack of
    Transformer encoder layers, pooled over the sequence dimension, and
    projected to class logits by a small two-layer head.
    """

    def __init__(self, vocab_size=None,
                 d_model=512,
                 nhead=8,
                 num_encoder_layers=6,
                 dim_feedforward=2048,
                 dim_classification=64,
                 num_classification=4,
                 dropout=0.1):
        """
        Args:
            vocab_size: size of the token vocabulary (required by the embedding).
            d_model: embedding / model dimension.
            nhead: number of attention heads per encoder layer.
            num_encoder_layers: number of stacked encoder layers.
            dim_feedforward: hidden size of each encoder layer's FFN.
            dim_classification: hidden size of the classification head.
            num_classification: number of output classes.
            dropout: dropout probability used throughout.
        """
        super(Classification, self).__init__()
        self.pos_embedding = PositionalEncoding(d_model=d_model, dropout=dropout)
        self.src_token_embedding = TokenEmbedding(vocab_size=vocab_size, emb_size=d_model)

        encoder_layer = MyTransformerEncoderLayer(d_model=d_model,
                                                  nhead=nhead,
                                                  dim_feedforward=dim_feedforward,
                                                  dropout=dropout)
        self.encoder = MyTransformerEncoder(encoder_layer=encoder_layer,
                                            num_layers=num_encoder_layers)

        self.classifier = nn.Sequential(nn.Linear(d_model, dim_classification),
                                        nn.Dropout(dropout),
                                        nn.Linear(dim_classification, num_classification))

    def forward(self, src, src_mask=None, src_key_padding_mask=None, concat_type='sum'):
        """Run the classifier.

        Args:
            src: input token ids, shape = [src_len, batch_size]
            src_mask: attention mask hiding future positions; usually not needed
                for text classification, defaults to None.
            src_key_padding_mask: mask marking padded positions of the input batch.
            concat_type: how to pool the encoder output over the sequence
                dimension: 'sum' (element-wise sum), 'avg' (mean), anything
                else takes the last position only.

        Returns:
            Logits of shape [batch_size, num_classification].
        """
        # [src_len, batch_size, embed_dim]
        src_embed = self.src_token_embedding(src)
        # [src_len, batch_size, embed_dim]
        src_embed = self.pos_embedding(src_embed)

        # Encoder output, shape = [src_len, batch_size, embed_dim].
        # BUGFIX: previously `mask=None` was hard-coded, silently discarding
        # any src_mask passed by the caller; now it is forwarded.
        memory = self.encoder(src=src_embed,
                              mask=src_mask,
                              src_key_padding_mask=src_key_padding_mask)

        # Pool over the sequence dimension according to concat_type.
        if concat_type == 'sum':
            memory = torch.sum(memory, dim=0)
        elif concat_type == 'avg':
            memory = torch.mean(memory, dim=0)
        else:
            # Fallback: use only the representation of the last position.
            memory = memory[-1]

        # out shape = [batch_size, num_classification]
        out = self.classifier(memory)
        return out
