# -*- coding: utf-8 -*-

import torch.nn as nn

from .transformer import TransformerBlock
from .embedding import BERTEmbedding


class BERT(nn.Module):
    """BERT model: the bidirectional encoder part of the Transformer."""

    def __init__(self, vocab_size,
                 hidden=768,
                 n_layers=12,
                 attn_heads=12,
                 dropout=0.1):
        """
        :param vocab_size: total number of tokens in the vocabulary
        :param hidden: hidden size of the BERT model
        :param n_layers: number of Transformer blocks (layers)
        :param attn_heads: number of attention heads
        :param dropout: dropout rate
        """

        super().__init__()

        self.hidden = hidden
        self.n_layers = n_layers
        self.attn_heads = attn_heads

        # The paper uses 4 * hidden_size for the feed-forward hidden size.
        self.feed_forward_hidden = hidden * 4

        # BERT embedding: sum of token, positional and segment embeddings.
        self.embedding = BERTEmbedding(vocab_size=vocab_size, embed_size=hidden)

        # Stack of transformer blocks (deep network).  Reuse the precomputed
        # feed-forward size instead of re-deriving `hidden * 4` inline.
        self.transformer_blocks = nn.ModuleList(
            [TransformerBlock(hidden, attn_heads, self.feed_forward_hidden, dropout)
             for _ in range(n_layers)])

    def forward(self, x, segment_info):
        """Encode a batch of token sequences.

        :param x: (batch_size, seq_len) tensor of token ids; id 0 is
            treated as padding when building the attention mask
        :param segment_info: (batch_size, seq_len) segment ids, forwarded
            to the embedding layer
        :return: (batch_size, seq_len, hidden) encoded sequence
        """
        # Attention mask hiding padded tokens (token id 0 == padding).
        # Resulting shape: (batch_size, 1, seq_len, seq_len).
        mask = (x > 0).unsqueeze(1).repeat(1, x.size(1), 1).unsqueeze(1)

        # Embed the token-index sequence into a sequence of vectors.
        x = self.embedding(x, segment_info)

        # Run through the stacked transformer blocks.  Call the module
        # directly (not `.forward`) so that nn.Module.__call__ runs and
        # any registered hooks are honored.
        for transformer in self.transformer_blocks:
            x = transformer(x, mask)

        return x
