import math

import torch
from torch import nn


class PositionEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., "Attention Is All You Need").

    Precomputes a fixed sin/cos position table of shape (1, max_len, embedding_dims),
    adds it to the token embeddings and applies dropout.

    Args:
        embedding_dims: embedding dimension (d_model).
        max_len: maximum sequence length to precompute encodings for.
        dropout: dropout probability applied after adding the encoding.
    """

    def __init__(self, embedding_dims, max_len, dropout):
        super().__init__()
        self.dropout = nn.Dropout(dropout)

        # angles[p, i] = p / 10000^(2i / d_model); shape (max_len, ceil(d/2)).
        angles = torch.arange(max_len).unsqueeze(-1) / torch.pow(
            10000, torch.arange(0, embedding_dims, 2) / embedding_dims
        )
        P = torch.zeros((1, max_len, embedding_dims))
        P[:, :, 0::2] = torch.sin(angles)
        # Slice the cos source so an odd embedding_dims also works: the odd
        # positions hold one column fewer than the even ones.
        P[:, :, 1::2] = torch.cos(angles[:, : embedding_dims // 2])
        # Register as a buffer so .to()/.cuda() on the module moves it along;
        # persistent=False keeps it out of state_dict (backward compatible
        # with checkpoints saved before this change).
        self.register_buffer("P", P, persistent=False)

    def forward(self, x):
        """Add positional encoding to x (batch_size, seq_len, embedding_dims)."""
        # .to(x.device) is a no-op once the module itself lives on x's device.
        pe = self.P[:, : x.shape[1], :].to(x.device)
        return self.dropout(x + pe)


class TextClassifierModel(nn.Module):
    """Transformer-encoder binary text classifier.

    Embeds token ids, adds positional encoding, runs a TransformerEncoder with
    an optional key-padding mask, then classifies from the first token's
    representation (CLS-style pooling).

    Args:
        vocab_size: vocabulary size for the embedding table.
        embedding_dims: embedding / model dimension (d_model).
        nhead: number of attention heads.
        num_layers: number of encoder layers.
    """

    def __init__(self, vocab_size, embedding_dims, nhead, num_layers):
        super().__init__()
        self.embedding_dims = embedding_dims
        self.vocab_size = vocab_size

        self.embedding = nn.Embedding(vocab_size, embedding_dims)
        # NOTE(review): max_len is set to vocab_size here — presumably sequences
        # never exceed the vocabulary size; confirm against the data pipeline.
        self.position_encode = PositionEncoding(embedding_dims, vocab_size, 0.1)
        self.encoder_layer = nn.TransformerEncoderLayer(d_model=embedding_dims, nhead=nhead, batch_first=True)
        self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=num_layers)

        # Classification head: outputs a probability in (0, 1) — pair with BCELoss.
        self.fc = nn.Sequential(
            nn.Linear(embedding_dims, embedding_dims // 2),
            nn.ReLU(),
            nn.Linear(embedding_dims // 2, 1),
            nn.Sigmoid()
        )

    def forward(self, x, valid_lens):
        """Classify token-id batch x (batch_size, seq_len); returns (batch_size, 1).

        valid_lens: per-sample valid token counts, or None to attend everywhere.
        """
        x = self.embedding(x)          # -> (batch_size, seq_len, embedding_dims)
        x = self.position_encode(x)

        # Additive float key-padding mask (0 = keep, -inf = ignore).
        pad_mask = None
        if valid_lens is not None:
            # .to() instead of torch.tensor(...) — re-wrapping a tensor copies
            # it and raises a UserWarning.
            pad_mask = _sequence_mask(x, valid_lens).to(x.device)
        x = self.transformer_encoder(x, src_key_padding_mask=pad_mask)
        # Pool by taking the first token's representation.
        return self.fc(x[:, 0, :])


def _sequence_mask(scores, valid_lens):
    valid_lens = torch.tensor(valid_lens, dtype=torch.float32)
    max_len = scores.shape[1]
    # 广播机制
    mask = torch.arange((max_len), dtype=torch.float32)[None, :] > valid_lens[:, None]
    mask = mask.float()
    mask[mask == 1] = float(-math.inf)
    return mask


# if __name__ == '__main__':
#     inputs = torch.randint(0, 10, (10, 9))
#     vocab_size = 10  # word分类
#     embedding_size = 32  # 词嵌入大小
#     nhead = 4  # 多头注意力的头数
#     model = TextClassifierModel(vocab_size, embedding_size, nhead, 3)
#     outputs = model(inputs, None)
#     print(outputs.shape)  # [10, 1] 每个句子分别有一个类别判定
