import torch
from torch import nn


class Embedding(nn.Module):
    """Linear projection of raw features into the model dimension.

    Takes a ``(batch, time, features)`` tensor and maps its last axis
    from ``features`` to ``d_model`` with one learned linear layer.
    """

    def __init__(self, features: int, d_model: int, device):
        super().__init__()
        self.features = features
        self.d_model = d_model
        # Single projection layer, moved onto the requested device.
        self.line = nn.Linear(features, d_model).to(device=device)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return ``x`` with its feature axis projected to ``d_model``."""
        return self.line(x)


class TransformerModel(nn.Module):
    def __init__(self, features:int, d_model:int, device):
        super(TransformerModel, self).__init__()
        self.d_model = d_model
        self.embedding = Embedding(features, d_model, device)
        self.transformer = nn.Transformer(d_model=self.d_model, batch_first=True).to(device=device)

    # src是上游风机序列，(样本数，时间点，特征)，tgt是下游序列
    def forward(self, src:torch.Tensor, tgt:torch.Tensor) -> torch.Tensor:
        # 生成mask
        tgt_mask = nn.Transformer.generate_square_subsequent_mask(tgt.shape[1]).to(tgt.device)
        src_key_padding_mask = None
        tgt_key_padding_mask = None

        # 对src和tgt进行编码
        src = self.embedding(src)
        tgt = self.embedding(tgt)

        out = self.transformer(src, tgt,
                               tgt_mask=tgt_mask,
                               src_key_padding_mask=src_key_padding_mask,
                               tgt_key_padding_mask=tgt_key_padding_mask)
        return out
        