import torch
import torch.nn as nn
import torch.nn.functional as F

class TemporalTransformer(nn.Module):
    """Transformer encoder over a temporal sequence for binary classification.

    Embeds each timestep from ``input_dim`` to ``hidden_dim``, runs a stack
    of Transformer encoder layers, and classifies from the final timestep's
    representation, returning a sigmoid probability in (0, 1).
    """

    def __init__(self, input_dim, hidden_dim, num_heads, num_layers, dropout=0.1):
        """
        Args:
            input_dim: feature size of each timestep of the input sequence.
            hidden_dim: Transformer model dimension (d_model); must be
                divisible by ``num_heads``.
            num_heads: number of attention heads per encoder layer.
            num_layers: number of stacked encoder layers.
            dropout: dropout probability inside each encoder layer.
        """
        super().__init__()
        self.src_embedding = nn.Linear(input_dim, hidden_dim)
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=hidden_dim,
            nhead=num_heads,
            dim_feedforward=hidden_dim * 4,
            dropout=dropout)
        self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
        self.decoder = nn.Linear(hidden_dim, 1)  # binary-classification head
        self.init_weights()

    def init_weights(self):
        # Xavier-uniform init for every weight matrix (dim > 1); biases keep
        # their defaults. NOTE(review): this also overwrites the encoder
        # layers' own built-in initialization — confirm that is intended.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src):
        """Classify a batch of sequences.

        Args:
            src: tensor of shape (batch, seq_len, input_dim).

        Returns:
            Tensor of shape (batch, 1) with sigmoid probabilities in (0, 1).
        """
        src = self.src_embedding(src)    # (batch, seq, hidden)
        src = src.permute(1, 0, 2)       # (seq, batch, hidden) — default Transformer layout
        output = self.encoder(src)       # encode the full sequence
        output = output.permute(1, 0, 2)  # back to (batch, seq, hidden)
        logits = self.decoder(output[:, -1, :])  # classify from the last timestep
        # torch.sigmoid replaces the deprecated F.sigmoid; the result is a
        # probability, not a log-probability.
        return torch.sigmoid(logits)