# -*- coding:utf-8  -*-
# @Author: ChenYangMing
# @Time: 2024-08-30

import torch.nn as nn

from Common.config_utils import Config


class TransformerModel(nn.Module):
    """
    Transformer encoder model for sequence-to-label tasks.

    :param input_dim: int, dimensionality of the input features (used as d_model;
        must be divisible by num_heads).
    :param output_dim: int, dimensionality of the output labels.
    :param num_heads: int, number of attention heads (default: Config.NUM_HEADS).
    :param hidden_dim: int, dimensionality of the feed-forward network
        (default: Config.HIDDEN_DIM).
    :param num_layers: int, number of Transformer encoder layers
        (default: Config.NUM_LAYERS).
    :param dropout: float, dropout rate (default: Config.DROPOUT_RATE).
    """
    def __init__(
            self,
            input_dim,
            output_dim,
            num_heads=Config.NUM_HEADS,
            hidden_dim=Config.HIDDEN_DIM,
            num_layers=Config.NUM_LAYERS,
            dropout=Config.DROPOUT_RATE,
    ):
        super().__init__()
        # batch_first=True keeps tensors in (batch, seq, feature) layout end to
        # end, so forward() no longer needs a per-call permute; it also lets
        # PyTorch use its fused fast path for the encoder. Output is
        # mathematically identical to the seq-first layout.
        self.encoder_layer = nn.TransformerEncoderLayer(
            d_model=input_dim,
            nhead=num_heads,
            dim_feedforward=hidden_dim,
            dropout=dropout,
            batch_first=True,
        )
        self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=num_layers)
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(
            self,
            src,
    ):
        """
        Forward pass.

        :param src: torch.Tensor, input of shape (batch_size, seq_length, input_dim).
        :return: torch.Tensor, output of shape (batch_size, output_dim).
        """
        enc_output = self.transformer_encoder(src)  # (batch, seq, input_dim)
        # Project the representation of the last time step to the label space.
        return self.linear(enc_output[:, -1, :])


# Usage example
# input_dim = 10
# output_dim = 1
# batch_size = 4096
# import torch
# from torch.utils.data import DataLoader, TensorDataset
# # X is a 3-D array of shape (num_samples, num_time_steps, num_features)
# X_tensor = torch.tensor(X, dtype=torch.float32)
# y_tensor = torch.tensor(y, dtype=torch.float32)
# dataset = TensorDataset(X_tensor, y_tensor)
# dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
# model = TransformerModel(input_dim, output_dim)
