# -*- coding: utf-8 -*-
# @Time    : 2025/9/19 10:36
# @Author  : chenmh
# @File    : layer.py
# @Desc: Network-structure layers used by the BST model


import torch, typing
from torch import nn, optim
from utils import tensor_difference
from config import BSTConfig, bst_config


class PositionEmbedding(nn.Module):
    """Embed inter-event time gaps into position representations.

    Differencing the input timestamps yields the time intervals between
    consecutive events; the interval vector is tiled along the sequence
    axis and projected to ``hidden_size`` by a small MLP
    (LayerNorm -> Linear -> ReLU -> Linear).
    """

    def __init__(self, config: BSTConfig):
        super().__init__()
        self.config = config
        # Projection MLP over the tiled interval vector:
        # normalize across the seq_len axis, then map
        # seq_len -> hidden_size -> hidden_size with one ReLU in between.
        self.emb = nn.Sequential(
            nn.LayerNorm(normalized_shape=config.seq_len),
            nn.Linear(config.seq_len, config.hidden_size),
            nn.ReLU(),
            nn.Linear(config.hidden_size, config.hidden_size),
        )

    def forward(self, times: torch.Tensor) -> torch.Tensor:
        """
        :param times: [batch_size, seq_len] timestamps
        :return: [batch_size, seq_len, hidden_size] position embeddings
        """
        # Differencing turns timestamps into time intervals.
        gaps = tensor_difference(tensor=times)
        # Tile: [batch_size, seq_len] -> [batch_size, seq_len, seq_len].
        tiled = gaps.unsqueeze(dim=1).repeat(1, self.config.seq_len, 1)
        # Project: [batch_size, seq_len, seq_len] -> [batch_size, seq_len, hidden_size].
        return self.emb(tiled)


if __name__ == '__main__':
    # Smoke test: push a random batch through PositionEmbedding and verify
    # the output shape. seq_len / hidden_size are taken from bst_config
    # rather than hard-coded, because the model's LayerNorm is sized to
    # bst_config.seq_len — a mismatched input length would crash.
    batch_size = 64
    seq_len = bst_config.seq_len
    hidden_size = bst_config.hidden_size
    model = PositionEmbedding(config=bst_config)
    times = torch.randn((batch_size, seq_len))
    output = model(times)
    print(times.size(), output.size())
    # [batch_size, seq_len] -> [batch_size, seq_len, hidden_size]
    assert output.size() == (batch_size, seq_len, hidden_size)
