# -*- coding: utf-8 -*-
# @Time    : 2025/9/19 11:14
# @Author  : chenmh
# @File    : model.py
# @Desc: BST (Behavior Sequence Transformer) model implementation

import torch, typing
from torch import nn, optim
from layer import PositionEmbedding
from config import bst_config, BSTConfig
from transformers import BertModel, BertConfig
from transformers.models.bert.modeling_bert import BertEmbeddings
from torch.nn import Transformer, TransformerEncoder, TransformerEncoderLayer
import numpy as np
from bst.dataset import train_loader


class BSTransformer(nn.Module):
    """Behavior Sequence Transformer (BST) for binary classification.

    Pipeline:
      1. Embed the item-id sequence (reusing HuggingFace's ``BertEmbeddings``)
         and add a learned time/position embedding.
      2. Encode the sequence with a ``nn.TransformerEncoder``.
      3. Embed each static categorical feature and concatenate it with the
         encoded sequence along the sequence dimension.
      4. Run the MLP head, pool with mean + max, and classify into 2 logits.
    """

    def __init__(self, config: BSTConfig):
        super().__init__()
        self.config = config
        # Reuse BERT's native embedding module for the item ids.
        self.item_embeddings = BertEmbeddings(config=config)
        self.extra_position_embedding = PositionEmbedding(config=config)
        # One embedding table per static (non-sequential) categorical feature.
        self.static_embeddings = nn.ModuleList([
            nn.Embedding(num_embeddings=num_emb, embedding_dim=config.hidden_size, padding_idx=config.pad_token_id)
            for num_emb in config.static_embeddings_vocab_size
        ])
        # Transformer stack that extracts sequence features.
        self.transformer = nn.TransformerEncoder(
            encoder_layer=nn.TransformerEncoderLayer(
                d_model=config.hidden_size,
                nhead=config.n_heads,
                dim_feedforward=config.feedforward_size,
                batch_first=True,
            ),
            num_layers=config.n_layers,
        )
        # Classification head, kept consistent with the BST paper.
        self.mlp = nn.Sequential(
            nn.Linear(config.hidden_size, config.feedforward_size),
            nn.LeakyReLU(),
            nn.Linear(config.feedforward_size, config.feedforward_size // 2),
            nn.LeakyReLU(),
            nn.Linear(config.feedforward_size // 2, config.hidden_size),
            nn.LeakyReLU(),
            nn.Linear(config.hidden_size, config.hidden_size),
            nn.Sigmoid(),
        )
        # Mean-pool and max-pool are concatenated, hence hidden_size * 2.
        self.classifier = nn.Linear(config.hidden_size * 2, 2)
        self.init_method = 'xavier'

        self._initialize_weights()

    def _initialize_weights(self) -> None:
        """Initialize all sub-module weights in place.

        The scheme is selected by ``self.init_method`` for linear/conv layers;
        embeddings get N(0, 0.02) and LayerNorm gets ones/zeros.

        :raises ValueError: if ``self.init_method`` is not one of
            ``'xavier'``, ``'xavier_normal'``, ``'kaiming'``.
        """
        for module in self.modules():
            if isinstance(module, (nn.Linear, nn.Conv1d)):
                if self.init_method == 'xavier':
                    # Xavier/Glorot init (recommended for Transformers).
                    nn.init.xavier_uniform_(module.weight)
                elif self.init_method == 'xavier_normal':
                    nn.init.xavier_normal_(module.weight)
                elif self.init_method == 'kaiming':
                    nn.init.kaiming_uniform_(module.weight, nonlinearity='relu')
                else:
                    raise ValueError(f"Unknown init_method: {self.init_method}")

                if module.bias is not None:
                    nn.init.constant_(module.bias, 0.)

            elif isinstance(module, nn.Embedding):
                # Token embeddings: small-variance normal distribution.
                nn.init.normal_(module.weight, mean=0.0, std=0.02)
                # Fix: normal_ overwrote the zero vector that padding_idx
                # reserves; restore it so padded positions contribute nothing.
                if module.padding_idx is not None:
                    with torch.no_grad():
                        module.weight[module.padding_idx].fill_(0.0)

            elif isinstance(module, nn.LayerNorm):
                # LayerNorm weight -> 1, bias -> 0.
                nn.init.ones_(module.weight)
                nn.init.zeros_(module.bias)

    def forward(self, input_ids: torch.Tensor, times: torch.Tensor,
                static_features: typing.List[torch.Tensor]) -> torch.Tensor:
        """Compute classification logits for a batch.

        :param input_ids: item-id sequence, ``[batch_size, seq_len]``
        :param times: per-position time values, ``[batch_size, seq_len]``
        :param static_features: one ``[batch_size, 1]`` tensor per static
            feature, in the same order as ``config.static_embeddings_vocab_size``
        :return: logits of shape ``[batch_size, 2]``
        :raises ValueError: if the number of static features does not match
            the number of static embedding tables.
        """
        if len(static_features) != len(self.static_embeddings):
            raise ValueError(
                f"Expected {len(self.static_embeddings)} static features, "
                f"got {len(static_features)}"
            )
        # [batch_size, seq_len] ==> [batch_size, seq_len, hidden_size]
        item_embeds = self.item_embeddings(input_ids)
        # [batch_size, seq_len] ==> [batch_size, seq_len, hidden_size]
        time_embeds = self.extra_position_embedding(times)
        item_embeds = item_embeds + time_embeds
        # Extract token features with the TransformerEncoder.
        item_embeds = self.transformer(item_embeds)
        # [batch_size, num(extra_features), hidden_size]
        extra_features_embeds = torch.cat(
            [embed(feat) for embed, feat in zip(self.static_embeddings, static_features)],
            dim=1,
        )
        # [batch_size, num(extra_features) + seq_len, hidden_size]
        full_embeds = torch.cat([item_embeds, extra_features_embeds], dim=1)
        # [batch_size, num(extra_features) + seq_len, hidden_size]
        outputs = self.mlp(full_embeds)
        outputs_mean = outputs.mean(dim=1)  # [batch_size, hidden_size]
        outputs_max = outputs.max(dim=1).values  # [batch_size, hidden_size]
        # [batch_size, hidden_size*2] ==> [batch_size, 2]
        logits = self.classifier(torch.cat([outputs_mean, outputs_max], dim=1))
        return logits


def _smoke_test() -> None:
    """Manual smoke test for :class:`BSTransformer`.

    Replaces the previous dead ``if False:`` guard with a function that is
    intentionally never invoked at import time (preserving the original no-op
    behavior) but can be called by hand to verify a forward pass on random
    tensors and on one batch from ``train_loader``.
    """
    batch_size = 4
    model = BSTransformer(config=bst_config)
    # Random stand-in inputs with the expected shapes/dtypes.
    input_ids = torch.from_numpy(np.random.randint(0, 10, size=(batch_size, 200))).long()
    times = torch.tensor(np.random.randn(batch_size, 200), dtype=torch.float32)
    static_features = [torch.tensor(np.random.randint(0, 10, size=(batch_size, 1)), dtype=torch.long)
                       for _ in range(3)]
    logits = model(input_ids=input_ids, times=times, static_features=static_features)
    print(logits.size())

    # Exercise one real batch from the training loader.
    for batch in train_loader:
        item = batch["item"]
        times = batch["times"]
        static_vals = batch["static_vals"]
        # Split [batch, num_features] into num_features tensors of [batch, 1].
        static_features = torch.split(static_vals, split_size_or_sections=1, dim=1)
        label = batch["label"]
        logits = model(input_ids=item, times=times, static_features=static_features)
        pred = logits.argmax(dim=1)  # predicted class index per sample
        prob = logits.softmax(dim=1)
        prob_positive = prob[:, 1]  # probability of the positive class (class 1)
        break
