# Essentially the same as the classification transformer model; only the output dimension differs.
import torch
# Limit PyTorch intra-op parallelism to one thread — presumably to avoid CPU
# oversubscription when several training/eval workers run concurrently.
# NOTE(review): TODO confirm this is intentional for single-process runs.
torch.set_num_threads(1)
import torch.nn as nn
from tab_transformer_pytorch import FTTransformer, TabTransformer

class TransformerRegressor(nn.Module):
    """TabTransformer-based regressor.

    Mirrors the classification transformer model; only the output
    dimension (``dim_out``) differs.

    Parameters
    ----------
    num_cat_features : int
        Number of categorical columns. NOTE(review): currently unused —
        the categorical layout is taken from ``kwargs['categories']``.
    num_cont_features : int
        Number of continuous feature columns fed to the transformer.
    num_classes : int, default 1
        Output dimension; 1 for scalar regression.
    embedding_dim : int, default 16
        Embedding/attention dimension (``dim``) of the transformer.
    hidden_dim : int, default 128
        NOTE(review): currently unused by this model.
    dropout : float, default 0.5
        Feed-forward dropout; also the attention dropout unless
        ``attn_dropout`` is supplied via kwargs.
    **kwargs
        Must contain ``categories`` (sequence of per-column cardinalities).
        Optional keys: ``depth`` (default 6), ``heads`` (default 4),
        ``attn_dropout`` (default ``dropout``).

    Raises
    ------
    ValueError
        If ``categories`` is missing from ``kwargs``.
    """

    def __init__(self, num_cat_features, num_cont_features, num_classes=1,
                 embedding_dim=16, hidden_dim=128, dropout=0.5, **kwargs):
        super().__init__()
        if 'categories' not in kwargs:
            raise ValueError('categories is required')
        self.categories = kwargs['categories']

        self.transformer = TabTransformer(
            categories = self.categories,
            num_continuous = num_cont_features,
            dim = embedding_dim,
            dim_out = num_classes,
            depth = kwargs.get('depth', 6),
            heads = kwargs.get('heads', 4),
            attn_dropout = kwargs.get('attn_dropout', dropout),
            ff_dropout = dropout
        )

    def forward(self, batch):
        """Run a forward pass.

        Parameters
        ----------
        batch : tuple
            ``(x_cat, x_cont, _)`` — integer category indices of shape
            ``[batch, n_cat]``, continuous features of shape
            ``[batch, n_cont]``; the third element is ignored.

        Returns
        -------
        torch.Tensor
            Model output with the trailing size-1 dimension squeezed,
            i.e. shape ``[batch_size]`` when ``num_classes == 1``.

        Raises
        ------
        ValueError
            If any categorical column contains an index outside
            ``[0, cardinality - 1]``.
        """
        x_cat, x_cont, _ = batch

        # Validate category indices up front so out-of-range values fail
        # with a readable message instead of an opaque embedding/index error.
        # Skip validation on an empty batch: min()/max() on an empty tensor
        # raises a RuntimeError.
        if x_cat.shape[0] > 0:
            for i, max_val in enumerate(self.categories):
                # Compute min/max once per column rather than re-evaluating
                # them in both the condition and the error message.
                col_min = x_cat[:, i].min()
                col_max = x_cat[:, i].max()
                if col_max >= max_val or col_min < 0:
                    raise ValueError(
                        f"Category column {i} has out-of-bound values. "
                        f"Allowed range: [0, {max_val-1}], "
                        f"found min {col_min.item()}, max {col_max.item()}"
                    )

        x = self.transformer(x_cat, x_cont)
        return x.squeeze(-1)   # shape [batch_size] for scalar regression

