import torch
torch.set_num_threads(1)
import torch.nn as nn
from src.models.layer.embedding_layer import TabularEmbedding

class RNNRegressor(nn.Module):
    """Regress a scalar (or vector) target from tabular data with a vanilla RNN.

    Each categorical and continuous feature column is embedded into a vector,
    the per-feature embeddings are stacked along a pseudo "time" axis, and the
    resulting sequence is run through a single-layer ``nn.RNN``. The hidden
    output at the final time step feeds a small MLP regression head.

    Args:
        num_cat_features: number of categorical input columns.
        num_cont_features: number of continuous input columns.
        num_classes: output dimension of the head (1 for scalar regression).
        embedding_dim: per-feature embedding size; also the RNN input size.
        hidden_dim: RNN hidden size and width of the MLP head.
        dropout: dropout probability inside the regression head.
    """

    def __init__(self, num_cat_features, num_cont_features, num_classes=1,
                 embedding_dim=16, hidden_dim=128, dropout=0.3):
        super().__init__()

        # fuse=False keeps categorical and continuous embeddings separate so
        # forward() can arrange them into one sequence itself.
        # NOTE(review): the RNN below expects input_size == embedding_dim, so
        # TabularEmbedding must emit continuous embeddings of size
        # embedding_dim too (cont_hidden_dim is presumably only an internal
        # width) — confirm against the TabularEmbedding implementation.
        self.embedding = TabularEmbedding(
            num_cat_features, num_cont_features,
            embedding_dim=embedding_dim,
            cont_hidden_dim=hidden_dim,
            fuse=False,
            cont_embed=True
        )

        self.rnn = nn.RNN(
            input_size=embedding_dim,
            hidden_size=hidden_dim,
            batch_first=True
        )
        self.hidden_size = hidden_dim

        # MLP head applied to the last time step's hidden output.
        self.regressor = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, num_classes)
        )

    def forward(self, batch):
        """Run the model on a ``(x_cat, x_cont, _)`` batch tuple.

        Returns:
            Tensor of shape ``[batch]`` when ``num_classes == 1`` (trailing
            dim squeezed away), otherwise ``[batch, num_classes]``.
        """
        x_cat, x_cont, _ = batch
        x_cat_embedded, x_cont_embedded = self.embedding(x_cat, x_cont)

        # Categorical features as a sequence: [batch, num_cat_features, embedding_dim]
        # (assumes the embedding returns [batch, embedding_dim, num_features] — TODO confirm)
        x_cat_seq = x_cat_embedded.permute(0, 2, 1)

        # Continuous features as a sequence: [batch, num_cont_features, embedding_dim]
        x_cont_seq = x_cont_embedded.permute(0, 2, 1)

        # Concatenate along the pseudo-time axis: [batch, seq_len, embedding_dim]
        x = torch.cat([x_cat_seq, x_cont_seq], dim=1)

        # Final hidden state is not needed; only the per-step outputs are used.
        output, _ = self.rnn(x)  # output: [batch, seq_len, hidden_size]

        # Take the last time step.
        out = output[:, -1, :]   # [batch, hidden_size]

        out = self.regressor(out)  # [batch, num_classes]
        return out.squeeze(-1)
