import torch
import torch.nn as nn

class GazeRNN(nn.Module):
    """LSTM-based gaze regressor.

    Takes a sequence of per-frame feature vectors, encodes it with a
    single-layer LSTM, and maps the last time step through a small MLP
    to a 2-D output (presumably screen-plane gaze coordinates —
    TODO confirm with the training code).

    Args:
        input_size: Per-time-step feature dimension (default 21).
        hidden_size: LSTM hidden state size (default 128).
        output_size: Final output dimension (default 2).
        l1_lambda: Weight of the L1 penalty returned by
            ``l1_regularization`` (default 1e-4).

    The defaults reproduce the original fixed architecture, so
    ``GazeRNN()`` is fully backward-compatible.
    """

    def __init__(self, input_size=21, hidden_size=128, output_size=2,
                 l1_lambda=1e-4):
        super().__init__()
        # Sequence encoder: (batch, seq_len, input_size) -> (batch, seq_len, hidden_size)
        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=1,
            batch_first=True,
        )
        # MLP head applied to the last LSTM time step.
        self.fc1 = nn.Linear(hidden_size, 64)
        self.fc2 = nn.Linear(64, 32)
        self.fc3 = nn.Linear(32, 16)
        self.out = nn.Linear(16, output_size)
        # He (Kaiming) initialization: 'relu' gain for the hidden layers,
        # unit gain ('linear') for the final regression layer.
        for m in (self.fc1, self.fc2, self.fc3):
            nn.init.kaiming_uniform_(m.weight, nonlinearity='relu')
        nn.init.kaiming_uniform_(self.out.weight, nonlinearity='linear')
        # L1 regularization strength; layers penalized by l1_regularization().
        self.l1_lambda = l1_lambda

    def forward(self, x):
        """Run the network.

        Args:
            x: Float tensor of shape (batch, seq_len, input_size).

        Returns:
            Tensor of shape (batch, output_size).
        """
        lstm_out, _ = self.lstm(x)   # (batch, seq_len, hidden_size)
        x = lstm_out[:, -1, :]       # keep only the final time step
        x = nn.functional.relu(self.fc1(x))
        x = nn.functional.relu(self.fc2(x))
        x = nn.functional.relu(self.fc3(x))
        return self.out(x)

    def l1_regularization(self):
        """Return the weighted L1 norm of the fully connected weights.

        Only the MLP weights are penalized (biases and LSTM parameters
        are excluded, matching the original behavior). Add the returned
        scalar tensor to the training loss.
        """
        l1 = sum(m.weight.abs().sum()
                 for m in (self.fc1, self.fc2, self.fc3, self.out))
        return self.l1_lambda * l1