import torch
import torch.nn as nn
import torch.nn.functional as F

class CustomGRUCell(nn.Module):
    """A single GRU time-step cell.

    Gate equations (z = update gate, r = reset gate), with z/r weights fused
    into one 2*hidden_size projection:

        z, r  = chunk(sigmoid(x @ W_ih.T + b_ih + h @ W_hh.T + b_hh), 2)
        h~    = tanh(x @ W_ih_c.T + b_ih_c + (r * h) @ W_hh_c.T + b_hh_c)
        h_new = (1 - z) * h + z * h~

    NOTE: the reset gate multiplies ``h`` *before* the hidden-to-hidden
    projection, and the update gate's role is flipped relative to
    ``torch.nn.GRUCell``. Both are self-consistent conventions, not bugs,
    and are preserved here.
    """

    def __init__(self, input_size, hidden_size):
        super(CustomGRUCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size

        # Update and reset gate parameters, fused as 2 * hidden_size rows.
        self.weight_ih = nn.Parameter(torch.empty(2 * hidden_size, input_size))
        self.bias_ih = nn.Parameter(torch.empty(2 * hidden_size))
        self.weight_hh = nn.Parameter(torch.empty(2 * hidden_size, hidden_size))
        self.bias_hh = nn.Parameter(torch.empty(2 * hidden_size))

        # Candidate hidden state parameters.
        self.weight_ih_c = nn.Parameter(torch.empty(hidden_size, input_size))
        self.bias_ih_c = nn.Parameter(torch.empty(hidden_size))
        self.weight_hh_c = nn.Parameter(torch.empty(hidden_size, hidden_size))
        self.bias_hh_c = nn.Parameter(torch.empty(hidden_size))

        self.reset_parameters()

    def reset_parameters(self):
        """Initialize all parameters from U(-1/sqrt(H), 1/sqrt(H)).

        This matches ``torch.nn.GRUCell``. The previous ``torch.randn``
        (std = 1) initialization saturates the sigmoid/tanh gates for any
        moderate hidden size, which stalls training.
        """
        stdv = self.hidden_size ** -0.5 if self.hidden_size > 0 else 0.0
        for param in self.parameters():
            nn.init.uniform_(param, -stdv, stdv)

    def forward(self, x, h):
        """One GRU step.

        Args:
            x: input at this time step, shape (batch, input_size).
            h: previous hidden state, shape (batch, hidden_size).

        Returns:
            New hidden state, shape (batch, hidden_size).
        """
        # Update gate z and reset gate r, computed from one fused projection.
        gates_x = F.linear(x, self.weight_ih, self.bias_ih)
        gates_h = F.linear(h, self.weight_hh, self.bias_hh)
        z, r = torch.sigmoid(gates_x + gates_h).chunk(2, 1)

        # Candidate hidden state: reset gate scales h before the projection.
        h_tilde = torch.tanh(
            F.linear(x, self.weight_ih_c, self.bias_ih_c) +
            F.linear(r * h, self.weight_hh_c, self.bias_hh_c)
        )
        # Blend old state and candidate (z close to 0 keeps the old state).
        new_h = (1 - z) * h + z * h_tilde
        return new_h

class CustomGRU(nn.Module):
    """Multi-layer GRU built from ``CustomGRUCell`` with optional length masking.

    The interface mirrors ``torch.nn.GRU``: input is (S, B, D), or (B, S, D)
    when ``batch_first=True``; returns ``(outputs, final_hidden)`` where
    ``final_hidden`` has shape (num_layers, B, H). When ``x_lens`` is given,
    padded time steps (t >= length) leave the hidden state untouched, so the
    final hidden of each sequence is the one from its last valid step.
    """

    def __init__(self, input_size, hidden_size, batch_first=False, num_layers=1):
        super(CustomGRU, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.batch_first = batch_first
        self.num_layers = num_layers

        # Layer 0 consumes the raw input features; every deeper layer
        # consumes the previous layer's hidden state.
        self.cells = nn.ModuleList(
            CustomGRUCell(input_size if i == 0 else hidden_size, hidden_size)
            for i in range(num_layers)
        )

    def forward(self, input, x_lens=None, hidden=None):
        """Run the GRU over a (possibly padded) batch of sequences.

        Args:
            input: (S, B, D) tensor, or (B, S, D) when ``batch_first``.
            x_lens: optional (B,) sequence lengths; steps at or beyond a
                sequence's length keep that sequence's previous hidden state.
            hidden: optional initial hidden state, any shape viewable as
                (num_layers, B, H). Defaults to zeros.

        Returns:
            outputs: last-layer hidden at every time step, same layout as
                ``input`` (hidden_size in the feature dimension).
            hidden: final hidden state, shape (num_layers, B, H).
        """
        if self.batch_first:
            input = input.transpose(0, 1)  # (B, S, D) -> (S, B, D)
        seq_len, batch_size, _ = input.size()
        device = input.device

        # Initial hidden state: zeros unless the caller supplied one.
        if hidden is None:
            hidden = torch.zeros(self.num_layers, batch_size, self.hidden_size, device=device)
        else:
            hidden = hidden.view(self.num_layers, batch_size, self.hidden_size)

        # Validity mask of shape (seq_len, batch_size): True at real steps.
        if x_lens is not None:
            mask = torch.arange(seq_len, device=device)[:, None] < x_lens[None, :]
        else:
            mask = torch.ones(seq_len, batch_size, dtype=torch.bool, device=device)

        outputs = []
        for t in range(seq_len):
            current_mask = mask[t]  # (batch_size,)
            layer_input = input[t]
            new_hiddens = []
            for layer in range(self.num_layers):
                h = hidden[layer]
                # BUG FIX: the original called .detach() on the lower layer's
                # output here, silently blocking gradient flow between stacked
                # layers and breaking multi-layer training.
                h_new = self.cells[layer](layer_input, h)
                # Padded positions keep their previous hidden state. This also
                # makes the original's input masking redundant: torch.where
                # discards h_new's value *and* zeroes its gradient wherever
                # current_mask is False.
                h = torch.where(current_mask[:, None], h_new, h)
                new_hiddens.append(h)
                layer_input = h  # feed (mask-corrected) state to the next layer
            hidden = torch.stack(new_hiddens, dim=0)
            outputs.append(hidden[-1])  # last layer's hidden is the output

        outputs = torch.stack(outputs, dim=0)
        if self.batch_first:
            outputs = outputs.transpose(0, 1)  # (S, B, D) -> (B, S, D)
        return outputs, hidden

if __name__ == '__main__':
    # Smoke test: push a batch of variable-length sequences through the model
    # and confirm the output layout.
    input_size, hidden_size = 10, 32
    sequence_length, batch_size = 20, 32

    X = torch.randn(batch_size, sequence_length, input_size)
    x_lens = torch.randint(1, sequence_length + 1, (batch_size,))

    model = CustomGRU(input_size=input_size, hidden_size=hidden_size, batch_first=True)
    outputs, _ = model(X, x_lens)
    print(outputs.shape)  # (batch_size, sequence_length, hidden_size)