import torch
import torch.nn as nn
import torch.nn.functional as F


def custom_gru(input, initial_state, w_ih, w_hh, b_ih, b_hh):
    """Step-by-step single-layer GRU matching ``torch.nn.GRU`` (batch_first).

    Args:
        input: [bs, T, i_size] input sequence.
        initial_state: [bs, h_size] initial hidden state.
        w_ih: [3 * h_size, i_size] input-to-hidden weights, gates stacked in
            PyTorch order (reset, update, new) along dim 0.
        w_hh: [3 * h_size, h_size] hidden-to-hidden weights, same gate order.
        b_ih: [3 * h_size] input-to-hidden bias.
        b_hh: [3 * h_size] hidden-to-hidden bias.

    Returns:
        output: [bs, T, h_size] hidden state at every time step.
        prev_h: [bs, h_size] final hidden state.
    """
    prev_h = initial_state
    bs, T, i_size = input.shape
    h_size = w_ih.shape[0] // 3  # the 3 gates are stacked along dim 0
    batch_w_ih = w_ih.unsqueeze(0).tile(bs, 1, 1)
    batch_w_hh = w_hh.unsqueeze(0).tile(bs, 1, 1)

    outputs = []  # per-step hidden states; stacked into the output sequence below

    for t in range(T):
        x = input[:, t, :]  # current time-step input [bs, i_size]
        w_times_x = torch.bmm(batch_w_ih, x.unsqueeze(-1)).squeeze(-1)  # [bs, 3 * h_size]
        w_times_h_prev = torch.bmm(batch_w_hh, prev_h.unsqueeze(-1)).squeeze(-1)  # [bs, 3 * h_size]

        # reset gate
        r_t = torch.sigmoid(w_times_x[:, :h_size] + w_times_h_prev[:, :h_size]
                            + b_ih[:h_size] + b_hh[:h_size])
        # update gate
        z_t = torch.sigmoid(w_times_x[:, h_size:2 * h_size] + w_times_h_prev[:, h_size:2 * h_size]
                            + b_ih[h_size:2 * h_size] + b_hh[h_size:2 * h_size])
        # candidate state: reset gate scales only the hidden contribution
        # (PyTorch convention — r is applied after the W_hn @ h + b_hn product)
        n_t = torch.tanh(w_times_x[:, 2 * h_size:] + b_ih[2 * h_size:]
                         + r_t * (w_times_h_prev[:, 2 * h_size:] + b_hh[2 * h_size:]))
        prev_h = (1 - z_t) * n_t + z_t * prev_h  # state update

        outputs.append(prev_h)

    # Stack inherits input's dtype/device; the original preallocated
    # torch.zeros(...) buffer was always CPU/default-dtype, which broke
    # CUDA or float64 inputs.
    output = torch.stack(outputs, dim=1)  # [bs, T, h_size]
    return output, prev_h


# Sanity-check custom_gru against the reference torch.nn.GRU implementation.
torch.manual_seed(0)  # fixed seed so the printed comparison is reproducible
bs, T, i_size, h_size = 2, 3, 4, 5
input = torch.randn(bs, T, i_size)
h0 = torch.randn(bs, h_size)
gru_layer = nn.GRU(input_size=i_size, hidden_size=h_size, batch_first=True)
output, h_final = gru_layer(input, h0.unsqueeze(0))

print(output)
print(h_final)

# Handy for inspecting the parameter layout of nn.GRU:
# for k, v in gru_layer.named_parameters():
#     print(k, v.shape)

# Run the hand-written GRU with the exact same weights
output_define, h_define = custom_gru(input, h0, gru_layer.weight_ih_l0, gru_layer.weight_hh_l0,
                                     gru_layer.bias_ih_l0, gru_layer.bias_hh_l0)
print(output_define)
print(h_define)

# Compare with matching shapes: h_final is [1, bs, h_size] (num_layers first),
# h_define is [bs, h_size] — the original comparison only worked via silent
# broadcasting. Explicit atol: float32 recurrence accumulates rounding error.
print(torch.allclose(output, output_define, atol=1e-6))
print(torch.allclose(h_final.squeeze(0), h_define, atol=1e-6))