import torch
import torch.nn as nn
import torch.nn.functional as F


def defined_lstm(input, initial_state, w_ih, w_hh, b_ih, b_hh):
    """Manual single-layer LSTM forward pass (batch_first layout).

    Implements the same recurrence and parameter layout as ``nn.LSTM``
    (gate order along the weight rows is: input, forget, cell, output),
    so the result can be compared against PyTorch's built-in layer.

    Args:
        input: ``[bs, T, i_size]`` input sequence.
        initial_state: tuple ``(h0, c0)``, each of shape ``[bs, h_size]``.
        w_ih: ``[4 * h_size, i_size]`` input-to-hidden weights.
        w_hh: ``[4 * h_size, h_size]`` hidden-to-hidden weights.
        b_ih: ``[4 * h_size]`` input-to-hidden bias.
        b_hh: ``[4 * h_size]`` hidden-to-hidden bias.

    Returns:
        output: ``[bs, T, h_size]`` hidden state at every time step.
        (h_T, c_T): final hidden and cell state, each ``[bs, h_size]``.
    """
    # Initial state
    h0, c0 = initial_state
    bs, T, i_size = input.shape
    h_size = w_ih.shape[0] // 4
    prev_h = h0
    prev_c = c0

    # Allocate the output on the same device/dtype as the input.
    # (The original used torch.zeros(...), which defaults to CPU/float32
    # and would break for CUDA or float64 inputs.)
    output = input.new_zeros(bs, T, h_size)

    # The two biases are time-invariant: fold them together once,
    # outside the loop, instead of re-adding both per gate per step.
    bias = b_ih + b_hh

    for t in range(T):
        x = input[:, t, :]  # current time step's input vector [bs, i_size]

        # F.linear broadcasts the shared weight matrices over the batch,
        # so there is no need to tile them to [bs, 4*h_size, *] and bmm.
        gates = F.linear(x, w_ih) + F.linear(prev_h, w_hh) + bias  # [bs, 4 * h_size]

        # Split into the four gates in PyTorch's order: i, f, g, o.
        i_gate, f_gate, g_gate, o_gate = gates.chunk(4, dim=1)

        i_t = torch.sigmoid(i_gate)  # input gate
        f_t = torch.sigmoid(f_gate)  # forget gate
        g_t = torch.tanh(g_gate)     # candidate cell state
        o_t = torch.sigmoid(o_gate)  # output gate

        prev_c = f_t * prev_c + i_t * g_t
        prev_h = o_t * torch.tanh(prev_c)
        output[:, t, :] = prev_h

    return output, (prev_h, prev_c)


# Small sanity-check configuration: batch, sequence length, input and hidden dims.
batch_size, seq_len, in_dim, hid_dim = 2, 3, 4, 5
input = torch.randn(batch_size, seq_len, in_dim)
c0 = torch.randn(batch_size, hid_dim)
h0 = torch.randn(batch_size, hid_dim)

# Reference: PyTorch's built-in single-layer LSTM (batch_first layout).
lstm_layer = nn.LSTM(input_size=in_dim, hidden_size=hid_dim, batch_first=True)
output, (h_final, c_final) = lstm_layer(input, (h0.unsqueeze(0), c0.unsqueeze(0)))

# Run the hand-written LSTM with the exact same weights and initial state.
output_defined, (h_defined, c_defined) = defined_lstm(
    input,
    (h0, c0),
    lstm_layer.weight_ih_l0,
    lstm_layer.weight_hh_l0,
    lstm_layer.bias_ih_l0,
    lstm_layer.bias_hh_l0,
)

print(output)
print(output_defined)
print(torch.allclose(output, output_defined))