# imports
import torch
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torchinfo import summary

# a recurrent neural network (LSTM) implemented from scratch
class CustomLSTM(nn.Module):
    """A multi-layer LSTM built from scratch out of ``nn.Linear`` gates.

    Batch-first interface: ``forward`` takes ``(batch, seq_len, input_size)``
    and returns the top layer's hidden state at every time step, shaped
    ``(batch, seq_len, hidden_size)``.

    @param input_size: dimensionality of the last axis of the input
    @param hidden_size: dimensionality of the hidden/cell state
    @param layer_nums: number of stacked LSTM layers
    """
    def __init__(self, input_size, hidden_size, layer_nums=2):
        super(CustomLSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.layer_nums = layer_nums

        # Learnable parameters: one Linear per layer per gate.  Each gate
        # consumes [layer_input, h_prev] concatenated, so in_features is
        # input_size + hidden_size for layer 0 and 2 * hidden_size for the
        # stacked layers.  Attribute names are kept identical to the original
        # implementation so existing state_dicts still load.
        self.forget_fcs = self._build_gate_stack()         # forget gate f_t
        self.input_fcs_sigmoid = self._build_gate_stack()  # input gate i_t
        self.input_fcs_tanh = self._build_gate_stack()     # candidate cell g_t
        self.output_fcs = self._build_gate_stack()         # output gate o_t

    def _build_gate_stack(self):
        """Build one Linear layer per LSTM layer for a single gate."""
        layers = nn.ModuleList()
        layers.append(nn.Linear(self.input_size + self.hidden_size, self.hidden_size))
        for _ in range(self.layer_nums - 1):
            layers.append(nn.Linear(2 * self.hidden_size, self.hidden_size))
        return layers

    def forward(self, x):
        """Run the LSTM over a batch-first sequence.

        @param x: tensor of shape (batch, seq_len, input_size)
        @return: tensor of shape (batch, seq_len, hidden_size)
        """
        x = x.permute(1, 0, 2)  # -> (seq_len, batch, input_size)
        seq_len, batch_size, _ = x.size()

        # Per-layer state kept as Python lists of tensors.  Rebinding list
        # entries avoids the in-place index writes into a single tensor (and
        # the .clone() workaround they required), which interfere with
        # autograd's version tracking.
        h = [x.new_zeros(batch_size, self.hidden_size) for _ in range(self.layer_nums)]
        c = [x.new_zeros(batch_size, self.hidden_size) for _ in range(self.layer_nums)]

        outputs = []
        for t in range(seq_len):
            layer_in = x[t]  # input to the first layer: (batch, input_size)
            for layer in range(self.layer_nums):
                combined = torch.cat((layer_in, h[layer]), dim=1)

                # Gate activations.  torch.sigmoid/torch.tanh are used
                # directly because the F.sigmoid / F.tanh aliases are
                # deprecated.
                f_t = torch.sigmoid(self.forget_fcs[layer](combined))
                i_t = torch.sigmoid(self.input_fcs_sigmoid[layer](combined))
                g_t = torch.tanh(self.input_fcs_tanh[layer](combined))
                o_t = torch.sigmoid(self.output_fcs[layer](combined))

                # standard LSTM cell update
                c[layer] = c[layer] * f_t + i_t * g_t
                h[layer] = o_t * torch.tanh(c[layer])
                layer_in = h[layer]

            outputs.append(h[-1])  # top layer's hidden state at step t

        outputs = torch.stack(outputs, dim=0)  # (seq_len, batch, hidden_size)
        return outputs.permute(1, 0, 2)  # back to batch-first

# create recurrent neural networks
class LSTM(nn.Module):
    """Sequence classifier: a CustomLSTM backbone with a linear head.

    The hidden states from every time step are flattened into one vector
    per sample and mapped to class scores by a single fully connected layer.
    """
    def __init__(self, input_size, hidden_size, seq_nums, num_layers, num_classes):
        super().__init__()
        self.lstm = CustomLSTM(input_size, hidden_size, num_layers)
        self.fc = nn.Linear(hidden_size * seq_nums, num_classes)

    def forward(self, x):
        features = self.lstm(x)                       # (batch, seq, hidden)
        flat = features.reshape(features.size(0), -1) # (batch, seq * hidden)
        return self.fc(flat)

class LSTMLib(nn.Module):
    """Reference classifier built on the library ``nn.LSTM`` implementation.

    Mirrors the from-scratch ``LSTM`` wrapper: all time steps' outputs are
    flattened and fed to a single linear classification head.
    """
    def __init__(self, input_size, hidden_size, seq_nums, num_layers, num_classes, device):
        super().__init__()
        self.device = device
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size * seq_nums, num_classes)

    def forward(self, x):
        # Explicit zero initial hidden/cell states, one per layer.
        state_shape = (self.num_layers, x.size(0), self.hidden_size)
        zero_h = torch.zeros(state_shape).to(self.device)
        zero_c = torch.zeros(state_shape).to(self.device)
        seq_out, _ = self.lstm(x, (zero_h, zero_c))   # (batch, seq, hidden)
        flat = seq_out.reshape(seq_out.size(0), -1)   # (batch, seq * hidden)
        return self.fc(flat)



# train
def train(model, loss_fn, optimizer, dataloader, epochs, device):
    """Optimize *model* on *dataloader* for *epochs* epochs.

    Prints per-epoch mean loss and accuracy.  Assumes batches are
    (data, target) pairs where data has a singleton channel dim at axis 1
    (MNIST: (N, 1, 28, 28)) that is squeezed away before the forward pass.

    @param model: module mapping (N, seq, features) -> (N, num_classes)
    @param loss_fn: criterion returning the batch-mean loss (e.g. CrossEntropyLoss)
    @param optimizer: torch optimizer over model.parameters()
    @param dataloader: iterable of (data, target) batches
    @param epochs: number of passes over the dataloader
    @param device: device to move batches to
    """
    model.train()
    print("=" * 10 + "training" + "=" * 10)
    for epoch in range(epochs):
        correct_samples = 0
        total_samples = 0
        total_loss = 0.0

        for data, target in dataloader:
            data, target = data.to(device), target.to(device)
            # Drop the channel dim so each image becomes a 28-step
            # sequence of 28-dim rows.
            data = data.squeeze(1)

            result = model(data)
            loss = loss_fn(result, target)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch_size = target.shape[0]
            # loss_fn returns the batch *mean*; weight it by batch size so
            # total_loss / total_samples is a true per-sample average (the
            # original divided a sum of batch means by the sample count,
            # which reports the loss on a meaningless scale).
            total_loss += loss.item() * batch_size
            total_samples += batch_size
            correct_samples += target.eq(result.argmax(-1)).sum().item()
        print(f"epoch: {epoch}. loss: {total_loss / total_samples}, accuracy: {correct_samples / total_samples * 100:.2f}% ")


def test(model, loss_fn, dataloader, device):
    """Evaluate *model* on *dataloader* and print mean loss and accuracy.

    Runs under torch.no_grad() in eval mode; batches are preprocessed the
    same way as in train() (channel dim squeezed away).

    @param model: module mapping (N, seq, features) -> (N, num_classes)
    @param loss_fn: criterion returning the batch-mean loss
    @param dataloader: iterable of (data, target) batches
    @param device: device to move batches to
    """
    model.eval()
    print("=" * 10 + "testing" + "=" * 10)
    with torch.no_grad():
        total_samples = 0
        correct_samples = 0
        total_loss = 0.0
        for data, target in dataloader:
            data, target = data.to(device), target.to(device)
            # Drop the channel dim so each image becomes a row sequence.
            data = data.squeeze(1)

            result = model(data)
            loss = loss_fn(result, target)

            batch_size = target.shape[0]
            # Weight the batch-mean loss by batch size for a true
            # per-sample average.
            total_loss += loss.item() * batch_size
            total_samples += batch_size
            correct_samples += target.eq(result.argmax(-1)).sum().item()
        # BUG FIX: the original printed `loss` (the last batch's tensor)
        # divided by the total sample count instead of `total_loss`.
        print(f"loss: {total_loss / total_samples}, accuracy: {correct_samples / total_samples * 100:.2f}% ")

# setup device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Seed every RNG *before* building the DataLoaders and the model: the
# original script seeded after model construction, so weight init and
# shuffling were not actually reproducible.
seed = 114514
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # if using multi-GPU.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# hyperparameters
batch_size = 64
input_size = 28   # one 28-pixel MNIST row per time step
seq_nums = 28     # 28 rows -> 28 time steps
hidden_size = 256
num_layers = 1
num_classes = 10
learning_rate = 0.001
epochs = 10

# load data (test set is not shuffled: order is irrelevant for evaluation)
train_data = datasets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True)
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
test_data = datasets.MNIST(root='./data', train=False, transform=transforms.ToTensor())
test_loader = torch.utils.data.DataLoader(dataset=test_data, batch_size=batch_size, shuffle=False)

# initialize network, loss, optimizer
model = LSTM(input_size=input_size, hidden_size=hidden_size, seq_nums=seq_nums, num_layers=num_layers, num_classes=num_classes).to(device)
#model = LSTMLib(input_size=input_size, hidden_size=hidden_size, seq_nums=seq_nums, num_layers=num_layers, num_classes=num_classes, device=device).to(device)
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)


if __name__ == '__main__':
    train(model, loss_fn, optimizer, train_loader, epochs, device)
    test(model, loss_fn, test_loader, device)
    #summary(model, input_size=(batch_size, seq_nums, input_size), depth=1)





