# # %%
# #!/usr/bin/env python
# # -*- encoding: utf-8 -*-
# '''
# @File    :   learn_one.py
# @Time    :   2021-11-08 16:00:16
# @Author  :   GuoLiuFang
# @Version :   0.1
# @Contact :   909104374@qq.com
# @License :   (C)Copyright 2018-2021, RandomMatrix
# @Desc    :   None
# '''
# import logging
# file_handler = logging.FileHandler(filename='log.log')
# stdout_handler = logging.StreamHandler()
# logging.basicConfig(
#     level=logging.DEBUG,
#     handlers=[file_handler, stdout_handler],
#     format='%(asctime)s - %(processName)s - %(name)s - %(relativeCreated)d - %(threadName)s - %(levelname)s -- %(message)s'
# )

# # import other libs

# # %%
# import torch
# x = torch.tensor([1, 2, 3])
# y = torch.tensor([9, 8, 7])
# z1 = torch.empty(3)
# torch.add(x, y, out=z1)
# logging.info(z1)
# # %%
# batch_size = 32
# n = 10
# m = 20
# p = 30
# tensor1 = torch.rand((batch_size, n, m))
# tensor2 = torch.rand((batch_size, m, p))
# out_bmm = torch.bmm(tensor1, tensor2)
# # %%
# # Example of Broadcasting
# x1 = torch.rand((5, 5))
# x2 = torch.rand((1, 5))
# z = x1 - x2
# %%
import torch
import torch.nn as nn
from torch.nn import modules
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torchvision.datasets as datasets
import torchvision.transforms as transforms
# %%
class BRNN(nn.Module):
    """Bidirectional LSTM classifier.

    Expects input of shape (batch, seq_len, input_size) and returns class
    scores of shape (batch, num_classes), computed from the last time step
    of the concatenated forward/backward hidden outputs.
    """
    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(BRNN, self).__init__()
        # features
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bidirectional=True)
        # Bidirectional: forward and backward hidden states are concatenated,
        # so the classifier head sees 2 * hidden_size features.
        self.fc = nn.Linear(hidden_size * 2, num_classes)

    def forward(self, x):
        # One (h0, c0) per direction per layer, created on x's device.
        # (The original referenced a module-level `device` global defined
        # later in the file, which breaks standalone use of the class.)
        h0 = torch.zeros(2 * self.num_layers, x.size(0), self.hidden_size, device=x.device)
        c0 = torch.zeros(2 * self.num_layers, x.size(0), self.hidden_size, device=x.device)
        # out: (batch, seq_len, 2 * hidden_size); (hn, cn) are discarded.
        out, _ = self.lstm(x, (h0, c0))
        # Bug fix: nn.Linear.forward takes only the input tensor; the
        # original passed self.num_classes as a second argument (TypeError).
        out = self.fc(out[:, -1, :])
        return out




# %%
class RNN(nn.Module):
    """LSTM classifier over (batch, seq_len, input_size) sequences.

    Only the hidden output of the last time step is fed to the linear
    head, so the result has shape (batch, num_classes).
    """
    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # batch_first=True -> tensors are (N, seq_len, features).
        self.rnn = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        # Classifier head applied to the last time step's hidden state only,
        # rather than to hidden_size * seq_len flattened features.
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # Initial hidden state and cell state, one per stacked layer,
        # created on x's device. (The original used a module-level `device`
        # global defined later in the file, which breaks standalone use.)
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=x.device)
        # Forward prop; `_` is the final (hidden, cell) state pair.
        out, _ = self.rnn(x, (h0, c0))
        # out: (N, seq_len, hidden_size) -- keep only the last time step.
        out = self.fc(out[:, -1, :])
        return out

class NN(nn.Module):
    """Two-layer fully-connected network: input -> 50 hidden units -> classes."""
    def __init__(self, input_size, num_classes):
        super(NN, self).__init__()
        self.fc1 = nn.Linear(input_size, 50)
        self.fc2 = nn.Linear(50, num_classes)

    def forward(self, x):
        # ReLU after the first layer; raw (unnormalized) scores out.
        hidden = F.relu(self.fc1(x))
        return self.fc2(hidden)
# %%
class CNN(nn.Module):
    """Small two-conv CNN for 28x28 inputs (e.g. MNIST).

    Args:
        in_channel: number of channels in the input image (1 for MNIST).
        num_classes: number of output classes.
    """
    def __init__(self, in_channel=1, num_classes=10):
        super(CNN, self).__init__()
        # Bug fix: conv1 previously hard-coded in_channels=1, silently
        # ignoring the in_channel argument; honor it so multi-channel
        # inputs (e.g. RGB) work. The default keeps MNIST behavior.
        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.pool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        self.conv2 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        # Two 2x2 poolings halve 28x28 twice -> 16 feature maps of 7x7.
        # NOTE(review): this fixes the expected spatial input size to 28x28.
        self.fc1 = nn.Linear(16 * 7 * 7, num_classes)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        # Flatten to (N, 16 * 7 * 7) for the linear head.
        x = x.reshape(x.shape[0], -1)
        x = self.fc1(x)
        return x

# %%
# Use CUDA when available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyperparameters

hidden_size = 256  # width of the LSTM hidden state
num_layers = 2  # number of stacked LSTM layers
sequence_legth = 28  # each 28x28 MNIST image is read as 28 time steps (rows)
# input_size is the per-time-step feature size: one 28-pixel image row.
input_size = 28

num_classes = 10  # MNIST digits 0-9
learning_rate = 0.001
batch_size = 64
num_epochs = 2
# %%
# model = NN(784, 10)
# x = torch.randn(64, 784)
# print(model(x).shape)

# model = CNN()
# x = torch.randn(64 , 1, 28, 28)
# print(model(x).shape)

# Smoke test: run one random batch through the RNN and print the output
# shape (expected: torch.Size([64, 10])).
model = RNN(input_size, hidden_size, num_layers, num_classes)
x = torch.randn(64 , 1, 28, 28)
# Drop the channel dimension: (64, 1, 28, 28) -> (64, 28, 28), i.e. a
# batch of 28-step sequences with 28 features per step.
x = x.squeeze(1)
print(model(x).shape)
# %%
# Load Data
# NOTE(review): download=False assumes ./dataset/ already contains MNIST;
# set download=True for a first run on a fresh machine.
train_dataset = datasets.MNIST(root='./dataset/', train=True, transform=transforms.ToTensor(), download=False)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = datasets.MNIST(root='./dataset/', train=False, transform=transforms.ToTensor(), download=False)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)

# Initial Model

# Re-create the RNN (replacing the smoke-test instance above) and move it
# to the selected device.
model = RNN(input_size, hidden_size, num_layers, num_classes).to(device)

criterion = nn.CrossEntropyLoss()

optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# train loop
# %%
# Training loop: iterate the training set `num_epochs` times, one
# mini-batch at a time.
for epoch in range(num_epochs):
    for data, targets in train_dataloader:
        # Move the batch to the same device as the model.
        data = data.to(device=device)
        targets = targets.to(device=device)

        # (N, 1, 28, 28) -> (N, 28, 28): drop the channel dim so the LSTM
        # sees 28 time steps of 28 features each.
        data = data.squeeze(1)

        # Forward pass and loss.
        scores = model(data)
        loss = criterion(scores, targets)

        # Clear stale gradients before backpropagating, then take one
        # gradient-descent step (updates weights and biases).
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

# %%
# Training is finished at this point; next we evaluate the results.
# The learned weights and biases are all stored inside the model.
def check_accuracy(loader, model):
    """Compute and print classification accuracy of `model` over `loader`.

    Args:
        loader: DataLoader whose dataset has a boolean `train` attribute
            (as torchvision's MNIST does); used only for the banner message.
        model: trained network. Evaluated with gradients disabled and
            restored to train() mode before returning.
    """
    if loader.dataset.train:
        print('we are print training accuracy')
    else:
        print("now print test ACC")
    # Evaluate on whichever device the model's parameters live on, so this
    # works whether the model was moved to CUDA or left on the CPU.
    device = next(model.parameters()).device
    num_correct = 0
    num_samples = 0
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            # Bug fix: batches must be moved to the model's device; the
            # original fed CPU tensors to a (possibly CUDA) model.
            x = x.to(device=device)
            y = y.to(device=device)
            # (N, 1, 28, 28) -> (N, 28, 28) to match the RNN's input shape.
            x = x.squeeze(1)
            scores = model(x)
            # Predicted class = argmax over the class-score dimension.
            _, predictions = scores.max(1)
            num_correct += (predictions == y).sum()
            num_samples += predictions.size(0)
        print(f"got the num_corrects / num_smaples = {num_correct * 1.0 / (num_samples * 1.0) * 100:.2f}")
    model.train()

# Report accuracy of the trained model on both splits.
check_accuracy(train_dataloader, model)
check_accuracy(test_dataloader, model)


# %%
