from pathlib import Path
from matplotlib import pyplot
from torch.utils.data import TensorDataset, DataLoader
import pickle
import gzip
import torch
import torch.nn.functional as F
import torch.optim as optim
import numpy as np

# Location of the gzipped MNIST pickle; each image is a flattened 784-float row.
DATA_PATH = Path("../dataset")
PATH = DATA_PATH / "mnist"
PATH.mkdir(parents=True, exist_ok=True)
FILENAME = "mnist.pkl.gz"
# The pickle holds three (inputs, labels) splits; the third split is discarded here.
with gzip.open((PATH / FILENAME).as_posix(), "rb") as f:
    ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding="latin-1")
# pyplot.imshow(x_train[0].reshape((28, 28)), cmap="gray")
# pyplot.show()
# Note: the data must be converted to tensors before it can be used in model training.
x_train, y_train, x_valid, y_valid = map(torch.tensor, (x_train, y_train, x_valid, y_valid))
train_dataset = TensorDataset(x_train, y_train)
val_dataset = TensorDataset(x_valid, y_valid)
# Shuffle only the training split; both loaders use mini-batches of 32.
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=32)
# Prefer the first CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


# print(x_train.shape, y_train.shape)
# print(y_train[0].item())
# print(y_train[:10])
# xb = x_train[:64]

class MnistNN(torch.nn.Module):
    """Three-layer fully connected classifier for flattened 28x28 MNIST images.

    Architecture: 784 -> 512 -> 256 -> 10 with ReLU activations and
    dropout (p=0.5) after each hidden layer. The last layer emits raw
    logits, as expected by CrossEntropyLoss.
    """

    def __init__(self):
        super().__init__()
        self.linear_01 = torch.nn.Linear(784, 512)
        self.linear_02 = torch.nn.Linear(512, 256)
        self.linear_03 = torch.nn.Linear(256, 10)
        # A single Dropout module is reused after both hidden layers.
        self.dropout = torch.nn.Dropout(0.5)

    def forward(self, x):
        """Map a (batch, 784) input to (batch, 10) class logits."""
        hidden = self.dropout(F.relu(self.linear_01(x)))
        hidden = self.dropout(F.relu(self.linear_02(hidden)))
        return self.linear_03(hidden)


# Instantiate the global model and move it onto the selected device.
mnistNN = MnistNN()
# NOTE: removed a duplicate `device = torch.device(...)` line — `device` is
# already defined once at the top of the module.
mnistNN.to(device)
# Cross-entropy over raw logits, optimized by SGD with momentum.
loss_function = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(mnistNN.parameters(), lr=0.01, momentum=0.5)


def train(epoch):
    """Run one training epoch over the global `train_loader`.

    Uses the module-level `mnistNN`, `loss_function` and `optimizer`;
    prints the mean loss every 300 mini-batches.
    """
    mnistNN.train()
    running_loss = 0.0
    for batch_idx, (inputs, target) in enumerate(train_loader):
        # Move the batch to the same device as the model.
        inputs = inputs.to(device)
        target = target.to(device)
        # Forward + backward + parameter update.
        loss = loss_function(mnistNN(inputs), target)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        running_loss += loss.item()
        if (batch_idx + 1) % 300 == 0:
            print('[%d,%5d] loss: %.6f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


def test():
    """Evaluate the global `mnistNN` on `val_loader` and print top-1 accuracy."""
    # FIX: set eval mode once, not on every batch as before; eval() disables dropout.
    mnistNN.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in val_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = mnistNN(images)
            # Predicted class = argmax over the 10 logits (avoid deprecated .data).
            _, predicted = torch.max(outputs, dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy on test set : %3d %%' % (100 * correct / total))


def get_model():
    """Build a fresh MnistNN together with its loss function and optimizer.

    Returns:
        (model, loss_function, optimizer) — ready to pass to train_val_MnistNN.
    """
    model = MnistNN()
    criterion = torch.nn.CrossEntropyLoss()
    sgd = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
    return model, criterion, sgd


# model.train() is normally set while training so Batch Normalization and Dropout are active.
# model.eval() is chosen at test time so Batch Normalization and Dropout are disabled.
def loss_batch(model, loss_func, x_input, target, opti=None):
    """Compute the loss for one batch; take an optimizer step when `opti` is given.

    Returns:
        (loss_value, batch_size) so callers can build a size-weighted average.
    """
    model.to(device)
    # BUG FIX: Tensor.to() is NOT in-place (unlike Module.to()); the result must
    # be re-assigned, otherwise the data silently stays on its original device
    # and a CUDA run fails with a device-mismatch error.
    x_input = x_input.to(device)
    target = target.to(device)
    loss_output = loss_func(model(x_input), target)
    if opti is not None:
        # Training mode: backprop, update, then clear grads for the next batch.
        loss_output.backward()
        opti.step()
        opti.zero_grad()
    return loss_output.item(), len(x_input)


def train_val_MnistNN(model, loss_func, opti, train_data, val_data, epochs):
    """Train `model` for `epochs` epochs, printing the validation loss after each.

    model.train()/model.eval() toggle dropout on and off for the two phases.
    """
    for epoch in range(epochs):
        model.train()
        # FIX: removed a leftover per-batch debug `print(device)`.
        for x, y in train_data:
            loss_batch(model, loss_func, x, y, opti)

        model.eval()
        with torch.no_grad():
            # FIX: run the validation pass once; previously the whole pass was
            # executed twice (once for a debug print of the raw list, once here).
            losses, nums = zip(*[loss_batch(model, loss_func, x_val, y_val) for x_val, y_val in val_data])
            # Size-weighted mean loss over the validation set.
            val_loss = np.sum(losses) / np.sum(nums)
            print("epoch: ", epoch, "loss: ", val_loss)


if __name__ == "__main__":
    # Full training runs (currently disabled):
    #   for epoch in range(30): train(epoch); test()
    #   model, loss_func, opti = get_model()
    #   train_val_MnistNN(model, loss_func, opti, train_loader, val_loader, 30)
    # Debug: inspect one batch's shape and dump the model's parameters.
    first_batch = next(iter(train_loader))
    print(first_batch[0].shape)
    for name, parameter in mnistNN.named_parameters():
        print(name, parameter, parameter.size())
