import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt

# Initialize default parameters
n_epochs = 3              # number of full passes over the training set
batch_size_train = 64     # intended batch size for the training loader (verify the DataLoader actually uses it)
batch_size_test = 1000    # intended batch size for the test loader (verify the DataLoader actually uses it)
learning_rate = 0.01      # SGD learning rate (used by the optimizer below)
momentum = 0.5            # SGD momentum term
log_interval = 10         # print/record training loss every N batches
random_seed = 1           # fixed seed so runs are reproducible
torch.manual_seed(random_seed)  # seed torch's global RNG (affects shuffling and weight init)

# Download (if not already cached) and load MNIST; ToTensor() converts each
# 28x28 PIL image to a float tensor scaled to [0, 1].
train_dataset = datasets.MNIST(root='../dataset/mnist',
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)
test_dataset = datasets.MNIST(root='../dataset/mnist',
                              train=False,
                              transform=transforms.ToTensor(),
                              download=True)
# Use the configured batch sizes rather than a hard-coded 32, so the
# batch_size_train / batch_size_test parameters defined above take effect
# (the commented training loop's counter also assumes a train batch of 64).
train_dataloader = DataLoader(dataset=train_dataset,
                              batch_size=batch_size_train,
                              shuffle=True)   # reshuffle every epoch for SGD
test_dataloader = DataLoader(dataset=test_dataset,
                             batch_size=batch_size_test,
                             shuffle=False)   # deterministic evaluation order

# Walk the whole test loader once, printing each batch's image-tensor shape
# and its label tensor (quick sanity check of the data pipeline).
for images, labels in test_dataloader:
    print(images.shape, labels)

# Peek at the first batch via a fresh enumerated iterator; batch_idx is the
# position of that batch (0 for the first one).
examples = enumerate(test_dataloader)
batch_idx, (example_data, example_targets) = next(examples)
print(batch_idx)
# print(example_targets)
# print(example_data.shape)
# # print(next(iter(train_dataloader))[0].shape)
# train_features, train_labels = next(iter(train_dataloader))
# print(f"Feature batch shape: {train_features.size()}")
# print(f"Labels batch shape: {train_labels.size()}")
# train_features, train_labels = next(iter(train_dataloader))

# plot 6 examples
# fig = plt.figure()
# for i in range(6):
#     plt.subplot(2, 3, i + 1)
#     plt.tight_layout()
#     plt.imshow(train_features[i][0], cmap='gray', interpolation='none')
#     plt.title("Ground Truth: {}".format(train_labels[i]))
#     plt.xticks([])
#     plt.yticks([])
# plt.show()


# Design model using Class
# class Net(nn.Module):
#     def __init__(self):
#         super(Net, self).__init__()
#         self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
#         self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
#         self.conv2_drop = nn.Dropout2d()
#         self.fc1 = nn.Linear(320, 50)
#         self.fc2 = nn.Linear(50, 10)
#
#     def forward(self, x):
#         x = F.relu(F.max_pool2d(self.conv1(x), 2))
#         x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
#         x = x.view(-1, 320)
#         x = F.relu(self.fc1(x))
#         x = F.dropout(x, training=self.training)
#         x = self.fc2(x)
#         return F.log_softmax(x, dim=1)
#
#
# network = Net()
# optimizer = optim.SGD(network.parameters(), lr=0.01, momentum=0.5)
#
# train_losses = []
# train_counter = []
# test_losses = []
# test_counter = [i * len(train_dataloader.dataset) for i in range(n_epochs + 1)]
#
#
# def train(epoch):
#     network.train()
#     for batch_idx, (data, target) in enumerate(train_dataloader):
#         optimizer.zero_grad()
#         output = network(data)
#         loss = F.nll_loss(output, target)
#         loss.backward()
#         optimizer.step()
#         if batch_idx % log_interval == 0:
#             print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
#                 epoch, batch_idx * len(data), len(train_dataloader.dataset),
#                        100. * batch_idx / len(train_dataloader), loss.item()))
#             train_losses.append(loss.item())
#             train_counter.append(
#                 (batch_idx * 64) + ((epoch - 1) * len(train_dataloader.dataset)))
#             torch.save(network.state_dict(), './model.pth')
#             torch.save(optimizer.state_dict(), './optimizer.pth')
#
#
# if __name__ == "__main__":
#     train(1)
