import torch
import torch.nn as nn
from torch.utils import data
from torchvision import datasets
from torchvision import transforms
from torchvision.transforms import ToTensor
import matplotlib.pyplot as plt


# Preprocessing: upscale the 28x28 MNIST digits to 224x224 (AlexNet's expected
# input size), then convert PIL images to float tensors in [0, 1].
trans = transforms.Compose([
    transforms.Resize(224),
    transforms.ToTensor(),
])
batch_size = 256

# train=True loads the training split (created from training.pt)
training_data = datasets.MNIST(
    root="./data",
    train=True,
    download=True,
    transform=trans,
)

# train=False loads the test split (created from test.pt)
test_data = datasets.MNIST(
    root="./data",
    train=False,
    download=True,
    transform=trans,
)

# NOTE(review): num_workers > 0 in a top-level script without an
# `if __name__ == "__main__":` guard can fail on spawn-based platforms
# (Windows/macOS) — confirm the target platform.
train_iter = data.DataLoader(training_data, batch_size, shuffle=True, num_workers=2)
test_iter = data.DataLoader(test_data, batch_size, shuffle=False, num_workers=2)

# Pull the first batch from the training loader to sanity-check shapes.
train_features, train_labels = next(iter(train_iter))
print(f"Feature batch shape: {train_features.size()}")
print(f"Labels batch shape: {train_labels.size()}")
# Bug fix: len(DataLoader) is the NUMBER OF BATCHES per epoch (ceil(60000/256)),
# not the batch size — the old message labeled it "batch size". Also drop the
# redundant iter(): DataLoader itself defines __len__.
print(f"number of batches: {len(train_iter)}")

# Show a 3x3 grid of randomly sampled training digits with their labels.
figure = plt.figure(figsize=(6, 6))
sample_idx = torch.randint(len(training_data), size=(9,))
# enumerate yields (index, value) pairs; i drives the subplot position.
for i, pict_index in enumerate(sample_idx):
    # .item() converts the 0-dim index tensor to a plain int, which any
    # Dataset __getitem__ accepts (tensor indices are not universally supported).
    img, label = training_data[pict_index.item()]
    figure.add_subplot(3, 3, i + 1)
    plt.title(str(label))
    plt.axis("off")
    # squeeze() drops the single channel dim: (1, H, W) -> (H, W) for imshow.
    plt.imshow(img.squeeze(), cmap="gray")
plt.show()


# # nn.Module 是所有网络层的父类，pytorch 提供的线性层、卷积层也是继承自这个类
# class AlexNet(nn.Module):
#     def __init__(self):
#         super().__init__()

#         # 定义网络结构，搭建神经网络的模块被按照被传入构造器的顺序添加到nn.Sequential()容器中
#         self.features = nn.Sequential(
#             # 卷积层
#             nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
#             # 线性整流层
#             nn.ReLU(inplace=True), # inplace = True ,会改变输入数据的值,节省反复申请与释放内存的空间与时间
#             # 池化层
#             nn.MaxPool2d(kernel_size=3, stride=2, padding=0),

#             nn.Conv2d(64, 192, kernel_size=5, stride=1, padding=2),
#             nn.ReLU(inplace=True),
#             nn.MaxPool2d(kernel_size=3, stride=2),

#             nn.Conv2d(192, 384, kernel_size=3, stride=1, padding=1),
#             nn.ReLU(inplace=True),


#             nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),
#             nn.ReLU(inplace=True),

#             nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
#             nn.ReLU(inplace=True),
#             nn.MaxPool2d(kernel_size=3),
#         )

#         self.classifier = nn.Sequential(
#             nn.Dropout(p=0.5, inplace=False),
#             # 全连接层
#             nn.Linear(in_features=9216, out_features=4096, bias=True), # 输入神经元个数，输出神经元个数，是否包含偏置
#             nn.ReLU(inplace=True),
#             nn.Dropout(p=0.5, inplace=False),
#             nn.Linear(in_features=4096, out_features=4096, bias=True),
#             nn.ReLU(inplace=True),
#             nn.Linear(in_features=4096, out_features=10, bias=True)
#         )

#     def forward(self, x):
#         x = self.features(x)
#         # flatten from dim 1 to keep the batch dimension intact
#         x = torch.flatten(x, 1)
#         x = self.classifier(x)
#         return x


# learning_rate = 0.001
# batch_size = 256
# # 优化器，优化参数，采用了 SGD 随机梯度下降算法
# optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
