from torchvision import datasets, transforms
import torch
from tensorboardX import SummaryWriter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

summary_writer = SummaryWriter("./runs")  # TensorBoard event files go to ./runs
idx = 0  # global step counter shared with train() for scalar logging
FR = 0.02  # learning rate -- NOTE(review): unused; Adam below is created with its default lr
BATCH = 512  # number of samples per mini-batch
EPOCHS = 5  # total number of training epochs
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # prefer GPU when available (much faster)


class my_cnn(nn.Module):
    """Small two-conv-layer CNN for 28x28 single-channel images (MNIST).

    forward() maps a (N, 1, 28, 28) batch to (N, 10) log-probabilities
    (log_softmax output, suitable for F.nll_loss).
    """

    def __init__(self) -> None:
        super().__init__()
        # 1 -> 10 channels; 5x5 kernel, stride 1, padding 2 keeps 28x28
        self.conv1 = nn.Conv2d(1, 10, (5, 5), (1, 1), 2)
        # 10 -> 30 channels; 3x3 kernel, stride 1, padding 1 keeps spatial size
        self.conv2 = nn.Conv2d(10, 30, (3, 3), (1, 1), 1)
        # two 2x2 max-pools shrink 28 -> 14 -> 7, hence 30 * 7 * 7 inputs
        self.fc1 = nn.Linear(30 * 7 * 7, 1024)
        self.fc2 = nn.Linear(1024, 10)

    def forward(self, x):
        """Return class log-probabilities for the input batch."""
        out = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)    # (N, 10, 14, 14)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2, 2)  # (N, 30, 7, 7)
        out = out.view(-1, 30 * 7 * 7)                     # flatten per sample
        out = self.fc2(F.relu(self.fc1(out)))              # (N, 10) logits
        return F.log_softmax(out, dim=1)


def train(model, device, train_data, optimizer, epoch):
    """Run one training epoch over `train_data`.

    Logs every step's loss to the module-level `summary_writer`, using the
    module-level counter `idx` as the global step, and prints progress to the
    console every 30 batches.
    """
    global idx
    model.train()
    for step, (inputs, labels) in enumerate(train_data):
        inputs = inputs.to(device)
        labels = labels.to(device)

        optimizer.zero_grad()
        loss = F.nll_loss(model(inputs), labels)

        # one scalar per optimizer step, indexed by the shared global counter
        summary_writer.add_scalar("my_cnn", loss.item(), idx)
        idx += 1

        loss.backward()
        optimizer.step()

        if step % 30 == 0:
            print("训练批次:{} loss:{}".format(epoch, loss.item()))


def test(model, device, test_data):
    all_loss = 0
    all_right = 0
    model.eval()
    with torch.no_grad():
        for data, target in test_data:
            data, target = data.to(device), target.to(device)
            output = model(data)
            loss = F.nll_loss(output, target)
            all_loss += loss.item()
            pred = output.max(1, keepdim=True)[1]
            rightFlag = pred.eq(target.view_as(pred)).sum().item()
            all_right += rightFlag

    print("测试集,损失:{}, 正确率:{}".format(all_loss / len(test_data.dataset), all_right / len(test_data.dataset)))


# Download/load the MNIST training set, normalized with the standard
# MNIST mean/std, batched and shuffled.
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./MNIST_data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=BATCH, shuffle=True)

# Load the MNIST test set with the same normalization as the training set.
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./MNIST_data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=BATCH, shuffle=True)

# Build the model on the selected device, then train and evaluate.
# BUG FIX: the original referenced an undefined name `DEVICE` (NameError);
# the device is defined above as lowercase `device`.
model = my_cnn().to(device)
# NOTE(review): Adam uses its default learning rate here; the FR constant
# defined above is never applied -- confirm whether that is intentional.
optimizer = optim.Adam(model.parameters())
for i in range(EPOCHS):
    train(model=model, device=device, train_data=train_loader, optimizer=optimizer, epoch=i)
    test(model=model, device=device, test_data=test_loader)

# Persist the learned weights and flush the TensorBoard event file.
torch.save(model.state_dict(), "my_cnn.pt")
summary_writer.close()
