import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from matplotlib import pyplot as plt
from torchvision import datasets, transforms

BATCH_SIZE = 512  # samples per mini-batch
EPOCHS = 20  # total number of training epochs per model
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'  # runtime device
# Per-batch loss histories and examples-seen counters for the two models;
# appended to by train() and consumed by loss_plot().
origin_train_losses = []
origin_train_counter = []
improved_train_losses = []
improved_train_counter = []


class ConvNet(nn.Module):
    """LeNet-5-style CNN for 3x32x32 input (CIFAR-10), 10-class log-softmax output.

    fc1's in_features=576 pins the expected spatial flow: 32 -> 16 -> 12 -> 6,
    i.e. 16 channels * 6 * 6 = 576 flattened features.
    """

    def __init__(self):
        super().__init__()
        # padding=2 keeps the 32x32 spatial size through the first conv
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=5, padding=2)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, padding=0)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.fc1 = nn.Linear(in_features=576, out_features=120)
        self.fc2 = nn.Linear(in_features=120, out_features=84)
        self.fc3 = nn.Linear(in_features=84, out_features=10)

    def forward(self, x):
        """Return per-class log-probabilities of shape (batch, 10)."""
        x = self.pool1(F.relu(self.conv1(x)))  # -> (N, 6, 16, 16) for 32x32 input
        x = self.pool2(F.relu(self.conv2(x)))  # -> (N, 16, 6, 6)
        x = x.view(x.size(0), -1)              # flatten each sample to 576 features
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return F.log_softmax(self.fc3(x), dim=1)


class ImprovedNet(nn.Module):
    """Deeper/wider CNN variant for 3x32x32 input (CIFAR-10), log-softmax over 10 classes.

    fc1's in_features=6912 pins the spatial flow: 32 -> 24 -> 12 -> 12 -> 6,
    i.e. 192 channels * 6 * 6 = 6912 flattened features.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=9, padding=0)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        # padding=3 preserves spatial size through the 7x7 conv
        self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=7, padding=3)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        # two size-preserving 3x3 convs deepen the feature extractor
        self.conv3 = nn.Conv2d(in_channels=128, out_channels=192, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(in_channels=192, out_channels=192, kernel_size=3, padding=1)
        self.fc1 = nn.Linear(in_features=6912, out_features=480)
        self.fc2 = nn.Linear(in_features=480, out_features=84)
        self.fc3 = nn.Linear(in_features=84, out_features=10)

    def forward(self, x):
        """Return per-class log-probabilities of shape (batch, 10)."""
        x = self.pool1(F.relu(self.conv1(x)))  # -> (N, 64, 12, 12) for 32x32 input
        x = self.pool2(F.relu(self.conv2(x)))  # -> (N, 128, 6, 6)
        x = F.relu(self.conv3(x))              # -> (N, 192, 6, 6)
        x = F.relu(self.conv4(x))              # spatial size unchanged
        x = x.view(x.size(0), -1)              # flatten each sample to 6912 features
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return F.log_softmax(self.fc3(x), dim=1)


def train(model, device, train_loader, optimizer, epoch, is_origin):
    """Run one training epoch, record per-batch losses, and checkpoint the model.

    Args:
        model: network to train (updated in place by the optimizer).
        device: device string/object the batches are moved to.
        train_loader: DataLoader yielding (data, target) batches.
        optimizer: optimizer stepping `model`'s parameters.
        epoch: 1-based epoch index, used for the examples-seen counter.
        is_origin: 1 -> log into the origin_* globals and save the origin
            checkpoint; any other value -> the improved_* globals/checkpoint.
    """
    model.train()
    # Select the log buffers and checkpoint path once, instead of re-branching
    # on every batch as the original code did.
    if is_origin == 1:
        losses, counter = origin_train_losses, origin_train_counter
        ckpt_path = 'models/MNIST_origin.pth'
    else:
        losses, counter = improved_train_losses, improved_train_counter
        ckpt_path = 'models/Cifar10work1.pth'
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)  # model emits log-probabilities
        loss.backward()
        optimizer.step()
        if (batch_idx + 1) % 30 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                       100.0 * batch_idx / len(train_loader), loss.item()))
        losses.append(loss.item())
        counter.append((batch_idx * BATCH_SIZE) + ((epoch - 1) * len(train_loader.dataset)))
    # Bug fix: the original saved the checkpoint on EVERY batch, which is pure
    # redundant disk I/O; saving once at epoch end leaves the same final file.
    torch.save(model.state_dict(), ckpt_path)


def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # 将一批的损失相加
            pred = output.max(1, keepdim=True)[1]  # 找到概率最大的下标
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100.0 * correct / len(test_loader.dataset)))


def loss_plot():
    """Plot both models' training-loss curves on one figure, save and show it.

    Reads the module-level *_train_counter / *_train_losses lists populated by
    train(); writes the comparison image to ./images/MNIST_loss_compare.png.
    """
    # Fix: the original bound the figure to an unused local (`fig`).
    plt.figure()
    plt.plot(origin_train_counter, origin_train_losses, color='blue')
    plt.plot(improved_train_counter, improved_train_losses, color='red')
    plt.legend(['Origin Loss', 'Improved Loss'], loc='upper right')
    plt.xlabel('number of training examples seen')
    plt.ylabel('negative log likelihood loss')
    plt.savefig('./images/MNIST_loss_compare.png')  # persist the comparison plot locally
    plt.show()


if __name__ == "__main__":
    # CIFAR-10 per-channel mean/std normalization (stateless, shared by both loaders).
    cifar10_normalize = transforms.Normalize(
        (0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2615))

    # Load the datasets. download=False assumes ./data already holds CIFAR-10;
    # set download=True for a first run.
    train_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10('data', train=True, download=False,
                         transform=transforms.Compose([
                             transforms.ToTensor(),
                             cifar10_normalize,
                         ])),
        # Bug fix: the original shuffled the TEST set but not the TRAIN set
        # (almost certainly swapped). Training batches must be shuffled.
        batch_size=BATCH_SIZE, shuffle=True)

    test_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10('data', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            cifar10_normalize,
        ])),
        batch_size=BATCH_SIZE, shuffle=False)  # evaluation order is irrelevant

    # Build the networks; .to(DEVICE) replaces the original cuda/cpu if/else.
    origin_model = ConvNet().to(DEVICE)
    improved_model = ImprovedNet().to(DEVICE)
    origin_optimizer = optim.Adam(origin_model.parameters())  # Adam with default hyperparameters
    improved_optimizer = optim.Adam(improved_model.parameters())

    # Train the original network first.
    time_start = time.time()
    for epoch in range(1, EPOCHS + 1):
        train(origin_model, DEVICE, train_loader, origin_optimizer, epoch, 1)
        test(origin_model, DEVICE, test_loader)
    time_end = time.time()
    origin_time_total = (time_end - time_start) * 1000
    print('totally spent time %s ms' % origin_time_total)

    # Then train the improved network.
    time_start = time.time()
    for epoch in range(1, EPOCHS + 1):
        train(improved_model, DEVICE, train_loader, improved_optimizer, epoch, 0)
        test(improved_model, DEVICE, test_loader)
    time_end = time.time()
    improved_time_total = (time_end - time_start) * 1000
    print('totally spent time %s ms' % improved_time_total)

    # Visualize how the loss evolved during both trainings.
    loss_plot()
