import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import matplotlib.pyplot as plt
import time
import sys

# Report the torch version in use.
print("torch的版本:\t", torch.__version__)

# Wall-clock start for the end-of-run timing report.
start = time.time()

# Hyperparameters.
BATCH_SIZE = 32   # samples per mini-batch
nEpochs = 16      # passes over the training set
numPrint = 1000   # log / evaluate every numPrint batches

# Prefer a CUDA device when one is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# CIFAR-10 preprocessing: fixed 32x32 size, tensor conversion, and
# per-channel normalization with the standard CIFAR-10 statistics.
transform = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize(
        [0.4914, 0.4822, 0.4465],
        [0.247, 0.243, 0.261],
    ),
])

# Echo the selected compute device.
print("device:\t", device)

# CIFAR-10 train/test datasets (downloaded to ./ if missing) and loaders.
# Training batches are shuffled each epoch; test batches keep a fixed order.
trainset = torchvision.datasets.CIFAR10(
    root='./', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(
    trainset, batch_size=BATCH_SIZE, shuffle=True)

testset = torchvision.datasets.CIFAR10(
    root='./', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(
    testset, batch_size=BATCH_SIZE, shuffle=False)

# NOTE(review): duplicates the device print above — looks like a leftover.
print("device:\t", device)
# 定义神经网络
class AlexNet(nn.Module):
    """Small AlexNet-style CNN for CIFAR-10: input (batch, 3, 32, 32) -> 10 logits.

    Five conv+ReLU+maxpool stages; each 3x3 conv (stride 1, padding 1)
    preserves the spatial size and each 2x2 max-pool halves it
    (32 -> 16 -> 8 -> 4 -> 2 -> 1), followed by a 3-layer MLP head.
    """

    def __init__(self):
        super(AlexNet, self).__init__()
        # Five convolutional stages.
        self.conv1 = nn.Sequential(  # in: 3 x 32 x 32
            nn.Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=1),  # (32-3+2)/1+1 = 32
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)  # (32-2)/2+1 = 16
        )
        self.conv2 = nn.Sequential(  # in: 6 x 16 x 16
            nn.Conv2d(in_channels=6, out_channels=16, kernel_size=3, stride=1, padding=1),  # (16-3+2)/1+1 = 16
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)  # (16-2)/2+1 = 8
        )
        self.conv3 = nn.Sequential(  # in: 16 x 8 x 8
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1),  # (8-3+2)/1+1 = 8
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)  # (8-2)/2+1 = 4
        )
        self.conv4 = nn.Sequential(  # in: 32 x 4 x 4 (original comment said 64 channels — incorrect)
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),  # (4-3+2)/1+1 = 4
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)  # (4-2)/2+1 = 2
        )
        self.conv5 = nn.Sequential(  # in: 64 x 2 x 2 (original comment said 128 channels — incorrect)
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),  # (2-3+2)/1+1 = 2
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)  # (2-2)/2+1 = 1
        )  # final conv stage outputs 128 x 1 x 1
        # Fully-connected classifier head: 128 -> 120 -> 84 -> 10.
        self.dense = nn.Sequential(
            nn.Linear(128, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, 10)
        )

    def forward(self, x):
        """Return class logits of shape (batch, 10) for input (batch, 3, 32, 32)."""
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        # Flatten from dim 1 so the batch dimension is preserved. Unlike the
        # previous view(-1, 128), this raises on unexpected spatial sizes
        # instead of silently folding extra elements into the batch dimension.
        x = torch.flatten(x, 1)  # -> (batch, 128)
        x = self.dense(x)
        return x


# One network instance (plain SGD is the only optimizer used here).
net_SGD = AlexNet()

# SGD optimizer over the network's parameters, lr = 0.01.
opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=0.01)

# Cross-entropy classification loss.
loss_function = nn.CrossEntropyLoss()

# Running training-loss values, appended once every numPrint batches.
losses_history = []

# Test-set top-1 accuracy recorded alongside each loss entry.
top1Acc_history = []

# 使用测试数据测试网络
# Evaluate the current network on the full test set.
def Accuracy():
    """Return the model's top-1 accuracy (percent) over the test set.

    Reads the module-level ``net_SGD``, ``testloader`` and ``device``.
    """
    correct = 0
    total = 0
    with torch.no_grad():  # inference only: no gradients needed
        for data in testloader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)  # move batch to GPU/CPU
            outputs = net_SGD(images)
            # argmax over the class dimension gives the predicted label.
            # (The previous outputs.data access was unnecessary under
            # no_grad() and bypasses autograd's safety checks.)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    return 100.0 * correct / total

# optime = ['SGD']
# name_indx = 0
#
# print('using ', optime[name_indx])

# Move the model to the target device ONCE before training. Previously this
# ran inside the batch loop, repeating the (no-op after the first call)
# transfer every step; hoisting it also guarantees `net` is defined for the
# torch.save below even if the loop body never executes. Module.to() moves
# parameters in place, so opt_SGD keeps valid references.
net = net_SGD.to(device)

for epoch in range(nEpochs):
    running_loss = 0.0
    print('批次Epoch:', epoch + 1, '开始训练Training...')
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data  # unpack the batch
        inputs, labels = inputs.to(device), labels.to(device)  # move batch to the device
        opt_SGD.zero_grad()  # clear accumulated gradients
        # Forward / backward / update.
        outputs = net_SGD(inputs)
        # The loss is already computed on `device`; no extra .to() needed.
        loss = loss_function(outputs, labels)

        loss.backward()
        opt_SGD.step()

        running_loss += loss.item()
        if i % numPrint == 999:  # log / evaluate every numPrint batches
            losses_history.append(running_loss / numPrint)
            print('批次epoch: %d\t 批量batch: %d\t 损失loss: %.6f' % (epoch + 1, i + 1, running_loss / numPrint))
            running_loss = 0.0
            top1Acc = Accuracy()
            print('准确率Accuracy of the network on the 10000 test images: %d %%' % top1Acc)
            top1Acc_history.append(top1Acc)

# Persist the trained model (whole-module pickle, not just the state_dict).
model_name = 'SGD_my' + '_model.pkl'
save_path = './' + model_name
torch.save(net, save_path)

# Render the loss curve, then the accuracy curve, with identical styling.
for series, y_label, fig_title, y_max in (
        (losses_history, 'Loss', 'Loss', 3),
        (top1Acc_history, 'Top1Acc', 'Acc', 100)):
    plt.plot(series, label='SGD')
    plt.legend(loc='best')
    plt.xlabel('Steps')
    plt.ylabel(y_label)
    plt.title(fig_title)
    plt.ylim((0, y_max))
    plt.show()

# Total wall-clock running time of the script.
end = time.time()
print("程序花费的时间:\t", end - start)