import  torch
import  torch.nn as nn
import  torch.optim as optim
from    torchvision import datasets, transforms

# MNIST + MLP with data augmentation, running on GPU when available.

# Select the compute device: first CUDA GPU if present, otherwise CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Basic training hyperparameters.
batch_size = 200      # samples per mini-batch
learning_rate = 0.01  # SGD step size
epochs = 10           # full passes over the training set

# Load MNIST with data augmentation for training.
# FIX: the horizontal/vertical flips were removed — MNIST digits are not
# mirror-invariant (a flipped '2', '6' or '9' is no longer a valid sample
# of its label) — and the second RandomRotation([90, 180]) stacked on top
# of RandomRotation(15) rotated every image by 90-180 degrees, destroying
# the digit orientation entirely. Mild rotation + random crop remain.
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./MNIST_data', train=True, download=True,
                   transform=transforms.Compose([  # Compose chains several transforms
                       transforms.RandomRotation(15),    # small random rotation (+/- 15 degrees)
                       transforms.Resize([32, 32]),      # upscale so the crop can shift the digit
                       transforms.RandomCrop([28, 28]),  # random 28x28 crop (translation jitter)
                       transforms.ToTensor(),            # PIL image -> float tensor in [0, 1]
                       transforms.Normalize((0.1307,), (0.3081,))  # MNIST mean / std
                   ])),
    batch_size=batch_size, shuffle=True)

# Test set: no augmentation, only tensor conversion plus the same
# normalization. Evaluation data does not need shuffling.
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./MNIST_data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=batch_size, shuffle=False)


# MLP model definition.
class MLP(nn.Module):
    """Three-layer fully connected classifier for flattened MNIST images.

    Maps a (batch, 784) input to (batch, 10) raw class logits suitable
    for ``nn.CrossEntropyLoss``.
    """

    def __init__(self):
        super(MLP, self).__init__()

        self.model = nn.Sequential(
            nn.Linear(784, 200),
            # LeakyReLU keeps a small gradient for z < 0, avoiding the
            # "dying ReLU" problem.
            nn.LeakyReLU(inplace=True),
            nn.Linear(200, 200),
            nn.LeakyReLU(inplace=True),
            # FIX: the final layer outputs raw logits. The trailing
            # LeakyReLU that used to follow it was removed —
            # nn.CrossEntropyLoss expects unnormalized logits, and an
            # activation on the output layer only distorts them.
            nn.Linear(200, 10),
        )

    def forward(self, x):
        """Forward pass: x of shape (batch, 784) -> logits of shape (batch, 10)."""
        return self.model(x)
# ---------------------------------------------------------------------------
# Model, loss criterion and optimizer setup.
net = MLP()
net = net.to(device)  # move parameters onto the selected device

criteon = nn.CrossEntropyLoss().to(device)                 # loss criterion
optimizer = optim.SGD(net.parameters(), lr=learning_rate)  # plain SGD

global_step = 0  # counts optimizer updates across all epochs

# Training loop: one pass over the training set per epoch, followed by a
# full evaluation on the held-out test set.
for epoch in range(epochs):
    net.train()  # explicit mode switch (no dropout/BN here, but good hygiene)
    # Mini-batch gradient descent over the training set.
    for batch_idx, (data, target) in enumerate(train_loader):
        # Flatten images from (batch, 1, 28, 28) to (batch, 784) for the MLP.
        data = data.view(-1, 28*28)
        # BUG FIX: inputs must live on the same device as the model. The
        # original left data/target on the CPU, which raises a device
        # mismatch error whenever the model is on a CUDA device.
        data, target = data.to(device), target.to(device)

        logits = net(data)              # forward pass -> raw class scores
        loss = criteon(logits, target)  # cross-entropy on the logits

        optimizer.zero_grad()  # clear gradients accumulated last step
        loss.backward()        # backpropagate
        optimizer.step()       # apply the parameter update

        global_step += 1

        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                       100. * batch_idx / len(train_loader), loss.item()))

    # Evaluate on the test set after every epoch.
    net.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for data, target in test_loader:
            data = data.view(-1, 28 * 28)
            data, target = data.to(device), target.to(device)
            logits = net(data)
            test_loss += criteon(logits, target).item()  # accumulate batch losses

            # Predicted class = index of the largest logit.
            pred = logits.argmax(dim=1)
            correct += pred.eq(target).float().sum().item()

    # NOTE(review): this divides a sum of per-batch *mean* losses by the
    # dataset size, so the printed "average loss" is scaled down by the
    # batch size; kept as-is to preserve the original reported metric.
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
