import torch
from torch import nn
from torch.nn import functional as F
import torchvision
from    torch import optim

# Report CUDA availability and pick the compute device.
# Bug fix: torch.cuda.current_device() raises on CPU-only machines, so it is
# only queried behind the availability check, and the device falls back to
# CPU instead of being hard-coded to 'cuda:0'.
is_gpu = torch.cuda.is_available()
gpu_nums = torch.cuda.device_count()
gpu_index = torch.cuda.current_device() if is_gpu else -1  # -1 = no GPU present
print(is_gpu, gpu_nums, gpu_index)
device = torch.device('cuda:0' if is_gpu else 'cpu')

batch_size = 128

# step1. load dataset
# Shared preprocessing: convert to tensor, then normalize with the standard
# MNIST mean/std (0.1307 / 0.3081). Factored out so train and test are
# guaranteed to use identical preprocessing.
_mnist_transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.1307,), (0.3081,)),
])

# Use one root directory for both splits (was inconsistently 'mnist_data'
# vs 'mnist_data/').
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('mnist_data', train=True, download=True,
                               transform=_mnist_transform),
    batch_size=batch_size, shuffle=True)

test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('mnist_data', train=False, download=True,
                               transform=_mnist_transform),
    batch_size=batch_size, shuffle=False)

# Peek at one batch to confirm shapes: images (B, 1, 28, 28), labels (B,).
sample = next(iter(train_loader))

print(sample[0].shape, sample[1].shape)

def one_hot(label, depth=10):
    """Convert a tensor of integer class labels to a one-hot float tensor.

    Args:
        label: tensor of class indices; flattened to one index per row.
        depth: number of classes (width of the one-hot encoding).

    Returns:
        Float tensor of shape (label.size(0), depth) with a 1 at each
        label's index and 0 elsewhere.
    """
    out = torch.zeros(label.size(0), depth)
    # scatter_ requires an int64 index tensor shaped as a column.
    # Bug fix: the legacy torch.LongTensor(tensor) constructor is
    # deprecated for wrapping an existing tensor; cast/view instead.
    idx = label.long().view(-1, 1)
    out.scatter_(dim=1, index=idx, value=1)
    return out

class Model(nn.Module):
    """Four-layer fully connected classifier for flattened 28x28 MNIST
    images: 784 inputs -> 256 -> 128 -> 64 -> 10 class logits."""

    def __init__(self):
        super(Model, self).__init__()
        # Attribute names layer1..layer4 are kept stable so saved
        # state_dicts remain loadable.
        self.layer1 = nn.Linear(784, 256)
        self.layer2 = nn.Linear(256, 128)
        self.layer3 = nn.Linear(128, 64)
        self.layer4 = nn.Linear(64, 10)

    def forward(self, input):
        """Pass the batch through three ReLU-activated hidden layers and
        return the raw (un-softmaxed) logits of the final layer."""
        hidden = input
        for hidden_layer in (self.layer1, self.layer2, self.layer3):
            hidden = F.relu(hidden_layer(hidden))
        return self.layer4(hidden)

model = Model().to(device)
# Print the model's state_dict: each parameter name and its shape.
print("Model's state_dict:")
for name, tensor in model.state_dict().items():
    print(name, "\t", tensor.size())

optimizer = optim.Adam(model.parameters(), lr=0.01)

def main():
    """Train the model for 3 epochs on MNIST, print the test accuracy
    after each epoch, then save a full checkpoint, the whole model, and
    the weights-only state_dict."""
    import os  # local import: only needed for checkpoint dir creation

    for epoch in range(3):
        # ---- training ----
        model.train()
        for step, (x, y) in enumerate(train_loader):
            # Flatten (B, 1, 28, 28) images to (B, 784) vectors.
            x = x.view(x.shape[0], 28 * 28).to(device)
            out = model(x)

            # NOTE(review): MSE against one-hot targets works but
            # F.cross_entropy(out, y) is the conventional choice for
            # classification; kept as-is to preserve behavior.
            y_onehot = one_hot(y).to(device)
            loss = F.mse_loss(out, y_onehot)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % 100 == 0:
                print(epoch, step, loss.item())

        # ---- evaluation ----
        model.eval()
        totalacc = 0
        with torch.no_grad():  # no gradients needed during evaluation
            for x, y in test_loader:
                x = x.view(x.shape[0], 28 * 28).to(device)
                y = y.to(device)
                out = model(x)
                pred = out.argmax(dim=1)
                totalacc += pred.eq(y).sum().float().item()

        total_num = len(test_loader.dataset)
        acc = totalacc / total_num
        print('acc:', acc)

    # Save a full training checkpoint (epoch, weights, loss, optimizer).
    path = 'checkpoint/all_state.pth'
    # Bug fix: torch.save does not create directories; this crashed when
    # 'checkpoint/' did not exist.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    torch.save({'epoch': epoch + 1, 'state_dict': model.state_dict(),
                # Bug fix: save a plain float, not a graph-attached tensor.
                'best_loss': loss.item(),
                'optimizer': optimizer.state_dict()}, path)

    # Save the entire network object.
    PATH = 'mnist_model.pth'
    torch.save(model, PATH)
    print('保存网络')
    # Save only the parameters: faster and smaller than the whole model.
    PATH = 'mnist_weight.pth'
    torch.save(model.state_dict(), PATH)
    print('保存网络权值')

# NOTE(review): this module-level flag previously gated the load below; the
# function now validates its own `checkpoint_PATH` argument instead. Kept
# only so any external reference to the name still resolves.
checkpoint = 'check'
def load_checkpoint(model, checkpoint_PATH, optimizer):
    """Restore model and optimizer state from a checkpoint file.

    Args:
        model: module whose parameters are overwritten in place.
        checkpoint_PATH: path to a checkpoint containing 'state_dict'
            and 'optimizer' keys (as written by main()), or None to skip.
        optimizer: optimizer whose state is restored in place.

    Returns:
        The (model, optimizer) pair, updated in place.
    """
    # Bug fix: the original tested the unrelated module-level `checkpoint`
    # string (always truthy), so a None path still reached torch.load.
    if checkpoint_PATH is not None:
        model_CKPT = torch.load(checkpoint_PATH)
        model.load_state_dict(model_CKPT['state_dict'])
        print('loading checkpoint!')
        optimizer.load_state_dict(model_CKPT['optimizer'])
    return model, optimizer
# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
