#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 19-8-20 下午2:19
# @Author  : Aries
# @Site    : 
# @File    : 02-CNN_Image_Classification_live.py
# @Software: PyCharm
import numpy as np
import torch
import cv2
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms,models
import time
# Download URLs for pretrained torchvision checkpoints.
# NOTE(review): this mapping is never referenced in this script (the model is
# trained from scratch below) — kept for reference / possible future use.
model_url={
    'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
    'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
    'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
    'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',
    'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',
}

# A simple ConvNet-based classifier for 28x28 single-channel images.
class Net(nn.Module):
    """Three-convolution CNN that maps (N, 1, 28, 28) batches to 10-class logits.

    Spatial sizes for a 28x28 input (no padding anywhere):
        Conv2d(1->64, k3)      28 -> 26
        MaxPool2d(k2, s2)      26 -> 13
        Conv2d(64->256, k3)    13 -> 11
        MaxPool2d(k2, s1)      11 -> 10
        Conv2d(256->128, k3)   10 -> 8
    so the classifier head consumes 128 * 8 * 8 = 8192 features.

    The module order inside each ``nn.Sequential`` must not change:
    state_dict keys are positional (``features.0.weight`` etc.).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 64, 3),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 256, 3),
            nn.BatchNorm2d(256),
            # NOTE(review): p=0.8 drops 80% of activations — unusually
            # aggressive; confirm this wasn't meant as a keep-probability.
            nn.Dropout(0.8),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=1),
            nn.Conv2d(256, 128, 3),
            nn.BatchNorm2d(128),
            nn.Dropout(0.8),
            nn.ReLU(),
        )
        # NOTE(review): no activation between the first two Linear layers, so
        # they compose into one affine map — possibly unintended, but left
        # as-is to preserve behavior and checkpoint compatibility.
        self.classifier = nn.Sequential(
            nn.Linear(8 * 8 * 128, 2048),
            nn.Linear(2048, 256),
            nn.ReLU(),
            nn.Linear(256, 10),
        )

    def forward(self, x):
        """Return raw (pre-softmax) class scores of shape (N, 10)."""
        feats = self.features(x)
        flat = torch.flatten(feats, 1)  # (N, 128, 8, 8) -> (N, 8192)
        return self.classifier(flat)




# Raw (un-normalized) training split, used only to estimate pixel statistics.
mnist_data = datasets.FashionMNIST("./fashion_mnist_data", train=True, download=True,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                   ]))

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = 64

# Global per-pixel mean/std over the whole training set; both splits are
# normalized with these training-set statistics.
data = [img.data.cpu().numpy() for img, _ in mnist_data]
data_mean = np.mean(data)
data_std = np.std(data)

# One shared preprocessing pipeline for both loaders.
_normalize = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((data_mean,), (data_std,)),
])

train_dataloader = torch.utils.data.DataLoader(
        datasets.FashionMNIST("./fashion_mnist_data", train=True, download=True,
                              transform=_normalize),
        batch_size=batch_size, shuffle=True, num_workers=2, pin_memory=True)

test_dataloader = torch.utils.data.DataLoader(
        datasets.FashionMNIST("./fashion_mnist_data", train=False, download=True,
                              transform=_normalize),
        batch_size=batch_size, shuffle=False, num_workers=1, pin_memory=True)


def train(model, device, train_loader, optimizer, epoch):
    """Run one training epoch.

    Computes log-softmax + NLL loss (i.e. cross-entropy) per batch, takes one
    optimizer step per batch, and logs the loss every 100 iterations.
    """
    model.train()
    for batch_idx, (inputs, labels) in enumerate(train_loader):
        inputs = inputs.to(device)
        labels = labels.to(device)

        log_probs = F.log_softmax(model(inputs), dim=1)  # (batch, 10)
        loss = F.nll_loss(log_probs, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % 100 == 0:
            print("Train Epoch:{},iteration:{}, Loss:{}".format(epoch, batch_idx, loss.item()))


def test(model, device, test_loader):
    model.eval()
    total_loss = 0
    correct = 0
    with torch.no_grad():
        for index, (data, target) in enumerate(test_loader):
            data, target = data.to(device), target.to(device)

            # data = F.max_pool2d(data, 4, 4)  # 4 * 4
            # data=torch.cat((data, data, data), dim=1)

            output = model(data)  # batch_size * 10
            output = F.log_softmax(output, dim=1)
            total_loss += F.nll_loss(output, target,reduction="sum").item()
            pred = output.argmax(dim=1)  # batch_size * 1
            correct +=pred.eq(target.view_as(target)).sum().item()

    total_loss /= len(test_loader.dataset)
    acc = correct / len(test_loader.dataset) * 100
    print("Test loss:{},Accuracy:{}".format(total_loss,acc))

if __name__ == "__main__":
    # Step-down learning-rate schedule: 1e-3 for epochs 0-4,
    # 5e-4 for epochs 5-14, 1e-4 afterwards.
    lr = 0.001
    lr1 = 0.0005
    lr2 = 0.0001
    momentum = 0.5  # unused with Adam; kept for compatibility
    model = Net().to(device)
    # model = models.vgg19_bn().to(device)

    # NOTE(review): three independent Adam optimizers over the same
    # parameters — switching between them discards Adam's moment estimates.
    # A single optimizer with an lr scheduler would be the usual approach.
    optimizer = optim.Adam(model.parameters(), lr=lr)
    optimizer1 = optim.Adam(model.parameters(), lr=lr1)
    optimizer2 = optim.Adam(model.parameters(), lr=lr2)

    num_epochs = 30
    for epoch in range(num_epochs):
        t1 = time.time()
        if epoch < 5:
            train(model, device, train_dataloader, optimizer, epoch)
        # BUG FIX: original condition was `epoch<15 and 5<epoch`, which
        # excluded epoch 5 and sent it to the else branch (lowest lr).
        elif epoch < 15:
            train(model, device, train_dataloader, optimizer1, epoch)
        else:
            train(model, device, train_dataloader, optimizer2, epoch)

        t2 = time.time()
        print(t2 - t1)
        test(model, device, test_dataloader)

    torch.save(model.state_dict(), "mnist_cnn.pt")