from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets,transforms
from torch.optim.lr_scheduler import StepLR

# Common Python visualization / image-processing libraries: matplotlib, OpenCV, Pillow, SciPy/NumPy (MATLAB-like).

class Net(nn.Module):
    """Small CNN for MNIST digit classification.

    Input:  (N, 1, 28, 28) grayscale images.
    Output: (N, 10) per-class log-probabilities (log_softmax over dim 1).
    """

    def __init__(self):
        super(Net, self).__init__()
        # Feature extractor: 1 -> 32 -> 64 channels, 3x3 kernels, stride 1.
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        # Dropout regularization (active only in train() mode).
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        # Classifier head: 64 channels * 12 * 12 spatial = 9216 features
        # after the two valid convolutions and one 2x2 max-pool.
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Forward pass; returns log-probabilities for the 10 digit classes."""
        features = F.relu(self.conv2(F.relu(self.conv1(x))))
        pooled = self.dropout1(F.max_pool2d(features, 2))
        flat = torch.flatten(pooled, 1)  # keep batch dim, flatten the rest
        hidden = self.dropout2(F.relu(self.fc1(flat)))
        return F.log_softmax(self.fc2(hidden), dim=1)

def train(args, model, device, train_loader, optimizer, epoch):
    """Run one training epoch over train_loader.

    Logs progress every `args.log_interval` batches; if `args.dry_run` is
    set, stops after the first logged batch (quick smoke test).
    """
    model.train()  # enable train-mode behavior for Dropout / BatchNorm layers
    n_samples = len(train_loader.dataset)
    n_batches = len(train_loader)
    for batch_idx, (data, target) in enumerate(train_loader):
        # Move the batch to the training device (e.g. GPU).
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()  # clear gradients left over from the last step
        loss = F.nll_loss(model(data), target)
        loss.backward()   # backpropagate
        optimizer.step()  # apply the parameter update

        if batch_idx % args.log_interval == 0:
            seen = batch_idx * len(data)
            print('Train Epoch:{}[{}/{}({:.0f}%)]\tLoss:{:.6f}'.format(
                epoch, seen, n_samples,
                100. * batch_idx / n_batches, loss.item()))
            if args.dry_run:
                break

def test(model,device,test_loader):
    # get into test pattern  "Dropout and batchnorm"
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data,target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item() #sum uo batch loss
            pred = output.argmax(dim=1, keepdim=True)    #get the index of the max log_probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)

    print('\nTest set:Average loss:{:.4f}, Accuracy:{}/{}({:.0f}%)\n'.format(
        test_loss,correct,len(test_loader.dataset),
        100.*correct/len(test_loader.dataset)))


def main():
    """Parse command-line arguments, then train and evaluate the MNIST CNN.

    Downloads MNIST into ./MNIST on first run; optionally saves the trained
    weights to mnist_cnn.pth when --save-model is given.
    """
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    # NOTE(review): '--batch_size'/'--test_batch_size' use underscores while
    # the other flags use dashes; kept as-is for CLI backward compatibility.
    parser.add_argument('--batch_size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test_batch_size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--dry-run', action='store_true', default=False,
                        help='quickly check a single pass')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)  # reproducible weight init and shuffling

    device = torch.device("cuda" if use_cuda else "cpu")

    train_kwargs = {'batch_size': args.batch_size}
    test_kwargs = {'batch_size': args.test_batch_size}
    if use_cuda:
        # pin_memory speeds up host-to-GPU transfers; note shuffle is only
        # enabled on the CUDA path (original behavior, kept unchanged).
        cuda_kwargs = {'num_workers': 1,
                       'pin_memory': True,
                       'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)

    # Standard MNIST per-channel mean/std normalization constants.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    # The train=True download also fetches the test split, so dataset2
    # does not need download=True.
    dataset1 = datasets.MNIST('./MNIST', train=True, download=True,
                              transform=transform)
    dataset2 = datasets.MNIST('./MNIST', train=False,
                              transform=transform)
    train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
    test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)

    model = Net().to(device)  # move the network to the training device
    # Adadelta adapts per-parameter learning rates; lr=1.0 is its usual scale.
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)

    # Decay the learning rate by `gamma` after every epoch.
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(model, device, test_loader)
        scheduler.step()

    if args.save_model:
        torch.save(model.state_dict(), "mnist_cnn.pth")




