import argparse
import os

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# Command-line interface. ``--data`` and ``--savePath`` are mandatory;
# everything else has sensible defaults.
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--data', metavar='PATH', required=True,
                    help='path to dataset')
parser.add_argument('--batchSize', '-b', default=256, type=int, metavar='N',
                    help='mini-batch size (1 = pure stochastic) Default: 256')
parser.add_argument('--nEpochs', default=90, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--nWorkers', default=2, type=int, metavar='N',
                    help='number of workers to load batch')
parser.add_argument('--lr', default=7e-4, type=float, metavar='N',
                    help='learning rate')
parser.add_argument('--savePath', metavar='PATH', required=True,
                    help='model save path')
# Previously ``type=bool`` with required=True: argparse applied bool() to the
# raw string, so any non-empty value -- including 'False' -- enabled
# resuming. A store_true flag is the correct way to express an on/off switch.
parser.add_argument('--resume', action='store_true', default=False,
                    help='resume training from the checkpoint at --savePath')
args = parser.parse_args()



class Dataset():
    """Wraps ImageNet-style train/val ``ImageFolder`` datasets and loaders.

    Expects ``args.data`` to contain ``train`` and ``val`` subdirectories.
    Exposes ``train_loader`` / ``val_loader`` for the training loop.
    """

    def __init__(self):
        # Standard ImageNet normalization statistics.
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        # Augmented transform for training. RandomSizedCrop is the
        # deprecated name of RandomResizedCrop.
        self.transform = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        # Deterministic transform for validation: random crops/flips on the
        # val set made the reported accuracy non-reproducible.
        self.val_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])
        # os.path.join works whether or not args.data ends with a slash;
        # plain concatenation silently produced paths like '/datatrain'.
        self.train_dir = os.path.join(args.data, 'train')
        self.val_dir = os.path.join(args.data, 'val')
        self.train_set = datasets.ImageFolder(self.train_dir, self.transform)
        self.val_set = datasets.ImageFolder(self.val_dir, self.val_transform)
        self.train_loader = torch.utils.data.DataLoader(
            self.train_set, batch_size=args.batchSize, shuffle=True,
            num_workers=args.nWorkers)
        # Validation needs no shuffling.
        self.val_loader = torch.utils.data.DataLoader(
            self.val_set, batch_size=args.batchSize, shuffle=False,
            num_workers=args.nWorkers)

def train_epoch(epoch_id=None, dataset=None, model=None, criterion=None, optimizer=None):
    """Run one training epoch over ``dataset.train_loader``.

    The original read the module-level global ``data`` and ignored its own
    ``dataset`` parameter; it is now used, so the function is testable in
    isolation.

    Returns the loss tensor of the last processed batch, or ``None`` if the
    loader is empty (the original raised UnboundLocalError in that case).
    """
    use_cuda = torch.cuda.is_available()          # hoisted out of the loop
    n_batches = len(dataset.train_loader)
    print('epoch:%d, total batch:%d' % (epoch_id, n_batches))
    loss = None
    for index, [image, label] in enumerate(dataset.train_loader):
        if use_cuda:
            image, label = image.cuda(), label.cuda()
        # forward
        optimizer.zero_grad()
        out = model(image)
        loss = criterion(out, label)

        # backward
        loss.backward()
        optimizer.step()

        # batch accuracy: fraction of top-1 predictions matching the labels
        _, pred = torch.max(out, 1)
        acc = (pred == label).int().sum().item() / pred.size()[0]

        if index % 10 == 0:
            print('epoch:%d--process:%.2f%%,loss:%.4f,train accuracy:%.2f%%'
                  % (epoch_id, index * 100 / n_batches, loss.item(), acc * 100))
        # periodic mid-epoch checkpoint every 100 batches
        if index % 100 == 0 and index > 99:
            print('saved model as last checkpoint to ' + args.savePath)
            torch.save(model.state_dict(), args.savePath)

    return loss

def val_epoch(epoch_id=None, dataset=None, model=None):
    """Evaluate ``model`` on ``dataset.val_loader``; print and return the
    mean per-batch top-1 accuracy.

    Fixes over the original: ``acc`` is initialised before use,
    ``torch.max`` replaces the non-existent ``torch.Max``, the reference to
    an undefined global ``optimizer`` is gone, the ``dataset`` parameter is
    actually used, and evaluation runs under ``torch.no_grad()`` in eval
    mode (restored afterwards so training is unaffected).
    """
    use_cuda = torch.cuda.is_available()
    was_training = model.training
    model.eval()                       # deterministic inference behaviour
    acc = 0.0
    cnt = 0
    with torch.no_grad():              # no gradient buffers during eval
        for index, [image, label] in enumerate(dataset.val_loader):
            if use_cuda:
                image, label = image.cuda(), label.cuda()
            out = model(image)
            _, pred = torch.max(out, 1)
            acc += (pred == label).sum().item() / pred.size()[0]
            cnt += 1
    model.train(was_training)          # restore the caller's mode
    acc = acc / cnt if cnt else 0.0    # guard against an empty loader
    print('val accuracy:%.2f%%' % (acc * 100))
    return acc

def train(dataset=None, epoch_num=None, lr=None):
    """Build the model, criterion, and optimizer, then run the full loop.

    Saves a checkpoint to ``args.savePath`` after every epoch; when
    ``args.resume`` is set, restores weights from that path first.
    """
    # NOTE(review): Resnet18 is defined elsewhere in the project; the 0.2 is
    # presumably a dropout rate -- confirm against its definition.
    model = Resnet18(0.2)
    if args.resume:
        # map_location lets a checkpoint saved on GPU load on a CPU-only host
        model.load_state_dict(torch.load(args.savePath, map_location='cpu'))
    if torch.cuda.is_available():
        model = model.cuda()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    for epoch in range(epoch_num):
        model.train()  # make sure each epoch starts in training mode
        train_epoch(epoch, dataset, model, criterion, optimizer)
        val_epoch(epoch, dataset, model)
        # end-of-epoch checkpoint
        print('saved model as last checkpoint to ' + args.savePath)
        torch.save(model.state_dict(), args.savePath)

if __name__ == '__main__':
    # Build the datasets/loaders from the CLI arguments and run training.
    data = Dataset()
    train(data, args.nEpochs, args.lr)
    
        
