import torch
import torch.optim as optim
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
import torchvision
import argparse
import torchvision.models as models
from model.resnet import Resnet
from utils.dataset import CIFAR10,imagenet

# Command-line interface. All hyper-parameters of the run are configured here;
# the parsed namespace `args` is read by every function below.
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--data', metavar='PATH', required=True,
                    help='path to dataset')
parser.add_argument('--batchSize', '-b', default=256, type=int, metavar='N',
                    help='mini-batch size (1 = pure stochastic) Default: 256')
parser.add_argument('--nEpochs', '-e', default=240, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--nWorkers', '-w', default=2, type=int, metavar='N',
                    help='number of workers to load batch')
parser.add_argument('--lr', default=7e-4, type=float, metavar='N',
                    help='learning rate')
parser.add_argument('--savePath', metavar='PATH', required=True,
                    help='model save path')
parser.add_argument('--dataType', default='CIFAR10', type=str, required=False)
# BUG FIX: `type=list` splits the argument string into single characters
# ("--gpus 01" became ['0', '1'] as strings); nargs='+' with type=int
# parses a proper list of integer device ids. Default [0] is unchanged.
parser.add_argument('--gpus', default=[0], type=int, nargs='+', required=False,
                    help='gpu ids to use, e.g. --gpus 0 1')
# BUG FIX: `type=bool` made every non-empty string truthy, so
# "--resume False" still resumed; a store_true flag is the correct idiom.
parser.add_argument('--resume', default=False, action='store_true',
                    help='resume training from the checkpoint at --savePath')
args = parser.parse_args()

def train_epoch(epoch_id=None, dataset=None, model=None, criterion=None, optimizer=None):
    """Run one optimization pass over ``dataset.train_loader``.

    Every 10 batches the current loss and batch accuracy are printed and
    appended (tab-separated) to ``<savePath>.train.log``; every 500 batches
    (after batch 400) the model's state_dict is checkpointed to ``savePath``.

    Returns the loss tensor of the last processed batch, or ``None`` if the
    loader yielded no batches.
    """
    model.train()  # ensure dropout/batch-norm layers are in training mode
    loss = None    # guard: an empty loader would otherwise leave `loss` unbound
    # Context manager guarantees the log file is closed even if a batch raises.
    with open(args.savePath + '.train.log', 'a') as f:
        print('epoch:%d, total batch:%d'%(epoch_id,len(dataset.train_loader)))
        for index, (image, label) in enumerate(dataset.train_loader):
            if torch.cuda.is_available():
                image, label = image.cuda(), label.cuda()
            # forward
            optimizer.zero_grad()
            out = model(image)
            loss = criterion(out, label)

            # backward
            loss.backward()
            optimizer.step()

            # batch accuracy: fraction of argmax predictions that match labels
            # (the original's `.int()` cast before `.sum()` was redundant)
            _, pred = torch.max(out, 1)
            acc = (pred == label).sum().item() / label.size(0)

            if index % 10 == 0:
                print('epoch:%d--process:%.2f%%,loss:%.4f,train accuracy:%.2f%%'%(epoch_id,index*100/len(dataset.train_loader),loss.item(),acc*100))
                f.write('%d\t%.4f\t%.4f\n'%(epoch_id,loss.item(),acc*100))
            if index % 500 == 0 and index > 400:
                print('saved model as last checkpoint to ' + args.savePath)
                torch.save(model.state_dict(), args.savePath)
    return loss

def val_epoch(epoch_id=None, dataset=None, model=None):
    """Evaluate ``model`` on ``dataset.val_loader`` and print the accuracy.

    Fixes vs. the original:
    - accumulates correct/total counts instead of averaging per-batch means,
      which was biased when the final batch was smaller than the rest;
    - runs under ``torch.no_grad()`` so no autograd state is recorded;
    - switches the model to eval mode (disables dropout);
    - returns 0.0 instead of dividing by zero on an empty loader.

    Returns the overall accuracy as a float in [0, 1] (new, backward-compatible).
    """
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for image, label in dataset.val_loader:
            if torch.cuda.is_available():
                image, label = image.cuda(), label.cuda()
            out = model(image)
            _, pred = torch.max(out, 1)
            correct += (pred == label).sum().item()
            total += label.size(0)
    acc = correct / total if total else 0.0
    print('val accuracy:%.2f%%'%(acc*100))
    return acc

def adj_lr(epoch_id=None, optim=None):
    """Step-decay the learning rate: divide ``args.lr`` by 10 every 20 epochs,
    floored at 1e-4, and write it into every param group of ``optim``.

    NOTE(review): the parameter name ``optim`` shadows the module-level
    ``torch.optim`` import inside this function; it is kept unchanged so any
    keyword-argument callers (``adj_lr(optim=...)``) keep working.

    Returns the learning rate that was set (new, backward-compatible).
    """
    lr = max(args.lr * (0.1 ** (epoch_id // 20)), 1e-4)
    print('set lr to: %.4f'%lr)
    for param_group in optim.param_groups:
        # fixed: the original line indented with a mix of spaces and a tab,
        # which raises TabError under Python 3
        param_group['lr'] = lr
    return lr

def train():
    """Build a ResNet-50 with a 10-way head, optionally resume from a
    checkpoint, and run the full train/validate/LR-decay loop."""
    model = models.resnet50(pretrained=True)

    # Replace the classifier head: 256-unit bottleneck with dropout, then a
    # 10-class output producing log-probabilities via LogSoftmax.
    model.avgpool = nn.AvgPool2d(1, stride=1)
    model.fc = nn.Sequential(
        nn.Linear(model.fc.in_features, 256),
        nn.ReLU(),
        nn.Dropout(0.4),
        nn.Linear(256, 10),
        nn.LogSoftmax(dim=1)
    )

    if len(args.gpus) > 1:
        model = torch.nn.DataParallel(model)

    if torch.cuda.is_available():
        model = model.cuda()

    if args.resume:
        model.load_state_dict(torch.load(args.savePath))
        # Read the last logged epoch so training continues where it stopped.
        # fixed: the log file handle was never closed in this branch
        with open(args.savePath + '.train.log', 'r') as f:
            last_epoch = int(f.readlines()[-1].split('\t')[0])
    else:
        # truncate/create a fresh log file for this run
        open(args.savePath + '.train.log', 'w').close()
        last_epoch = 0

    if args.dataType == 'CIFAR10':
        dataset = CIFAR10(args.data, args.batchSize, args.nWorkers)
    else:
        dataset = imagenet(args.data, args.batchSize, args.nWorkers)

    # BUG FIX: the head already ends in LogSoftmax, so CrossEntropyLoss would
    # apply log-softmax a second time; NLLLoss is the matching loss for
    # log-probability outputs (LogSoftmax + NLLLoss == CrossEntropyLoss).
    criterion = nn.NLLLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
    for epoch in range(last_epoch + 1, args.nEpochs):
        train_epoch(epoch, dataset, model, criterion, optimizer)
        val_epoch(epoch, dataset, model)
        adj_lr(epoch, optimizer)
        print('saved model as last checkpoint to ' + args.savePath)
        torch.save(model.state_dict(), args.savePath)

# Script entry point: args are parsed at import time above, so running this
# module requires --data and --savePath on the command line.
if __name__ == '__main__':
    train()
    
        
