#coding:utf-8
import argparse
import os
import shutil
import numpy as np

import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.datasets as datasets
from dataset import HeightDataset
from model import DetHeight
from utils import save_height_res
from ipdb import set_trace

# Command-line interface.  NOTE: the original description said "Landform
# Classifier", but this script trains the DetHeight regression model, and the
# --gpu help text embedded raw source indentation via a backslash-continued
# string literal; both strings are corrected here.  Flag names and defaults
# are unchanged.
parser = argparse.ArgumentParser(description='PyTorch Height Regression Training')
parser.add_argument('--train_data', metavar='DIR', default='',
                    help='path to training dataset')
parser.add_argument('--test_data', metavar='DIR', default='',
                    help='path to testing data')
# (sic) '--epoches' is kept as-is for CLI backward compatibility.
parser.add_argument('--epoches', default=10, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--model_save_path', metavar='DIR', default='checkpoint',
                    help='path to save trained models')
parser.add_argument('--model_save_name', default='model.pth', help='model save name')
parser.add_argument('--batch_size', default=32, type=int,
                    metavar='N', help='mini-batch size (default: 32)')
parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float,
                    metavar='LR', help='initial learning rate')
parser.add_argument('--train_list', dest='train_list', help='train list')
parser.add_argument('--test_list', dest='test_list', help='test list')
parser.add_argument('--gpu', dest='gpu', default=-1, type=int,
                    help='gpu device; when it is set to be -1, use cpu')
parser.add_argument('--isTrain', dest='isTrain', type=int, default=1, help='is train')
parser.add_argument('--pretrained_model', dest='pretrained_model', default="")
parser.add_argument('--test_res_savepath', dest='test_res_savepath',
        default='res/test_res.txt')
args = parser.parse_args()

# Best validation L1 error seen so far; doValidate() updates this global when
# it checkpoints an improved model.
best_error = 1e5

# Create the checkpoint directory if needed.  makedirs(..., exist_ok=True)
# also creates missing parent directories and avoids the TOCTOU race of the
# original `if not exists: mkdir` pair.
os.makedirs(args.model_save_path, exist_ok=True)

# gpu == -1 means CPU-only; otherwise pin all CUDA work to the chosen device
# and let cuDNN auto-tune convolution kernels for the fixed input size.
if args.gpu != -1:
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True

# Build datasets and loaders.  Input images are rescaled to 640x360.
if args.isTrain:
    # Training split: shuffled, with train-time preprocessing.
    train_set = HeightDataset(root=args.train_data, txt=args.train_list,
                              train=True, scale_size=(640, 360))
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=args.batch_size, shuffle=True,
        num_workers=4, pin_memory=True)
    # Validation split used between epochs (sample names not needed).
    test_set = HeightDataset(root=args.test_data, txt=args.test_list,
                             train=False, scale_size=(640, 360))
else:
    # Standalone evaluation: also yield each sample's name so per-image
    # results can be written to disk afterwards.
    test_set = HeightDataset(root=args.test_data, txt=args.test_list,
                             train=False, with_name=True, scale_size=(640, 360))

test_loader = torch.utils.data.DataLoader(
    test_set, batch_size=args.batch_size, shuffle=False,
    num_workers=4, pin_memory=True)
# Height-regression network, optionally warm-started from a checkpoint.
model = DetHeight()
if args.pretrained_model:
    print('load pretrained model %s' % args.pretrained_model)
    # NOTE(review): torch.load unpickles the file -- only load checkpoints
    # from trusted sources.
    model.load_state_dict(torch.load(args.pretrained_model))

# Optimize mean squared error on the predicted heights.
criterion = nn.MSELoss()
def L1Loss(pred, label):
    """Return the mean absolute error between predictions and targets."""
    return (pred - label).abs().mean()

# Move the model and loss onto the selected GPU when not running on CPU.
if args.gpu != -1:
    model = model.cuda()
    criterion = criterion.cuda()

# Plain SGD with momentum; weight decay lightly regularizes the regressor.
optimizer = torch.optim.SGD(model.parameters(),
                            args.lr,
                            momentum=0.9,
                            weight_decay=5e-4)

def adjust_learning_rate(optimizer, epoch, base_lr=None):
    """Halve the learning rate every 5 epochs.

    Sets every param group's lr to ``base_lr * 0.5 ** (epoch // 5)``.

    Fixes two defects in the original: the docstring claimed "decayed by 2
    every 25 epochs" while the code decays every 5, and the code multiplied
    the *current* lr by the decay factor, compounding the decay on every
    call instead of deriving it from the initial rate (doTrain prints
    ``args.lr * 0.5 ** (epoch // 5)``, which is the schedule intended here).

    Args:
        optimizer: optimizer whose param groups are updated in place.
        epoch: zero-based epoch index.
        base_lr: initial learning rate; defaults to the --lr CLI argument.
    """
    if base_lr is None:
        base_lr = args.lr
    lr = base_lr * (0.5 ** (epoch // 5))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

def doTrain(epoch):
    """Run one optimization pass over train_loader and print the mean L1 error.

    Uses the module-level model, criterion, optimizer and train_loader.
    The printed lr mirrors the (currently disabled) halve-every-5-epochs
    schedule rather than reading the optimizer's actual lr.
    """
    model.train()
    total_loss = 0.0
    total_error = 0.0
    #adjust_learning_rate(optimizer, epoch)
    for batch in train_loader:
        inputs, targets = batch
        targets = targets.type(torch.FloatTensor)
        if args.gpu != -1:
            inputs = inputs.cuda()
            targets = targets.cuda()
        inputs = torch.autograd.Variable(inputs)
        targets = torch.autograd.Variable(targets)
        pred = model(inputs)
        loss = criterion(pred, targets)       # MSE drives the gradients
        error = L1Loss(pred, targets)         # L1 is only reported
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.data[0]
        total_error += error.data[0]

    n_batches = float(len(train_loader))
    total_loss = total_loss / n_batches       # kept for parity; not printed
    total_error = total_error / n_batches
    print('Epoch:[%d], Train Error: %.4f, lr:%.6f' % (epoch, total_error, args.lr * float(0.5 ** (epoch // 5))))

def doValidate():
    """Evaluate the model on test_loader.

    In training mode (--isTrain 1) this also checkpoints the model whenever
    the mean L1 error improves on the best seen so far.  In evaluation mode
    it additionally writes per-sample results to --test_res_savepath.

    Returns:
        Mean MSE loss over the test set.
    """
    global best_error
    model.eval()
    mean_loss = 0.0
    mean_error = 0.0
    if not args.isTrain:
        test_res = []
    for batch in test_loader:
        if args.isTrain:
            input, label = batch
        else:
            # with_name=True datasets also yield the sample names.
            input, label, names = batch
        label = label.type(torch.FloatTensor)
        if args.gpu != -1:
            input = input.cuda()
            label = label.cuda()
        input = torch.autograd.Variable(input)
        label = torch.autograd.Variable(label)
        pred = model(input)
        loss = criterion(pred, label)
        error = L1Loss(pred, label)
        mean_loss += loss.data[0]
        mean_error += error.data[0]
        # BUG FIX: this accumulation previously ran unconditionally, but
        # `test_res` and `names` are only bound when not training, so
        # validating during training raised NameError on the first batch.
        if not args.isTrain:
            test_res += save_height_res(pred.data.cpu().numpy(), label.data.cpu().numpy(), names)
    mean_loss = mean_loss / float(len(test_loader))
    mean_error /= float(len(test_loader))
    # Checkpoint whenever the validation error improves (training mode only).
    if mean_error < best_error and args.isTrain:
        best_error = mean_error
        torch.save(model.state_dict(), "./{}/{}".format(args.model_save_path, args.model_save_name))
    print('Test Error : %.4f meters \t Best: %.4f meters' % (mean_error, best_error))
    if not args.isTrain:
        with open(args.test_res_savepath, 'w') as f:
            f.writelines(test_res)
        print('test results saved to %s' % args.test_res_savepath)
    return mean_loss

# Entry point: either a full training run or a one-shot evaluation.
if not args.isTrain:
    # Pure evaluation: a single pass over the test set.
    mean_loss = doValidate()
    print('mean_loss = %.4f' % mean_loss)
else:
    # Train for the requested number of epochs, validating after each one;
    # doValidate() checkpoints the best model as a side effect.
    for epoch in range(args.epoches):
        doTrain(epoch)
        doValidate()







