import numpy as np
import easydict
import datetime
import pprint
import os
import shutil

import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import DataLoader

from Yolo.darknet import Darknet19
from DataSet.TongueDS import TongueDS,DLTransform
from Tools.tools import AverageMeter


##############################################
# Flat attribute-style training configuration.

args = easydict.EasyDict({
    'epochs': 300,               # total number of training epochs
    'lr': 0.001,                 # base SGD learning rate
    'momentum': 0.9,             # SGD momentum
    'lr_step': [50, 100, 150],   # epochs at which the LR is decayed by 10x
    'eval_freq': 3,              # validate/checkpoint every N epochs (also used as the print period)
    'weight_decay': 0.0005,      # L2 regularization strength
})

# Directory where checkpoints are written.
args.savedir = '/mnt/md0/Qiu/ExWorkspace/YOLO_TEST'

# NOTE(review): LR appears unused in this file — possibly leftover; kept for
# backward compatibility with any external importer.
LR = 0

##############################################


train_dataset = TongueDS('train')
test_dataset  = TongueDS('test')

train_dl = DataLoader(train_dataset,batch_size=32,shuffle=True,drop_last=True,num_workers=32)
test_dl  = DataLoader(test_dataset, batch_size=32,shuffle=True,drop_last=True,num_workers=32)

model = Darknet19().cuda()
# model = nn.DataParallel(model)
optim = torch.optim.SGD(model.parameters(),lr=args.lr,momentum=args.momentum,weight_decay=args.weight_decay)

print('START: ',datetime.datetime.now())

print('*'*30)
pprint.pprint(args)
print('*'*30)

def main():
    """Run the full training schedule.

    Validates and checkpoints every ``args.eval_freq`` epochs (and on the
    final epoch). If anything raises, a rescue checkpoint is written with
    ``epoch == -1`` before the exception propagates.
    """
    global args
    try:
        for epoch in range(args.epochs):
            adjust_learning_rate(optim, epoch, args.lr_step)
            Train(model, train_dl, optim, epoch)
            if epoch % args.eval_freq == 0 or epoch == args.epochs - 1:
                Validation(model, test_dl, optim, epoch)
                save_checkpoint({'epoch': epoch + 1,
                                 'arch': 'darknet19',
                                 'state_dict': model.state_dict()}, False)
    except Exception:
        # Best-effort crash dump; epoch -1 marks it as an emergency save.
        save_checkpoint({'epoch': -1,
                         'arch': 'darknet19',
                         'state_dict': model.state_dict()}, False)
        # Bare `raise` preserves the original traceback; the original
        # `raise e` re-anchored it at this line.
        raise

def Train(model, dataloader, optim, epoch):
    """Run one training epoch: forward, backward and SGD step per batch,
    printing running loss averages every ``args.eval_freq`` batches."""
    global args
    model.train()
    optim.zero_grad()

    loss_meter = AverageMeter()
    iou_meter  = AverageMeter()
    bbox_meter = AverageMeter()
    cls_meter  = AverageMeter()

    for step, batch in enumerate(dataloader):
        batch = DLTransform(batch, volatile=False)
        model.zero_grad()
        n_samples = len(batch[-2])
        out = model(*batch)
        loss = model.loss
        loss.backward()
        optim.step()

        # NOTE: ``.data[0]`` is the pre-0.4 PyTorch scalar accessor, matching
        # the legacy Variable/volatile API used throughout this file.
        loss_meter.update(loss.data[0], n_samples)
        iou_meter.update(model.iou_loss.data[0], n_samples)
        bbox_meter.update(model.bbox_loss.data[0], n_samples)
        cls_meter.update(model.cls_loss.data[0], n_samples)

        if step % args.eval_freq == 0:
            msg = ('\nEpoch: {0}[{1}/{2}] lr: {lr:0.5f} '
                   'Loss {losses.val:.3f}({losses.avg:.3f})\t'
                   'Iou. Loss {iou_losses.val:.3f}({iou_losses.avg:.3f})\t'
                   'BBox. Loss {bbox_losses.val:.3f}({bbox_losses.avg:.3f})\t'
                   'Cls. Loss {cls_losses.val:.3f}({cls_losses.avg:.3f})\t')
            print(msg.format(epoch, step, len(dataloader),
                             lr=optim.param_groups[0]['lr'],
                             losses=loss_meter, iou_losses=iou_meter,
                             bbox_losses=bbox_meter, cls_losses=cls_meter),
                  flush=True)

def Validation(model, dataloader, optim, epoch):
    """Evaluate the model over *dataloader*, printing periodic batch losses
    and a final summary of the averaged test losses."""
    global args
    model.eval()

    loss_meter = AverageMeter()
    iou_meter  = AverageMeter()
    bbox_meter = AverageMeter()
    cls_meter  = AverageMeter()

    for step, batch in enumerate(dataloader):
        # volatile=True: legacy (pre-0.4) way of disabling autograd for eval.
        batch = DLTransform(batch, volatile=True)
        n_samples = len(batch[-2])
        out = model(*batch)
        loss = model.loss

        loss_meter.update(loss.data[0], n_samples)
        iou_meter.update(model.iou_loss.data[0], n_samples)
        bbox_meter.update(model.bbox_loss.data[0], n_samples)
        cls_meter.update(model.cls_loss.data[0], n_samples)

        if step % args.eval_freq == 0:
            msg = ('\nEpoch: {0}[{1}/{2}] lr: {lr:0.5f} '
                   'Loss {losses.val:.3f}({losses.avg:.3f})\t'
                   'Iou. Loss {iou_losses.val:.3f}({iou_losses.avg:.3f})\t'
                   'BBox. Loss {bbox_losses.val:.3f}({bbox_losses.avg:.3f})\t'
                   'Cls. Loss {cls_losses.val:.3f}({cls_losses.avg:.3f})\t')
            print(msg.format(epoch, step, len(dataloader),
                             lr=optim.param_groups[0]['lr'],
                             losses=loss_meter, iou_losses=iou_meter,
                             bbox_losses=bbox_meter, cls_losses=cls_meter),
                  flush=True)

    summary = ('\nTesting Result: Loss {loss.avg:.4f}\t'
               'Iou. Loss {iou_losses.val:.5f}({iou_losses.avg:.5f})\t'
               'BBox. Loss {bbox_losses.val:.5f}({bbox_losses.avg:.5f})\t'
               'Cls. Loss {cls_losses.val:.5f}({cls_losses.avg:.5f})\t')
    print(summary.format(loss=loss_meter, iou_losses=iou_meter,
                         bbox_losses=bbox_meter, cls_losses=cls_meter),
          flush=True)


def adjust_learning_rate(optimizer, epoch, lr_steps):
    """Step-decay schedule: multiply the base LR (``args.lr``) by 0.1 for
    every milestone in *lr_steps* that *epoch* has already reached, and
    write the result into every parameter group of *optimizer*."""
    milestones_passed = sum(1 for step in lr_steps if epoch >= step)
    lr = args.lr * (0.1 ** milestones_passed)
    for group in optimizer.param_groups:
        group['lr'] = lr


saved_queue = []  # FIFO of checkpoint paths already written; bounded below.
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize *state* under ``args.savedir``, keeping only recent files.

    The file is named ``YoloDetection_<epoch>_<filename>``. Once more than
    50 checkpoints have accumulated, the oldest is deleted before each new
    save so the directory stays bounded.

    Parameters:
        state (dict): must contain an 'epoch' key; saved verbatim via torch.save.
        is_best (bool): currently unused — kept for interface compatibility.
        filename (str): suffix for the generated checkpoint name.
    """
    epoch = state['epoch']

    savedir = args.savedir
    # makedirs(exist_ok=True) also creates missing parent directories and
    # avoids the check-then-create race of the original exists()/mkdir pair.
    os.makedirs(savedir, exist_ok=True)

    filename = savedir + '/YoloDetection_' + '_'.join(
        (str(epoch), filename))

    global saved_queue
    if len(saved_queue) > 50:
        oldest = saved_queue.pop(0)
        try:
            os.remove(oldest)
        except OSError:
            # An already-missing old checkpoint must not abort the new save.
            pass

    saved_queue.append(filename)
    torch.save(state, filename)


# Script entry point: start training only when run directly, not on import.
if __name__ == '__main__':
    main()
