#coding=utf8
from __future__ import division
import torch
import os,time
from torch.autograd import Variable
import logging
from models.ssd.ssd import build_ssd
import numpy as np

# Make every newly created tensor a CUDA float by default —
# NOTE(review): this assumes a GPU is available at import time.
torch.set_default_tensor_type('torch.cuda.FloatTensor')

def trainlog(logfilepath, head='%(asctime)-15s %(message)s'):
    """Configure root logging to write to ``logfilepath`` and mirror to stderr.

    Args:
        logfilepath: path of the log file (opened by ``basicConfig``).
        head: log-record format string used for both handlers.
    """
    # File handler via basicConfig (a no-op if root logging is already
    # configured — call this once, early).
    logging.basicConfig(filename=logfilepath, level=logging.INFO, format=head)

    # Mirror records to the console with the same format.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter(head))
    logging.getLogger('').addHandler(console)


def train(model,
          epoch_num,
          batch_size,
          start_epoch,
          optimizer,
          criterion,
          exp_lr_scheduler,
          dataset,
          data_loader,
          usecuda,
          save_inter,
          save_dir
          ):
    """Train an SSD model and evaluate per-class AP after every epoch.

    Args:
        model: SSD network built with phase 'train' (wrapped in
            ``nn.DataParallel`` when ``usecuda`` is True).
        epoch_num: epoch index to stop at (exclusive).
        batch_size: samples per batch; used to derive iterations per epoch.
        start_epoch: epoch index to resume from.
        optimizer: torch optimizer over ``model`` parameters.
        criterion: returns ``(loc_loss, conf_loss)`` given (output, targets).
        exp_lr_scheduler: LR scheduler stepped once per epoch.
        dataset: dict with 'train'/'val' dataset objects.
        data_loader: dict with 'train'/'val' DataLoaders.
        usecuda: run on GPU (model assumed DataParallel-wrapped).
        save_inter: checkpoint every ``save_inter`` epochs.
        save_dir: directory for checkpoint files.
    """
    # DataParallel hides the real network behind .module
    if usecuda:
        model_size = model.module.size
        model_num_classes = model.module.num_classes
    else:
        model_size = model.size
        model_num_classes = model.num_classes

    # Separate 'test'-phase network used only for mAP evaluation
    # (its forward pass decodes boxes and applies NMS).
    model_eval = build_ssd('test', model_size, model_num_classes)
    if usecuda:
        model_eval = torch.nn.DataParallel(model_eval)

    print('Training SSD on "%s" ' % dataset['train'].name)
    print('training')

    epoch_t0 = time.time()
    for epoch in range(start_epoch, epoch_num):
        epoch_t1 = time.time()
        epoch_since = epoch_t1 - epoch_t0
        epoch_t0 = epoch_t1

        for phase in ['train', 'val']:
            epoch_size = len(dataset[phase]) // batch_size

            if phase == 'train':
                exp_lr_scheduler.step(epoch)
                logging.info('current lr:%s' % exp_lr_scheduler.get_lr())
                model.train(True)  # Set model to training mode
            else:
                model.train(False)

            epoch_loss = 0

            batch_t0 = time.time()
            for batch_cnt, data in enumerate(data_loader[phase]):
                batch_t1 = time.time()
                batch_since = batch_t1 - batch_t0
                batch_t0 = batch_t1

                images, targets = data
                if usecuda:
                    images = Variable(images.cuda())
                    # targets carry no gradient; volatile avoids graph buildup
                    targets = [Variable(anno.cuda(), volatile=True) for anno in targets]
                else:
                    images = Variable(images)
                    targets = [Variable(anno, volatile=True) for anno in targets]

                out = model(images)

                # backprop
                optimizer.zero_grad()
                loss_l, loss_c = criterion(out, targets)
                loss = loss_l + loss_c

                if phase == 'train':
                    loss.backward()
                    optimizer.step()

                epoch_loss += loss.data[0]

                # batch loss
                if (batch_cnt % 10 == 0) and phase == 'train':
                    logging.info('epoch[%d]-iter[%d] || batch-loss: %.4f || %.3f sec/batch '
                                 % (epoch, batch_cnt, loss.data[0], batch_since))

            epoch_loss = epoch_loss / epoch_size

            logging.info('epoch[%d]-%s-loss: %.4f '
                         % (epoch, phase, epoch_loss))

            if epoch % save_inter == 0 and phase == 'val':
                save_path = os.path.join(save_dir, 'weights-%d-[%.4f].pth' % (epoch, epoch_loss))
                # Fix: unwrap DataParallel so the checkpoint is loadable
                # without the 'module.' key prefix (both branches used to
                # save model.state_dict()).
                if usecuda:
                    weights_dict = model.module.state_dict()
                else:
                    weights_dict = model.state_dict()
                torch.save(weights_dict, save_path)
                logging.info('saved model to %s' % (save_path))

        # Evaluate mAP on dataset['val'] with the CURRENT weights.
        # Fix: previously loaded a hard-coded absolute checkpoint path.
        # model and model_eval are wrapped identically, so keys match.
        model_eval.load_state_dict(model.state_dict())
        if usecuda:
            model_eval.cuda()
        model_eval.eval()

        _eval_map(model_eval, dataset, data_loader, batch_size,
                  model_num_classes, usecuda)

        logging.info('----time cost: %d sec' % epoch_since)
        logging.info('===' * 20)


def _eval_map(model_eval, dataset, data_loader, batch_size,
              model_num_classes, usecuda):
    """Run the eval network over dataset['val'] and log per-class AP."""
    # Buffers for the top-200 detections per image per class.
    # todo batch_cnt should be set to 1 , base transform
    det_imgpaths = np.zeros((len(dataset['val']), model_num_classes, 200, 1), dtype=int)
    det_result = np.zeros((len(dataset['val']), model_num_classes, 200, 5), dtype=np.float32)
    det_imgsz = np.zeros((len(dataset['val']), model_num_classes, 200, 4), dtype=np.float32)

    for batch_cnt, data in enumerate(data_loader['val']):
        images, targets = data
        if usecuda:
            images = Variable(images.cuda())
            targets = [Variable(anno.cuda(), volatile=True) for anno in targets]
        else:
            images = Variable(images)
            targets = [Variable(anno, volatile=True) for anno in targets]

        detections = model_eval(images)
        # (N, num_classes, 200, 5); each row is [confidence, x1, y1, x2, y2]
        detections = detections.cpu().data.numpy()

        # Global image indices covered by this batch (last batch may be short).
        fill_idx = range(batch_cnt * batch_size, batch_cnt * batch_size + detections.shape[0])
        for idx in fill_idx:
            det_imgpaths[idx] = dataset['val'].img_ids[idx]
            # Broadcast image width/height so boxes can be rescaled below.
            det_imgsz[idx, :, :, [0, 2]] = dataset['val'].img_ws[idx]
            det_imgsz[idx, :, :, [1, 3]] = dataset['val'].img_hs[idx]

        det_result[fill_idx] = detections
        det_result[fill_idx, :, :, 1:] *= det_imgsz[fill_idx]  # absolute-size bbox

    # Prepend image ids -> (num_imgs, classes, 200, 6)
    det_result = np.concatenate((det_imgpaths, det_result), axis=3)

    for class_name in dataset['val'].class_map.keys():
        # extract gt objects for this class
        class_recs, npos, class_idx = get_class_recs(dataset['val'], class_name=class_name)

        # Flatten detections of this class; keep confident (>0) ones only.
        class_dets = det_result[:, class_idx, :, :].reshape(-1, 6)
        class_dets = class_dets[class_dets[:, 1] > 0.0]
        print('%s %d' % (class_name, len(class_dets)))

        # cal AP for this class
        rec, prec, ap = cal_class_ap(class_dets, class_recs, npos, ovthresh=0.3)
        logging.info('AP for "%s" : %.4f' % (class_name, ap))

def get_class_recs(dataset, class_name):
    """Collect ground-truth boxes of one class for every image in *dataset*.

    Returns:
        (class_recs, npos, class_idx) where class_recs maps img_id ->
        {'bbox': (K, 4) array, 'det': [False] * K}, npos is the total
        ground-truth object count, and class_idx is the class column in the
        detection tensor (label + 1, slot 0 being background).
    """
    class_idx = dataset.class_map[class_name] + 1

    recs = {}
    total_objects = 0  # running count of GT boxes for this class
    annotations = dataset.anno
    for img_id in dataset.img_ids:
        img_name = dataset.img_paths[img_id]
        mask = (annotations['filename'] == img_name) & (annotations['class'] == class_idx - 1)
        matched = annotations[mask]
        boxes = [list(row) for row in matched[['xmin', 'ymin', 'xmax', 'ymax']].values]
        total_objects += len(matched)
        recs[img_id] = {'bbox': np.array(boxes), 'det': [False] * len(matched)}
    return recs, total_objects, class_idx


def cal_class_ap(class_dets, class_recs, npos, ovthresh, use_07_metric=True):
    """VOC-style recall/precision/AP for one class.

    Args:
        class_dets: (N, 6) array [img_id, confidence, xmin, ymin, xmax, ymax].
        class_recs: dict img_id -> {'bbox': (K, 4) GT boxes, 'det': list of
            matched flags}; 'det' entries are MUTATED when a GT box is matched.
        npos: total ground-truth object count for this class.
        ovthresh: IoU threshold for counting a true positive.
        use_07_metric: use the VOC-2007 11-point interpolation in voc_ap.

    Returns:
        (rec, prec, ap); all three are -1. when there are no detections.
    """
    if len(class_dets) > 0:

        # rank detections by descending confidence
        # (fix: removed unused sorted_scores)
        sorted_ind = np.argsort(-class_dets[:, 1])
        BB = class_dets[sorted_ind, 2:]
        image_ids = class_dets[sorted_ind, 0]

        # go down dets and mark TPs and FPs
        nd = len(image_ids)
        tp = np.zeros(nd)
        fp = np.zeros(nd)

        for d in range(nd):

            R = class_recs[image_ids[d]]
            bb = BB[d, :].astype(float)
            ovmax = -np.inf
            BBGT = R['bbox'].astype(float)
            if BBGT.size > 0:
                # IoU of this detection against every GT box:
                # intersection ...
                ixmin = np.maximum(BBGT[:, 0], bb[0])
                iymin = np.maximum(BBGT[:, 1], bb[1])
                ixmax = np.minimum(BBGT[:, 2], bb[2])
                iymax = np.minimum(BBGT[:, 3], bb[3])
                iw = np.maximum(ixmax - ixmin, 0.)
                ih = np.maximum(iymax - iymin, 0.)
                inters = iw * ih
                # ... over union
                uni = ((bb[2] - bb[0]) * (bb[3] - bb[1]) +
                       (BBGT[:, 2] - BBGT[:, 0]) *
                       (BBGT[:, 3] - BBGT[:, 1]) - inters)
                overlaps = inters / uni
                ovmax = np.max(overlaps)
                jmax = np.argmax(overlaps)

            if ovmax > ovthresh:  # if greater than ovthresh
                if not R['det'][jmax]:  # first match of this GT -> TP
                    tp[d] = 1.
                    R['det'][jmax] = 1
                else:
                    # duplicate detection of an already-matched GT box
                    fp[d] = 1.
            else:
                fp[d] = 1.
        # fix: parenthesized print — same output under Python 2 and 3
        print('tp-%d, fp-%d' % (tp.sum(), fp.sum()))
        # compute precision recall
        fp = np.cumsum(fp)
        tp = np.cumsum(tp)
        rec = tp / float(npos)
        # avoid divide by zero in case the first detection matches a difficult
        # ground truth
        prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
        ap = voc_ap(rec, prec, use_07_metric=use_07_metric)
    else:
        # no detections at all for this class
        rec = -1.
        prec = -1.
        ap = -1.

    return rec, prec, ap


def voc_ap(rec, prec, use_07_metric=True):
    """Compute VOC average precision from recall/precision arrays.

    With ``use_07_metric`` (the default here) the VOC-2007 11-point
    interpolation is used; otherwise AP is the exact area under the
    interpolated precision/recall curve.
    """
    if use_07_metric:
        # Average the best precision at 11 evenly spaced recall levels.
        ap = 0.
        for threshold in np.arange(0., 1.1, 0.1):
            reached = rec >= threshold
            p = np.max(prec[reached]) if np.sum(reached) > 0 else 0
            ap += p / 11.
        return ap

    # Exact area: pad with sentinel points, take the precision envelope
    # (running max from the right), then integrate over recall steps.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([1.], prec, [0.]))
    for k in range(mpre.size - 1, 0, -1):
        mpre[k - 1] = np.maximum(mpre[k - 1], mpre[k])

    # Indices where recall changes value — the width of each step.
    steps = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[steps + 1] - mrec[steps]) * mpre[steps + 1])