import sys
import os
import time
import math
import torch
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from torch.autograd import Variable

import struct # get_image_size
import imghdr # get_image_size

def sigmoid(x):
    """Logistic sigmoid of a scalar: 1 / (1 + e^-x)."""
    return 1.0 / (1.0 + math.exp(-x))

def softmax(x):
    """Numerically stable softmax over every element of tensor x."""
    shifted = x - torch.max(x)  # subtract max so exp() cannot overflow
    exps = torch.exp(shifted)
    return exps / exps.sum()

def bbox_iou(box1, box2, x1y1x2y2=True):
    """Intersection-over-union of two boxes.

    Boxes are corner format [x1, y1, x2, y2] when x1y1x2y2=True, otherwise
    center format [cx, cy, w, h].  Returns 0.0 when the boxes do not overlap.
    (Fix: removed a dead `carea = 0` assignment that was never read.)
    """
    if x1y1x2y2:
        mx = min(box1[0], box2[0])
        Mx = max(box1[2], box2[2])
        my = min(box1[1], box2[1])
        My = max(box1[3], box2[3])
        w1 = box1[2] - box1[0]
        h1 = box1[3] - box1[1]
        w2 = box2[2] - box2[0]
        h2 = box2[3] - box2[1]
    else:
        mx = min(box1[0]-box1[2]/2.0, box2[0]-box2[2]/2.0)
        Mx = max(box1[0]+box1[2]/2.0, box2[0]+box2[2]/2.0)
        my = min(box1[1]-box1[3]/2.0, box2[1]-box2[3]/2.0)
        My = max(box1[1]+box1[3]/2.0, box2[1]+box2[3]/2.0)
        w1 = box1[2]
        h1 = box1[3]
        w2 = box2[2]
        h2 = box2[3]
    # uw/uh span the union's bounding box; cw/ch are the intersection extents.
    uw = Mx - mx
    uh = My - my
    cw = w1 + w2 - uw
    ch = h1 + h2 - uh
    if cw <= 0 or ch <= 0:
        return 0.0

    area1 = w1 * h1
    area2 = w2 * h2
    carea = cw * ch
    uarea = area1 + area2 - carea
    return carea/uarea

def bbox_ious(boxes1, boxes2, x1y1x2y2=True):
    """Element-wise IoU between two sets of boxes stored as 4xN tensors.

    Rows are [x1, y1, x2, y2] when x1y1x2y2=True, otherwise [cx, cy, w, h].
    Returns a length-N tensor of IoUs; non-overlapping pairs are set to 0.
    """
    if x1y1x2y2:
        mx = torch.min(boxes1[0], boxes2[0])
        Mx = torch.max(boxes1[2], boxes2[2])
        my = torch.min(boxes1[1], boxes2[1])
        My = torch.max(boxes1[3], boxes2[3])
        w1 = boxes1[2] - boxes1[0]
        h1 = boxes1[3] - boxes1[1]
        w2 = boxes2[2] - boxes2[0]
        h2 = boxes2[3] - boxes2[1]
    else:
        mx = torch.min(boxes1[0]-boxes1[2]/2.0, boxes2[0]-boxes2[2]/2.0)
        Mx = torch.max(boxes1[0]+boxes1[2]/2.0, boxes2[0]+boxes2[2]/2.0)
        my = torch.min(boxes1[1]-boxes1[3]/2.0, boxes2[1]-boxes2[3]/2.0)
        My = torch.max(boxes1[1]+boxes1[3]/2.0, boxes2[1]+boxes2[3]/2.0)
        w1 = boxes1[2]
        h1 = boxes1[3]
        w2 = boxes2[2]
        h2 = boxes2[3]
    uw = Mx - mx
    uh = My - my
    cw = w1 + w2 - uw
    ch = h1 + h2 - uh
    # Fix: combine the boolean masks with logical-or instead of the old
    # `(a + b) > 0` idiom, which relies on deprecated bool-addition semantics.
    mask = (cw <= 0) | (ch <= 0)
    area1 = w1 * h1
    area2 = w2 * h2
    carea = cw * ch
    carea[mask] = 0
    uarea = area1 + area2 - carea
    return carea/uarea

def nms(boxes, nms_thresh):
    """Greedy non-maximum suppression over a list of boxes.

    Boxes are [cx, cy, w, h, det_conf, ...]; suppressed boxes have their
    det_conf (index 4) zeroed in place.  Returns the surviving boxes in
    descending-confidence order.
    """
    if len(boxes) == 0:
        return boxes

    # Sort ascending on (1 - conf), i.e. highest confidence first.
    inv_confs = torch.zeros(len(boxes))
    for idx, box in enumerate(boxes):
        inv_confs[idx] = 1 - box[4]

    _, order = torch.sort(inv_confs)
    kept = []
    for rank in range(len(boxes)):
        candidate = boxes[order[rank]]
        if candidate[4] <= 0:
            continue  # already suppressed by an earlier, stronger box
        kept.append(candidate)
        for later in range(rank + 1, len(boxes)):
            other = boxes[order[later]]
            if bbox_iou(candidate, other, x1y1x2y2=False) > nms_thresh:
                other[4] = 0
    return kept

def convert2cpu(gpu_matrix):
    """Copy a (possibly GPU-resident) tensor into a fresh CPU FloatTensor."""
    cpu_copy = torch.FloatTensor(gpu_matrix.size())
    return cpu_copy.copy_(gpu_matrix)

def convert2cpu_long(gpu_matrix):
    """Copy a (possibly GPU-resident) tensor into a fresh CPU LongTensor."""
    cpu_copy = torch.LongTensor(gpu_matrix.size())
    return cpu_copy.copy_(gpu_matrix)

def get_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors, only_objectness=1, validation=False):
    """Decode a YOLOv2 region-layer output tensor into per-image box lists.

    output: (batch, (5+num_classes)*num_anchors, h, w) CUDA tensor.
    Returns a list of length batch; each entry is a list of boxes
    [bcx/w, bcy/h, bw/w, bh/h, det_conf, cls_max_conf, cls_max_id] whose
    confidence exceeds conf_thresh.
    """
    # Bug fix (Py2->Py3): '/' yields a float; .view() below requires an int.
    anchor_step = len(anchors) // num_anchors
    if output.dim() == 3:
        output = output.unsqueeze(0)
    batch = output.size(0)
    assert(output.size(1) == (5+num_classes)*num_anchors)
    h = output.size(2)
    w = output.size(3)

    t0 = time.time()
    all_boxes = []
    # Reorder so each prediction field (x, y, w, h, conf, classes...) is a row.
    output = output.view(batch*num_anchors, 5+num_classes, h*w).transpose(0,1).contiguous().view(5+num_classes, batch*num_anchors*h*w)

    # Per-cell grid offsets added to the sigmoided x/y predictions.
    grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
    grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
    xs = torch.sigmoid(output[0]) + grid_x
    ys = torch.sigmoid(output[1]) + grid_y

    anchor_w = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([0]))
    anchor_h = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([1]))
    anchor_w = anchor_w.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).cuda()
    anchor_h = anchor_h.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).cuda()
    ws = torch.exp(output[2]) * anchor_w
    hs = torch.exp(output[3]) * anchor_h

    det_confs = torch.sigmoid(output[4])

    # Bug fix: dim-less Softmax is deprecated/ambiguous; class scores sit on dim 1.
    cls_confs = torch.nn.Softmax(dim=1)(Variable(output[5:5+num_classes].transpose(0,1))).data
    cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
    cls_max_confs = cls_max_confs.view(-1)
    cls_max_ids = cls_max_ids.view(-1)
    t1 = time.time()

    sz_hw = h*w
    sz_hwa = sz_hw*num_anchors
    det_confs = convert2cpu(det_confs)
    cls_max_confs = convert2cpu(cls_max_confs)
    cls_max_ids = convert2cpu_long(cls_max_ids)
    xs = convert2cpu(xs)
    ys = convert2cpu(ys)
    ws = convert2cpu(ws)
    hs = convert2cpu(hs)
    if validation:
        cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
    t2 = time.time()
    for b in range(batch):
        boxes = []
        for cy in range(h):
            for cx in range(w):
                for i in range(num_anchors):
                    ind = b*sz_hwa + i*sz_hw + cy*w + cx
                    det_conf = det_confs[ind]
                    if only_objectness:
                        conf = det_confs[ind]
                    else:
                        conf = det_confs[ind] * cls_max_confs[ind]

                    if conf > conf_thresh:
                        bcx = xs[ind]
                        bcy = ys[ind]
                        bw = ws[ind]
                        bh = hs[ind]
                        cls_max_conf = cls_max_confs[ind]
                        cls_max_id = cls_max_ids[ind]
                        # Coordinates are normalized by the feature-map size.
                        box = [bcx/w, bcy/h, bw/w, bh/h, det_conf, cls_max_conf, cls_max_id]
                        if (not only_objectness) and validation:
                            # Also emit any runner-up classes that clear the threshold.
                            for c in range(num_classes):
                                tmp_conf = cls_confs[ind][c]
                                if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
                                    box.append(tmp_conf)
                                    box.append(c)
                        boxes.append(box)
        all_boxes.append(boxes)
    t3 = time.time()
    if False:
        print('---------------------------------')
        print('matrix computation : %f' % (t1-t0))
        print('        gpu to cpu : %f' % (t2-t1))
        print('      boxes filter : %f' % (t3-t2))
        print('---------------------------------')
    return all_boxes

def get_region_boxes_v2(output, n_models, conf_thresh, num_classes, anchors, num_anchors, only_objectness=1, validation=False):
    """Decode region output for a multi-model (ensemble) batch.

    Like get_region_boxes, but the batch packs n_models sub-batches and the
    class scores are softmax-normalized across the models' predictions.
    """
    cs = n_models
    nA = num_anchors
    nC = num_classes
    # Bug fix (Py2->Py3): '/' yields a float; .view() below requires an int.
    anchor_step = len(anchors) // num_anchors
    if output.dim() == 3:
        output = output.unsqueeze(0)
    batch = output.size(0)
    assert(output.size(1) == (5+num_classes)*num_anchors)
    nH = h = output.size(2)
    nW = w = output.size(3)
    assert (batch % n_models == 0)
    bs = batch // n_models

    t0 = time.time()
    all_boxes = []
    # Extract the class-score channels and softmax them across the cs models.
    cls = output.view(output.size(0), nA, (5+nC), nH, nW)
    cls = cls.index_select(2, torch.linspace(5,5+nC-1,nC).long().cuda()).squeeze()
    cls = cls.view(bs, cs, nA*nC*nH*nW).transpose(1,2).contiguous().view(bs*nA*nC*nH*nW, cs)
    normfn = torch.nn.Softmax(dim=1)

    cls = normfn(Variable(cls)).data
    cls_confs = cls.view(bs, nA*nC*nH*nW, cs).transpose(1,2).contiguous().view(bs*cs*nA, nC, nH*nW).transpose(1,2).view(bs*cs*nA*nH*nW, nC)

    # Reorder so each prediction field (x, y, w, h, conf, classes...) is a row.
    output = output.view(batch*num_anchors, 5+num_classes, h*w).transpose(0,1).contiguous().view(5+num_classes, batch*num_anchors*h*w)

    grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
    grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
    xs = torch.sigmoid(output[0]) + grid_x
    ys = torch.sigmoid(output[1]) + grid_y

    anchor_w = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([0]))
    anchor_h = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([1]))
    anchor_w = anchor_w.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).cuda()
    anchor_h = anchor_h.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).cuda()
    ws = torch.exp(output[2]) * anchor_w
    hs = torch.exp(output[3]) * anchor_h

    det_confs = torch.sigmoid(output[4])

    cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
    cls_max_confs = cls_max_confs.view(-1)
    cls_max_ids = cls_max_ids.view(-1)
    t1 = time.time()

    sz_hw = h*w
    sz_hwa = sz_hw*num_anchors
    det_confs = convert2cpu(det_confs)
    cls_max_confs = convert2cpu(cls_max_confs)
    cls_max_ids = convert2cpu_long(cls_max_ids)
    xs = convert2cpu(xs)
    ys = convert2cpu(ys)
    ws = convert2cpu(ws)
    hs = convert2cpu(hs)
    if validation:
        cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
    t2 = time.time()
    for b in range(batch):
        boxes = []
        for cy in range(h):
            for cx in range(w):
                for i in range(num_anchors):
                    ind = b*sz_hwa + i*sz_hw + cy*w + cx
                    det_conf = det_confs[ind]
                    if only_objectness:
                        conf = det_confs[ind]
                    else:
                        conf = det_confs[ind] * cls_max_confs[ind]

                    if conf > conf_thresh:
                        bcx = xs[ind]
                        bcy = ys[ind]
                        bw = ws[ind]
                        bh = hs[ind]
                        cls_max_conf = cls_max_confs[ind]
                        cls_max_id = cls_max_ids[ind]
                        box = [bcx/w, bcy/h, bw/w, bh/h, det_conf, cls_max_conf, cls_max_id]
                        if (not only_objectness) and validation:
                            for c in range(num_classes):
                                tmp_conf = cls_confs[ind][c]
                                if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
                                    box.append(tmp_conf)
                                    box.append(c)
                        boxes.append(box)
        all_boxes.append(boxes)
    t3 = time.time()
    if False:
        print('---------------------------------')
        print('matrix computation : %f' % (t1-t0))
        print('        gpu to cpu : %f' % (t2-t1))
        print('      boxes filter : %f' % (t3-t2))
        print('---------------------------------')
    return all_boxes


def get_region_boxes_v3(output, n_models, conf_thresh, num_classes, anchors, num_anchors, feature_scale, input_size,
                        only_objectness=1, validation=False):
    """Decode multi-scale (YOLOv3-style) region output for an ensemble batch.

    output: (batch, (5+num_classes)*num_anchors, N) with N the concatenated
    cell count over all feature scales.  Box coords are returned normalized
    by input_size (h, w).
    """
    cs = n_models # class number
    nA = num_anchors # anchor number of each node
    nC = num_classes # number of class in each pred result
    # Bug fix (Py2->Py3): keep anchor_step an int.
    anchor_step = len(anchors) // num_anchors
    batch = output.size(0)
    assert (output.size(1) == (5 + num_classes) * num_anchors)
    N = output.size(2)
    assert (batch % n_models == 0)
    bs = batch // n_models

    t0 = time.time()
    all_boxes = []
    # Extract class-score channels and softmax across the cs models.
    cls = output.view(output.size(0), nA, (5 + nC), N)
    cls = cls.index_select(2, torch.linspace(5, 5 + nC - 1, nC).long().cuda()).squeeze()
    cls = cls.view(bs, cs, nA * nC * N).transpose(1, 2).contiguous().view(bs * nA * nC * N, cs)
    normfn = torch.nn.Softmax(dim=1)

    cls = normfn(Variable(cls)).data
    cls_confs = cls.view(bs, nA * nC * N, cs).transpose(1, 2).contiguous().view(bs * cs * nA, nC, N)\
        .transpose(1, 2).view(bs * cs * nA * N, nC)

    output = output.view(batch * num_anchors, 5 + num_classes, N).transpose(0, 1).contiguous()\
        .view(5 + num_classes, batch * num_anchors * N)

    grid_x = []
    grid_y = []
    anchor_w = []
    anchor_h = []
    scale = []
    feature_size = []
    for fs in feature_scale:
        # Bug fix (Py2->Py3): '//' keeps the feature-map sizes integral;
        # floats break torch.linspace/.view/.repeat below.
        feature_h = input_size[0] // fs
        feature_w = input_size[1] // fs
        feature_size.append([feature_h, feature_w])
        grid_x.append(torch.linspace(0, feature_w - 1, feature_w).repeat(feature_h, 1) \
                      .repeat(batch * nA, 1, 1).view(batch, nA, feature_h * feature_w).cuda())
        grid_y.append(torch.linspace(0, feature_h - 1, feature_h).repeat(feature_w, 1).t() \
                      .repeat(batch * nA, 1, 1).view(batch, nA, feature_h * feature_w).cuda())
        scale.append((torch.ones(batch, nA, feature_h * feature_w) * fs).cuda())
    grid_x = torch.cat(grid_x, 2).view(batch * nA * N)  # (nB, nA, N)
    grid_y = torch.cat(grid_y, 2).view(batch * nA * N)
    scale = torch.cat(scale, 2).view(batch * nA * N)
    # NOTE: assumes exactly 3 scales with 6 anchor values (3 anchors x 2) each.
    for i in range(3):
        aw = torch.Tensor(anchors[6 * i:6 * (i + 1)]).view(nA, -1) \
            .index_select(1, torch.LongTensor([0])).cuda()
        ah = torch.Tensor(anchors[6 * i:6 * (i + 1)]).view(nA, -1) \
            .index_select(1, torch.LongTensor([1])).cuda()
        anchor_w.append(aw.repeat(batch, feature_size[i][0] * feature_size[i][1]) \
                        .view(batch, nA, feature_size[i][0] * feature_size[i][1]))
        anchor_h.append(ah.repeat(batch, feature_size[i][0] * feature_size[i][1]) \
                        .view(batch, nA, feature_size[i][0] * feature_size[i][1]))
    anchor_w = torch.cat(anchor_w, 2).view(batch * nA * N)
    anchor_h = torch.cat(anchor_h, 2).view(batch * nA * N)

    # x/y are scaled back to input pixels via each cell's stride.
    xs = (torch.sigmoid(output[0]) + grid_x) * scale
    ys = (torch.sigmoid(output[1]) + grid_y) * scale
    ws = torch.exp(output[2]) * anchor_w
    hs = torch.exp(output[3]) * anchor_h

    det_confs = torch.sigmoid(output[4])
    # Debug output kept for parity with existing callers/logs.
    print('conf_number = {}'.format(torch.sum(det_confs > 0.5)))

    cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
    cls_max_confs = cls_max_confs.view(-1)
    cls_max_ids = cls_max_ids.view(-1)
    t1 = time.time()

    sz_na = N * num_anchors
    det_confs = convert2cpu(det_confs)
    cls_max_confs = convert2cpu(cls_max_confs)
    cls_max_ids = convert2cpu_long(cls_max_ids)
    xs = convert2cpu(xs)
    ys = convert2cpu(ys)
    ws = convert2cpu(ws)
    hs = convert2cpu(hs)
    if validation:
        cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
    t2 = time.time()
    for b in range(batch):
        boxes = []
        for n in range(N):
            for i in range(num_anchors):
                ind = b * sz_na + i * N + n
                det_conf = det_confs[ind]
                if only_objectness:
                    conf = det_confs[ind]
                else:
                    conf = det_confs[ind] * cls_max_confs[ind]

                if conf > conf_thresh:
                    bcx = xs[ind]
                    bcy = ys[ind]
                    bw = ws[ind]
                    bh = hs[ind]
                    cls_max_conf = cls_max_confs[ind]
                    cls_max_id = cls_max_ids[ind]
                    box = [bcx / input_size[1], bcy / input_size[0], bw / input_size[1], bh / input_size[0],\
                           det_conf, cls_max_conf, cls_max_id]
                    boxes.append(box)
        all_boxes.append(boxes)
    t3 = time.time()
    if False:
        print('---------------------------------')
        print('matrix computation : %f' % (t1 - t0))
        print('        gpu to cpu : %f' % (t2 - t1))
        print('      boxes filter : %f' % (t3 - t2))
        print('---------------------------------')
    return all_boxes


def get_region_boxes_v4(output, n_models, conf_thresh, num_classes, anchors, num_anchors, feature_scale, input_size,
                        only_objectness=1, validation=False):
    """Decode multi-scale ensemble output, gathering per-box fields from the
    model that produced the winning class (via gather on cls_max_ids).

    Like get_region_boxes_v3 but collapses the n_models dimension so the
    outer loop runs over bs = batch // n_models images.
    """
    cs = n_models # class number
    nA = num_anchors # anchor number of each node
    nC = num_classes # number of class in each pred result
    # Bug fix (Py2->Py3): keep anchor_step an int.
    anchor_step = len(anchors) // num_anchors
    batch = output.size(0)
    assert (output.size(1) == (5 + num_classes) * num_anchors)
    N = output.size(2)
    assert (batch % n_models == 0)
    bs = batch // n_models

    t0 = time.time()
    all_boxes = []
    # Extract class-score channels and softmax across the cs models.
    cls = output.view(output.size(0), nA, (5 + nC), N)
    cls = cls.index_select(2, torch.linspace(5, 5 + nC - 1, nC).long().cuda()).squeeze()
    cls = cls.view(bs, cs, nA * nC * N).transpose(1, 2).contiguous().view(bs * nA * nC * N, cs)
    normfn = torch.nn.Softmax(dim=1)

    cls_confs = normfn(Variable(cls)).data

    output = output.view(batch * num_anchors, 5 + num_classes, N).transpose(0, 1).contiguous()\
        .view(5 + num_classes, batch * num_anchors * N)

    grid_x = []
    grid_y = []
    anchor_w = []
    anchor_h = []
    scale = []
    feature_size = []
    for fs in feature_scale:
        # Bug fix (Py2->Py3): '//' keeps the feature-map sizes integral;
        # floats break torch.linspace/.view/.repeat below.
        feature_h = input_size[0] // fs
        feature_w = input_size[1] // fs
        feature_size.append([feature_h, feature_w])
        grid_x.append(torch.linspace(0, feature_w - 1, feature_w).repeat(feature_h, 1) \
                      .repeat(batch * nA, 1, 1).view(batch, nA, feature_h * feature_w).cuda())
        grid_y.append(torch.linspace(0, feature_h - 1, feature_h).repeat(feature_w, 1).t() \
                      .repeat(batch * nA, 1, 1).view(batch, nA, feature_h * feature_w).cuda())
        scale.append((torch.ones(batch, nA, feature_h * feature_w) * fs).cuda())
    grid_x = torch.cat(grid_x, 2).view(batch * nA * N)  # (nB, nA, N)
    grid_y = torch.cat(grid_y, 2).view(batch * nA * N)
    scale = torch.cat(scale, 2).view(batch * nA * N)
    # NOTE: assumes exactly 3 scales with 6 anchor values (3 anchors x 2) each.
    for i in range(3):
        aw = torch.Tensor(anchors[6 * i:6 * (i + 1)]).view(nA, -1) \
            .index_select(1, torch.LongTensor([0])).cuda()
        ah = torch.Tensor(anchors[6 * i:6 * (i + 1)]).view(nA, -1) \
            .index_select(1, torch.LongTensor([1])).cuda()
        anchor_w.append(aw.repeat(batch, feature_size[i][0] * feature_size[i][1]) \
                        .view(batch, nA, feature_size[i][0] * feature_size[i][1]))
        anchor_h.append(ah.repeat(batch, feature_size[i][0] * feature_size[i][1]) \
                        .view(batch, nA, feature_size[i][0] * feature_size[i][1]))
    anchor_w = torch.cat(anchor_w, 2).view(batch * nA * N)
    anchor_h = torch.cat(anchor_h, 2).view(batch * nA * N)

    # x/y are scaled back to input pixels via each cell's stride.
    xs = (torch.sigmoid(output[0]) + grid_x) * scale
    ys = (torch.sigmoid(output[1]) + grid_y) * scale
    ws = torch.exp(output[2]) * anchor_w
    hs = torch.exp(output[3]) * anchor_h

    det_confs = torch.sigmoid(output[4])

    cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
    cls_max_confs = cls_max_confs.view(-1)
    cls_max_ids = cls_max_ids.view(-1, 1)
    t1 = time.time()

    # Pick, per cell, the field values from the model of the winning class.
    det_confs = det_confs.view(bs, cs, nA * N).transpose(1, 2).contiguous().view(bs * nA * N, cs).gather(1, cls_max_ids).view(-1)
    xs = xs.view(bs, cs, nA * N).transpose(1, 2).contiguous().view(bs * nA * N, cs).gather(1, cls_max_ids).view(-1)
    ys = ys.view(bs, cs, nA * N).transpose(1, 2).contiguous().view(bs * nA * N, cs).gather(1, cls_max_ids).view(-1)
    ws = ws.view(bs, cs, nA * N).transpose(1, 2).contiguous().view(bs * nA * N, cs).gather(1, cls_max_ids).view(-1)
    hs = hs.view(bs, cs, nA * N).transpose(1, 2).contiguous().view(bs * nA * N, cs).gather(1, cls_max_ids).view(-1)

    sz_na = N * num_anchors
    det_confs = convert2cpu(det_confs)
    xs = convert2cpu(xs)
    ys = convert2cpu(ys)
    ws = convert2cpu(ws)
    hs = convert2cpu(hs)
    cls_max_confs = convert2cpu(cls_max_confs)
    cls_max_ids = convert2cpu_long(cls_max_ids.view(-1))
    if validation:
        cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
    t2 = time.time()
    for b in range(bs):
        boxes = []
        for n in range(N):
            for i in range(num_anchors):
                ind = b * sz_na + i * N + n
                det_conf = det_confs[ind]
                if only_objectness:
                    conf = det_confs[ind]
                else:
                    conf = det_confs[ind] * cls_max_confs[ind]

                if conf > conf_thresh:
                    bcx = xs[ind]
                    bcy = ys[ind]
                    bw = ws[ind]
                    bh = hs[ind]
                    cls_max_conf = cls_max_confs[ind]
                    cls_max_id = cls_max_ids[ind]
                    box = [bcx / input_size[1], bcy / input_size[0], bw / input_size[1], bh / input_size[0],\
                           det_conf, cls_max_conf, cls_max_id]
                    boxes.append(box)
        all_boxes.append(boxes)
    t3 = time.time()
    if False:
        print('---------------------------------')
        print('matrix computation : %f' % (t1 - t0))
        print('        gpu to cpu : %f' % (t2 - t1))
        print('      boxes filter : %f' % (t3 - t2))
        print('---------------------------------')
    return all_boxes


def plot_boxes_cv2(img, boxes, savename=None, class_names=None, color=None):
    """Draw detection boxes (and class labels) on a cv2/numpy image.

    Box coords are relative [cx, cy, w, h, conf, cls_conf, cls_id]; color
    overrides the per-class palette when given.  Writes the image to
    savename when provided and returns the annotated image.
    """
    import cv2
    palette = torch.FloatTensor([[1,0,1],[0,0,1],[0,1,1],[0,1,0],[1,1,0],[1,0,0]])

    def get_color(c, x, max_val):
        # Linearly interpolate channel c between two neighbouring palette rows.
        ratio = float(x) / max_val * 5
        lo = int(math.floor(ratio))
        hi = int(math.ceil(ratio))
        frac = ratio - lo
        value = (1 - frac) * palette[lo][c] + frac * palette[hi][c]
        return int(value * 255)

    height = img.shape[0]
    width = img.shape[1]
    for box in boxes:
        x1 = int(round((box[0] - box[2] / 2.0) * width))
        y1 = int(round((box[1] - box[3] / 2.0) * height))
        x2 = int(round((box[0] + box[2] / 2.0) * width))
        y2 = int(round((box[1] + box[3] / 2.0) * height))

        rgb = color if color else (255, 0, 0)
        if len(box) >= 7 and class_names:
            cls_conf = box[5]
            cls_id = box[6]
            print('%s: %f' % (class_names[cls_id], cls_conf))
            classes = len(class_names)
            offset = cls_id * 123457 % classes
            red = get_color(2, offset, classes)
            green = get_color(1, offset, classes)
            blue = get_color(0, offset, classes)
            if color is None:
                rgb = (red, green, blue)
            img = cv2.putText(img, class_names[cls_id], (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1.2, rgb, 1)
        img = cv2.rectangle(img, (x1, y1), (x2, y2), rgb, 1)
    if savename:
        print("save plot results to %s" % savename)
        cv2.imwrite(savename, img)
    return img

def plot_boxes(img, boxes, savename=None, class_names=None):
    """Draw detection boxes (and class labels) on a PIL image in place.

    Box coords are relative [cx, cy, w, h, conf, cls_conf, cls_id].  Saves
    to savename when provided and returns the image.
    """
    palette = torch.FloatTensor([[1,0,1],[0,0,1],[0,1,1],[0,1,0],[1,1,0],[1,0,0]])

    def get_color(c, x, max_val):
        # Linearly interpolate channel c between two neighbouring palette rows.
        ratio = float(x) / max_val * 5
        lo = int(math.floor(ratio))
        hi = int(math.ceil(ratio))
        frac = ratio - lo
        value = (1 - frac) * palette[lo][c] + frac * palette[hi][c]
        return int(value * 255)

    width = img.width
    height = img.height
    draw = ImageDraw.Draw(img)
    for box in boxes:
        x1 = (box[0] - box[2] / 2.0) * width
        y1 = (box[1] - box[3] / 2.0) * height
        x2 = (box[0] + box[2] / 2.0) * width
        y2 = (box[1] + box[3] / 2.0) * height

        rgb = (255, 0, 0)
        if len(box) >= 7 and class_names:
            cls_conf = box[5]
            cls_id = box[6]
            print('%s: %f' % (class_names[cls_id], cls_conf))
            classes = len(class_names)
            offset = cls_id * 123457 % classes
            red = get_color(2, offset, classes)
            green = get_color(1, offset, classes)
            blue = get_color(0, offset, classes)
            rgb = (red, green, blue)
            draw.text((x1, y1), class_names[cls_id], fill=rgb)
        draw.rectangle([x1, y1, x2, y2], outline=rgb)
    if savename:
        print("save plot results to %s" % savename)
        img.save(savename)
    return img

def read_truths(lab_path):
    """Load ground-truth boxes from a label file as an Nx5 numpy array.

    Returns an empty array when the file is missing or empty.
    """
    if not os.path.exists(lab_path):
        return np.array([])
    if os.path.getsize(lab_path):
        truths = np.loadtxt(lab_path)
        # Bug fix (Py2->Py3): integer-divide so reshape gets an int count.
        # The reshape also normalizes a single-row file into shape (1, 5).
        truths = truths.reshape(truths.size // 5, 5)
        return truths
    else:
        return np.array([])

def read_truths_args(lab_path, min_box_scale):
    """Load truths and keep only boxes whose width (field 3) is at least min_box_scale."""
    truths = read_truths(lab_path)
    kept = [[row[0], row[1], row[2], row[3], row[4]]
            for row in truths if row[3] >= min_box_scale]
    return np.array(kept)

def load_class_names(namesfile):
    """Read one class name per line from namesfile, trailing whitespace stripped."""
    with open(namesfile, 'r') as fp:
        return [line.rstrip() for line in fp.readlines()]

def image2torch(img):
    """Convert a PIL RGB image to a 1x3xHxW float tensor scaled to [0, 1]."""
    width = img.width
    height = img.height
    tensor = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
    # HWC byte layout -> CHW, then add the batch dimension.
    tensor = tensor.view(height, width, 3).transpose(0, 1).transpose(0, 2).contiguous()
    return tensor.view(1, 3, height, width).float().div(255.0)

def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1):
    """Run one forward pass of model over img and return NMS-filtered boxes.

    img may be a PIL Image or an HxWxC numpy (cv2) image.  Boxes are in the
    format produced by get_region_boxes.
    """
    model.eval()
    t0 = time.time()

    if isinstance(img, Image.Image):
        # Consistency fix: reuse image2torch instead of duplicating the
        # identical PIL -> tensor conversion inline.
        img = image2torch(img)
    elif type(img) == np.ndarray: # cv2 image
        img = torch.from_numpy(img.transpose(2,0,1)).float().div(255.0).unsqueeze(0)
    else:
        print("unknown image type")
        exit(-1)

    t1 = time.time()

    if use_cuda:
        img = img.cuda()
    img = torch.autograd.Variable(img)
    t2 = time.time()

    output = model(img)
    output = output.data
    t3 = time.time()

    boxes = get_region_boxes(output, conf_thresh, model.num_classes, model.anchors, model.num_anchors)[0]
    t4 = time.time()

    boxes = nms(boxes, nms_thresh)
    t5 = time.time()

    if False:
        print('-----------------------------------')
        print(' image to tensor : %f' % (t1 - t0))
        print('  tensor to cuda : %f' % (t2 - t1))
        print('         predict : %f' % (t3 - t2))
        print('get_region_boxes : %f' % (t4 - t3))
        print('             nms : %f' % (t5 - t4))
        print('           total : %f' % (t5 - t0))
        print('-----------------------------------')
    return boxes

def read_data_cfg(datacfg):
    """Parse a darknet data config file into a dict of string options.

    Lines are `key = value`; blank lines and '#' comments are skipped.
    Defaults for 'gpus' and 'num_workers' are pre-filled.
    """
    options = dict()
    options['gpus'] = '0,1,2,3'
    options['num_workers'] = '10'
    with open(datacfg, 'r') as fp:
        lines = fp.readlines()

    for line in lines:
        line = line.strip()
        if line == '' or line.startswith('#'):
            continue
        # Bug fix: split only on the first '=' so values containing '='
        # (e.g. paths or URLs) parse instead of raising ValueError.
        key, value = line.split('=', 1)
        key = key.strip()
        value = value.strip()
        options[key] = value
    return options

def scale_bboxes(bboxes, width, height):
    """Return a deep copy of bboxes with relative coords scaled to pixels."""
    import copy
    scaled = copy.deepcopy(bboxes)
    for det in scaled:
        det[0] = det[0] * width
        det[1] = det[1] * height
        det[2] = det[2] * width
        det[3] = det[3] * height
    return scaled


def is_dict(filename):
    """True when the file's first line has exactly two whitespace-separated fields."""
    with open(filename, 'r') as f:
        return len(f.readline().strip().split()) == 2


def file_lines(thefilepath):
    """Count lines, dispatching on whether the file is a dict-style listing."""
    return all_file_lines(thefilepath) if is_dict(thefilepath) else _file_lines(thefilepath)


def _file_lines(thefilepath):
    count = 0
    thefile = open(thefilepath, 'rb')
    while True:
        buffer = thefile.read(8192*1024)
        if not buffer:
            break
        count += buffer.count('\n')
    thefile.close()
    return count


def all_file_lines(file_dict):
    """Count distinct lines across all files listed in a dict-style file.

    Each line of file_dict names a file in its last whitespace field; the
    result is the size of the union of all those files' lines.
    """
    with open(file_dict, 'r') as f:
        listed = [line.rstrip().split()[-1] for line in f.readlines()]
    unique = set()
    for path in listed:
        with open(path, 'r') as f:
            unique.update(f.readlines())
    return len(unique)


# def all_file_lines(file_dict):
#     # Backup labeled image paths (for meta-model)
#     count = 0
#     with open(file_dict, 'r') as f:
#         files = [line.rstrip().split()[-1] for line in f.readlines()]
#         for file in files:
#             count += _file_lines(file)
#     return count


def get_image_size(fname):
    '''Determine the image type of fname (png/gif/jpeg) and return (width, height).

    Reads only header bytes instead of decoding the whole image; returns
    None for unsupported or corrupt files.  Originally from draco.'''
    with open(fname, 'rb') as fhandle:
        head = fhandle.read(24)
        if len(head) != 24: 
            # Too short to hold any of the supported headers.
            return
        if imghdr.what(fname) == 'png':
            # Bytes 4-8 must hold the fixed PNG signature tail 0x0d0a1a0a;
            # IHDR width/height are big-endian ints at offsets 16 and 20.
            check = struct.unpack('>i', head[4:8])[0]
            if check != 0x0d0a1a0a:
                return
            width, height = struct.unpack('>ii', head[16:24])
        elif imghdr.what(fname) == 'gif':
            # GIF logical screen size: two little-endian shorts at offset 6.
            width, height = struct.unpack('<HH', head[6:10])
        elif imghdr.what(fname) == 'jpeg' or imghdr.what(fname) == 'jpg':
            try:
                fhandle.seek(0) # Read 0xff next
                size = 2 
                ftype = 0 
                # Walk marker segments until a SOFn frame header (0xC0-0xCF),
                # which carries the image dimensions.
                while not 0xc0 <= ftype <= 0xcf:
                    fhandle.seek(size, 1)
                    byte = fhandle.read(1)
                    while ord(byte) == 0xff:
                        # 0xff bytes are padding/marker prefixes; skip them.
                        byte = fhandle.read(1)
                    ftype = ord(byte)
                    size = struct.unpack('>H', fhandle.read(2))[0] - 2 
                # We are at a SOFn block
                fhandle.seek(1, 1)  # Skip `precision' byte.
                height, width = struct.unpack('>HH', fhandle.read(4))
            except Exception: #IGNORE:W0703
                # Truncated/corrupt JPEG stream; report no size.
                return
        else:
            return
        return width, height

def logging(message):
    """Print message prefixed with a local-time timestamp.

    NOTE(review): this name shadows the stdlib ``logging`` module if both
    are imported into the same namespace.
    """
    timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print('%s %s' % (timestamp, message))
