
import sys
import os
import os.path as osp
import numpy as np
import cv2
import time
import torch
import argparse
import matplotlib.pyplot as plt

sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from lib.nn.cenetrnet import CenterNet
from lib.config import cfg, update_config
from lib.utils.nms import py_cpu_nms, py_cpu_softnms
from lib.utils.metric import AverageMeter

def get_args():
    """Parse command-line options: config path, checkpoint path, test image."""
    p = argparse.ArgumentParser(description='CenterFace Detection')
    p.add_argument('--cfg', type=str, default='', help='Config file path')
    p.add_argument('--pth', type=str, default='', help='Weight path')
    p.add_argument('--img', type=str, default='')
    return p.parse_args()

def pre_process(img):
    """Convert a BGR uint8 HxWx3 image into a normalized CHW float32 array.

    BGR -> RGB channel flip, scale pixel values to roughly [-1, 1], then
    move the channel axis first.
    """
    rgb = img[:, :, ::-1].astype(np.float32)
    normalized = (rgb - 127.5) / 128.0
    return normalized.transpose(2, 0, 1)



def detect_face(image, shrink=1.0):
    """Run the detector on `image` at scale `shrink` and return NMS'd boxes.

    Args:
        image: HxWx3 BGR uint8 image.
        shrink (float): resize factor applied before inference.

    Returns:
        (N, 5) float32 array of [x1, y1, x2, y2, score] rows in the
        original image's coordinate frame, filtered by the module-level
        `score_thresh` and `nms_thresh`.
    """
    h, w = image.shape[:2]

    # Input sides are rounded up to multiples of 32 (network stride requirement).
    new_w, new_h = int(w * shrink), int(h * shrink)
    new_w, new_h = int(np.ceil(new_w / 32) * 32), int(np.ceil(new_h / 32) * 32)
    rimg = image.copy()
    if (w, h) != (new_w, new_h):
        rimg = cv2.resize(image, (new_w, new_h))
    # Per-axis factors mapping network-output coordinates back to the original image.
    scale = (w / new_w, h / new_h)

    inputs = pre_process(rimg)
    # from_numpy + unsqueeze avoids the extra copy torch.Tensor([...]) would make;
    # ascontiguousarray guarantees the same contiguous layout the old path produced.
    inputs = torch.from_numpy(np.ascontiguousarray(inputs)).unsqueeze(0).to(device)
    bboxes, scores, clses = model(inputs)
    bboxes = bboxes.cpu().numpy()
    scores = scores.cpu().reshape(scores.shape[0], -1).numpy()

    # Keep only candidates above the confidence threshold.
    keep = scores > score_thresh
    _bboxes = bboxes[keep]
    _scores = scores[keep]

    # Rescale to the original frame and clamp to the image bounds.
    _bboxes[:, ::2] *= scale[0]
    _bboxes[:, 1::2] *= scale[1]
    _bboxes[:, ::2] = np.clip(_bboxes[:, ::2], 0, w)
    _bboxes[:, 1::2] = np.clip(_bboxes[:, 1::2], 0, h)

    # Non-maximum suppression on [x1, y1, x2, y2, score] rows.
    dets = np.hstack((_bboxes, _scores[:, np.newaxis])).astype(np.float32, copy=False)
    keep = py_cpu_nms(dets, nms_thresh)
    dets = dets[keep]
    return dets


def flip_test(image, shrink):
    """Detect on the horizontally mirrored image and map boxes back.

    Returns an (N, 5) array in the original (unflipped) coordinate frame.
    """
    width = image.shape[1]
    mirrored = cv2.flip(image, 1)
    det_m = detect_face(mirrored, shrink)
    det = np.zeros(det_m.shape)
    # Mirror x-coordinates; x1/x2 swap roles so the box stays well-formed.
    det[:, 0] = width - det_m[:, 2]
    det[:, 2] = width - det_m[:, 0]
    det[:, 1] = det_m[:, 1]
    det[:, 3] = det_m[:, 3]
    det[:, 4] = det_m[:, 4]
    return det


def multi_scale_test(image, max_shrink):
    """Detect at extra scales: a shrunk pass for big faces, enlarged passes for small ones.

    Args:
        image: HxWx3 BGR uint8 image.
        max_shrink (float): upper bound on the enlargement factor (from get_shrink).

    Returns:
        (det_s, det_b): (N, 5) arrays from the shrunk pass and the enlarged passes.
    """
    # Shrink detecting is only used to detect big faces
    st = 0.5 if max_shrink >= 0.75 else 0.5 * max_shrink
    det_s = detect_face(image, st)
    index = np.where(
        np.maximum(det_s[:, 2] - det_s[:, 0] + 1, det_s[:, 3] - det_s[:, 1] + 1)
        > 30)[0]
    det_s = det_s[index, :]
    # Enlarge one times
    bt = min(2, max_shrink) if max_shrink > 1 else (st + max_shrink) / 2
    det_b = detect_face(image, bt)

    # Enlarge small image x times for small faces
    if max_shrink > 2:
        bt *= 2
        while bt < max_shrink:
            # np.row_stack was removed in NumPy 2.0; vstack is the supported form.
            det_b = np.vstack((det_b, detect_face(image, bt)))
            bt *= 2
        det_b = np.vstack((det_b, detect_face(image, max_shrink)))

    # Enlarged images are only used to detect small faces.
    if bt > 1:
        index = np.where(
            np.minimum(det_b[:, 2] - det_b[:, 0] + 1,
                       det_b[:, 3] - det_b[:, 1] + 1) < 100)[0]
        det_b = det_b[index, :]
    # Shrinked images are only used to detect big faces.
    else:
        index = np.where(
            np.maximum(det_b[:, 2] - det_b[:, 0] + 1,
                       det_b[:, 3] - det_b[:, 1] + 1) > 30)[0]
        det_b = det_b[index, :]
    return det_s, det_b


def multi_scale_test_pyramid(image, max_shrink):
    """Detect over an image pyramid (0.25x plus fixed scales up to max_shrink).

    Enlarged scales keep only small faces; shrunk scales keep only big faces.
    Returns one stacked (N, 5) detection array.
    """
    # Use image pyramids to detect faces
    det_b = detect_face(image, 0.25)
    index = np.where(
        np.maximum(det_b[:, 2] - det_b[:, 0] + 1, det_b[:, 3] - det_b[:, 1] + 1)
        > 30)[0]
    det_b = det_b[index, :]

    st = [0.75, 1.25, 1.5, 1.75]
    for i in range(len(st)):
        if (st[i] <= max_shrink):
            det_temp = detect_face(image, st[i])
            # Enlarged images are only used to detect small faces.
            if st[i] > 1:
                index = np.where(
                    np.minimum(det_temp[:, 2] - det_temp[:, 0] + 1,
                               det_temp[:, 3] - det_temp[:, 1] + 1) < 100)[0]
                det_temp = det_temp[index, :]
            # Shrinked images are only used to detect big faces.
            else:
                index = np.where(
                    np.maximum(det_temp[:, 2] - det_temp[:, 0] + 1,
                               det_temp[:, 3] - det_temp[:, 1] + 1) > 30)[0]
                det_temp = det_temp[index, :]
            # np.row_stack was removed in NumPy 2.0; vstack is the supported form.
            det_b = np.vstack((det_b, det_temp))
    return det_b


def get_shrink(height, width):
    """Compute a resize factor so inference on the image fits in memory.

    Args:
        height (int): image height.
        width (int): image width.

    Returns:
        (shrink, max_shrink): `shrink` is min(max_shrink, 1), used for the
        base detection pass; `max_shrink` bounds the enlargement scales used
        by the multi-scale tests.
    """
    # Two heuristic memory caps; the tighter (smaller) one wins.
    max_shrink_v1 = (0x7fffffff / 577.0 / (height * width))**0.5
    max_shrink_v2 = ((678 * 1024 * 2.0 * 2.0) / (height * width))**0.5

    def get_round(x, loc):
        # Truncate x to `loc` decimal places (no rounding). Original code
        # implicitly returned None when str(x) contained no '.' (e.g.
        # scientific notation for extreme sizes), which made the subtraction
        # below raise TypeError; return x unchanged instead.
        str_x = str(x)
        if '.' in str_x:
            str_before, str_after = str_x.split('.')
            len_after = len(str_after)
            if len_after >= 3:
                str_final = str_before + '.' + str_after[0:loc]
                return float(str_final)
            return x
        return x

    # Truncate to 2 decimals, then back off by a size-dependent safety margin.
    max_shrink = get_round(min(max_shrink_v1, max_shrink_v2), 2) - 0.3
    if max_shrink >= 1.5 and max_shrink < 2:
        max_shrink = max_shrink - 0.1
    elif max_shrink >= 2 and max_shrink < 3:
        max_shrink = max_shrink - 0.2
    elif max_shrink >= 3 and max_shrink < 4:
        max_shrink = max_shrink - 0.3
    elif max_shrink >= 4 and max_shrink < 5:
        max_shrink = max_shrink - 0.4
    elif max_shrink >= 5:
        max_shrink = max_shrink - 0.5
    elif max_shrink <= 0.1:
        max_shrink = 0.1

    # Never enlarge the base pass beyond the original resolution.
    shrink = max_shrink if max_shrink < 1 else 1
    return shrink, max_shrink



def bbox_vote(det):
    """Merge overlapping detections by confidence-weighted box voting.

    Args:
        det: (N, 5) array of [x1, y1, x2, y2, score] rows.

    Returns:
        (M, 5) array (M <= 750) where each row is the score-weighted average
        of a cluster of boxes with IoU >= 0.3 to the cluster's top-scoring
        box; the merged row keeps the cluster's maximum score. Empty input
        yields a single dummy low-score box.
    """
    order = det[:, 4].ravel().argsort()[::-1]
    det = det[order, :]
    # Explicit None sentinel replaces the original NameError-driven
    # try/except initialization of `dets`.
    dets = None
    if det.shape[0] == 0:
        dets = np.array([[10, 10, 20, 20, 0.002]])
        det = np.empty(shape=[0, 5])
    while det.shape[0] > 0:
        # IoU of the current top-scoring box against every remaining box.
        area = (det[:, 2] - det[:, 0] + 1) * (det[:, 3] - det[:, 1] + 1)
        xx1 = np.maximum(det[0, 0], det[:, 0])
        yy1 = np.maximum(det[0, 1], det[:, 1])
        xx2 = np.minimum(det[0, 2], det[:, 2])
        yy2 = np.minimum(det[0, 3], det[:, 3])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        o = inter / (area[0] + area[:] - inter)

        # Pull the cluster (IoU >= 0.3, includes the top box itself) out of det.
        merge_index = np.where(o >= 0.3)[0]
        det_accu = det[merge_index, :]
        det = np.delete(det, merge_index, 0)
        if merge_index.shape[0] <= 1:
            # NOTE(review): quirk preserved from the original — a singleton
            # cluster is kept only when it is the last remaining box;
            # otherwise it is silently dropped.
            if det.shape[0] == 0:
                dets = det_accu if dets is None else np.vstack((dets, det_accu))
            continue
        # Score-weighted average of the cluster's coordinates; keep max score.
        det_accu[:, 0:4] = det_accu[:, 0:4] * np.tile(det_accu[:, -1:], (1, 4))
        max_score = np.max(det_accu[:, 4])
        det_accu_sum = np.zeros((1, 5))
        det_accu_sum[:, 0:4] = np.sum(det_accu[:, 0:4],
                                      axis=0) / np.sum(det_accu[:, -1:])
        det_accu_sum[:, 4] = max_score
        dets = det_accu_sum if dets is None else np.vstack((dets, det_accu_sum))
    dets = dets[0:750, :]
    return dets


def infer(image, ms=False):
    """Detect faces, optionally with flip + pyramid test-time augmentation.

    Args:
        image: HxWx3 BGR uint8 image.
        ms (bool): when True, fuse the base pass with flip-test and
            multi-scale-pyramid passes via bbox voting.

    Returns:
        (N, 5) array of [x1, y1, x2, y2, score] rows above `score_thresh`.
    """
    shrink, max_shrink = get_shrink(image.shape[0], image.shape[1])
    det0 = detect_face(image, shrink)
    if ms:
        det1 = flip_test(image, shrink)
        det4 = multi_scale_test_pyramid(image, max_shrink)
        # NOTE(review): the original computed multi_scale_test() as well but
        # immediately overwrote the stack that included its results, so those
        # passes contributed nothing; the dead call and assignment are removed.
        det = np.vstack((det0, det1, det4))
        dets = bbox_vote(det)
    else:
        dets = det0
    keep_index = np.where(dets[:, 4] >= score_thresh)[0]
    dets = dets[keep_index, :]
    return dets

# ---- script setup (runs at import time) ----
opt = get_args()
# Merge the YAML config named on the command line into the global cfg.
cfg.merge_from_file(opt.cfg)
# os.environ["CUDA_VISIBLE_DEVICES"] = '0'
device = 'cuda'
# topk=500: model keeps at most 500 candidate boxes; model-level flip is off
# (flip augmentation is done externally by flip_test()).
model = CenterNet(cfg, topk=500, flip_test=False)
# Checkpoints saved from DataParallel prefix keys with 'module.'; strip it.
model.load_state_dict({k.replace('module.',''):v for k,v in torch.load(opt.pth, map_location=device)['model'].items()})
model = model.to(device)
score_thresh = 0.02  # minimum confidence kept by detect_face/infer
nms_thresh = 0.4  # IoU threshold passed to py_cpu_nms
# input_size  = (1920, 1080)
input_size = None
model.eval()
# Inference only: disable autograd globally.
torch.set_grad_enabled(False)

# # test speed
# for i in range(10):
#     net(torch.randn(1, 3, 800, 800).cuda())
# xt = 500
# tic = time.time()
# w, h = int(np.ceil(1920 / 32) * 32), int(np.ceil(1080 / 32) * 32)
# x = torch.randn(1, 3, h, w).cuda()
# for i in range(xt):
#     model(x)
# toc = time.time()
# print((toc-tic)/xt)


## submit
# import glob
# im_paths = glob.glob('data/personai_icartoonface_detval/*.jpg')
# f = open('result.csv', 'w')
# for idx, im_path in enumerate(im_paths):
#     print(idx)
#     i_img = cv2.imread(im_path)
#     dets = infer(i_img, ms=0)
#     im_name = os.path.basename(im_path)
#     for det in dets:
#         box, score = det[:4].astype(np.int32), det[-1]
#         f.write('%s,%d,%d,%d,%d,face,%.2f\n'%(im_name, *box, score))
# f.close()


# test
# i_img = cv2.imread(opt.img)
# dets = infer(i_img, 0)
# print(dets.shape)
# vis = 0
# if vis:
#     fig = plt.figure()
#     ax = fig.add_subplot(1, 1, 1)
#     ax.imshow(i_img[:,:,::-1])
#     for box, score in zip(bboxes, scores):
#         x1, y1, x2, y2 = box 
#         rect = plt.Rectangle((x1, y1), x2-x1, y2-y1, fill=False, edgecolor=(2/255, 1, 0), linewidth=1)
#         ax.add_patch(rect)
#         ax.text(x1-5, y1-5, '%.2f'%score, color='r', fontsize=8)
#     plt.draw()
#     plt.show()
#     # plt.savefig('result.jpg')
# else:
#     for det in dets:
#         box, score = det[:4], det[-1]
#         cv2.rectangle(i_img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 0, 255), 1)
#         cv2.putText(i_img, '%.2f'%score, (int(box[0]), int(box[1])), cv2.FONT_HERSHEY_SIMPLEX, 
#                     0.5, (0, 0, 255), 1, lineType=cv2.LINE_AA)
#     cv2.imwrite('result.jpg', i_img)



import glob
im_paths = glob.glob('data/personai_icartoonface_detval/*.jpg')
out_dir = 'data/val_out'
# cv2.imwrite fails silently when the target directory is missing.
os.makedirs(out_dir, exist_ok=True)
for idx, im_path in enumerate(im_paths):
    print(idx)
    i_img = cv2.imread(im_path)
    if i_img is None:
        # Unreadable/corrupt file: skip instead of crashing inside detect_face.
        print('skip unreadable image: %s' % im_path)
        continue
    # Single-scale detection at the default shrink=1.0.
    dets = detect_face(i_img)
    for det in dets:
        box, score = det[:4], det[-1]
        cv2.rectangle(i_img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 0, 255), 1)
        cv2.putText(i_img, '%.2f'%score, (int(box[0]), int(box[1])), cv2.FONT_HERSHEY_SIMPLEX, 
                    0.5, (0, 0, 255), 1, lineType=cv2.LINE_AA)
    cv2.imwrite(os.path.join(out_dir, os.path.basename(im_path)), i_img)
