
import sys
import os
import os.path as osp
import numpy as np
import cv2
import time
import torch
import argparse
import matplotlib.pyplot as plt
import pandas as pd

sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from lib.nn.cenetrnet import CenterNet
from lib.config import cfg, update_config
from lib.utils.nms import py_cpu_nms, py_cpu_softnms
from lib.ops.nms.nms_wrapper import soft_nms, nms
from lib.utils.metric import AverageMeter
from lib.utils.mean_ap import eval_map

def get_args():
    """Build the CLI parser for the detector and return the parsed options.

    Options:
        --cfg  path to the YAML config file
        --pth  path to the model weight checkpoint
        --img  path to a single test image (used by the commented test code)
    """
    cli = argparse.ArgumentParser(description='CenterFace Detection')
    # All three options are plain string flags defaulting to ''.
    for flag, help_text in (('--cfg', 'Config file path'),
                            ('--pth', 'Weight path'),
                            ('--img', None)):
        cli.add_argument(flag, default='', type=str, help=help_text)
    return cli.parse_args()

def pre_process(img):
    """Convert a BGR uint8 HxWx3 image into the network's input layout.

    Steps: BGR -> RGB channel reversal, cast to float32, normalize to
    roughly [-1, 1) via (x - 127.5) / 128, then HWC -> CHW transpose.
    Returns a float32 array of shape (3, H, W).
    """
    rgb = img[:, :, ::-1].astype(np.float32)
    normalized = (rgb - 127.5) / 128.0
    return normalized.transpose(2, 0, 1)



def detect_face(image, side):
    """Run the detector on one BGR image and return NMS-filtered detections.

    Args:
        image: HxWx3 uint8 BGR image.
        side: target square input side in pixels, or None to keep the
            original resolution (rounded up to a multiple of 32).

    Returns:
        (M, 5) float32 array of [x1, y1, x2, y2, score] rows in original
        image coordinates, after score thresholding and hard NMS.
    Uses module-level globals: model, device, score_thresh, nms_thresh.
    """
    img_h, img_w = image.shape[:2]

    if side is None:
        # Original scale: round both dims up to a multiple of 32 and stretch.
        net_w = int(np.ceil(img_w / 32) * 32)
        net_h = int(np.ceil(img_h / 32) * 32)
        net_input = cv2.resize(image, (net_w, net_h))
        scale = (img_w / net_w, img_h / net_h)
    else:
        # Fixed square input: isotropic shrink to fit, then zero-pad
        # bottom/right so aspect ratio is preserved.
        net_w = int(np.ceil(side / 32) * 32)
        net_h = int(np.ceil(side / 32) * 32)
        s = max(img_w / net_w, img_h / net_h)
        shrunk = cv2.resize(image, dsize=None, fx=1.0 / s, fy=1.0 / s)
        net_input = np.zeros((net_h, net_w, 3), dtype=np.uint8)
        net_input[:shrunk.shape[0], :shrunk.shape[1], :] = shrunk
        scale = (s, s)

    tensor = torch.Tensor([pre_process(net_input)]).to(device)
    bboxes, scores, _clses = model(tensor)
    bboxes = bboxes.cpu().numpy()
    scores = scores.cpu().reshape(scores.shape[0], -1).numpy()

    # Drop low-confidence candidates before the (more expensive) NMS.
    mask = scores > score_thresh
    kept_boxes = bboxes[mask]
    kept_scores = scores[mask]

    # Map back to original image coordinates and clamp to its borders.
    kept_boxes[:, ::2] *= scale[0]
    kept_boxes[:, 1::2] *= scale[1]
    kept_boxes[:, ::2] = np.clip(kept_boxes[:, ::2], 0, img_w - 1)
    kept_boxes[:, 1::2] = np.clip(kept_boxes[:, 1::2], 0, img_h - 1)

    dets = np.hstack((kept_boxes, kept_scores[:, np.newaxis])).astype(np.float32, copy=False)
    dets, _ = nms(dets, nms_thresh, 0)
    return dets


def flip_test(image, side):
    """Detect faces on the horizontally flipped image, then un-flip the boxes.

    Args:
        image: HxWx3 BGR image (un-flipped original).
        side: passed through to detect_face (square input side or None).

    Returns:
        Detections as an (M, 5) [x1, y1, x2, y2, score] array mapped back
        into the original (un-flipped) image's coordinate frame.
    """
    flipped = cv2.flip(image, 1)
    det_f = detect_face(flipped, side)
    # copy() preserves det_f's dtype (np.zeros(shape) would upcast to float64)
    # and already carries y1, y2 and score unchanged.
    det_t = det_f.copy()
    w = image.shape[1]
    # Pixel-index flip: column x maps to (w - 1 - x), and x1/x2 swap roles.
    # detect_face clips coordinates to [0, w-1], so this convention matches;
    # the previous (w - x) mapping shifted every flipped box right by 1 px.
    det_t[:, 0] = w - 1 - det_f[:, 2]
    det_t[:, 2] = w - 1 - det_f[:, 0]
    return det_t



def infer(image, ms=False):
    """Full inference pipeline for one image.

    Args:
        image: HxWx3 BGR image.
        ms: truthy -> multi-scale testing over sides 320/480/640/800;
            falsy -> single pass at the original resolution.

    Returns:
        Up to 150 detections, score-sorted descending, after a final
        cross-scale NMS (IoU 0.3) and score thresholding.
    """
    if ms:
        # Multi-scale: pool detections from every input side.
        per_scale = [detect_face(image, side) for side in (320, 480, 640, 800)]
        dets = np.vstack([np.zeros((0, 5), dtype=np.float32)] + per_scale)
    else:
        dets = detect_face(image, None)
    # Sort by descending score before merging duplicates across scales.
    dets = dets[dets[:, 4].ravel().argsort()[::-1], :]
    dets, _ = nms(dets, 0.3, 0)
    # Re-apply the score floor, then cap the per-image detection count.
    dets = dets[np.where(dets[:, 4] >= score_thresh)[0], :]
    return dets[0:150, :]

# --- Model setup (module-level side effects: parses CLI, loads weights) ---
opt = get_args()
cfg.merge_from_file(opt.cfg)
os.environ["CUDA_VISIBLE_DEVICES"] = '0'  # pin inference to the first GPU
device = 'cuda'
model = CenterNet(cfg, topk=100, flip_test=False)
# Checkpoint keys carry a 'module.' prefix (saved from nn.DataParallel);
# strip it so they match the bare model's state_dict.
model.load_state_dict({k.replace('module.',''):v for k,v in torch.load(opt.pth, map_location=device)['model'].items()})
model = model.to(device)
score_thresh = 0.02  # minimum confidence kept after decoding (read by detect_face/infer)
nms_thresh = 0.4     # IoU threshold for the per-scale NMS in detect_face
model.eval()
torch.set_grad_enabled(False)  # inference only; disable autograd globally



## submit: run multi-scale inference over the validation set and write a CSV
import glob

im_paths = glob.glob('data/personai_icartoonface_detval/*.jpg')
# 'with' guarantees the result file is flushed and closed even if
# inference raises partway through (the bare open/close leaked it).
with open('result.csv', 'w') as f:
    for idx, im_path in enumerate(im_paths):
        print(idx)  # crude progress indicator
        i_img = cv2.imread(im_path)
        dets = infer(i_img, ms=True)
        im_name = os.path.basename(im_path)
        for det in dets:
            box, score = det[:4].astype(np.int32), det[-1]
            # Submission row: name,x1,y1,x2,y2,face,score
            f.write('%s,%d,%d,%d,%d,face,%.2f\n' % (im_name, *box, score))


# test
# i_img = cv2.imread(opt.img)
# dets = infer(i_img, 1)
# print(dets.shape)
# vis = 0
# if vis:
#     fig = plt.figure()
#     ax = fig.add_subplot(1, 1, 1)
#     ax.imshow(i_img[:,:,::-1])
#     for box, score in zip(bboxes, scores):
#         x1, y1, x2, y2 = box 
#         rect = plt.Rectangle((x1, y1), x2-x1, y2-y1, fill=False, edgecolor=(2/255, 1, 0), linewidth=1)
#         ax.add_patch(rect)
#         ax.text(x1-5, y1-5, '%.2f'%score, color='r', fontsize=8)
#     plt.draw()
#     plt.show()
#     # plt.savefig('result.jpg')
# else:
#     for det in dets:
#         box, score = det[:4], det[-1]
#         cv2.rectangle(i_img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 0, 255), 1)
#         cv2.putText(i_img, '%.2f'%score, (int(box[0]), int(box[1])), cv2.FONT_HERSHEY_SIMPLEX, 
#                     0.5, (0, 0, 255), 1, lineType=cv2.LINE_AA)
#     cv2.imwrite('result.jpg', i_img)




# import glob
# im_paths = glob.glob('data/personai_icartoonface_detval/*.jpg')
# for idx, im_path in enumerate(im_paths):
#     print(idx)
#     i_img = cv2.imread(im_path)
#     dets = detect_face(i_img)
#     for det in dets:
#         box, score = det[:4], det[-1]
#         cv2.rectangle(i_img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 0, 255), 1)
#         cv2.putText(i_img, '%.2f'%score, (int(box[0]), int(box[1])), cv2.FONT_HERSHEY_SIMPLEX, 
#                     0.5, (0, 0, 255), 1, lineType=cv2.LINE_AA)
#     cv2.imwrite('data/val_out/%s'%(os.path.basename(im_path)), i_img)


# root_path = cfg.DATASET.ROOT
# anno_path = cfg.DATASET.ANNO_ROOT
# in_width, in_height = cfg.DATASET.IMAGE_SIZE

# pdcsv = pd.read_csv(anno_path)
# frames = {}
# for idx in range(len(pdcsv)):
#     im_name = pdcsv.iloc[idx, 0]
#     x1, y1, x2, y2 = pdcsv.iloc[idx, 1:]
#     if im_name not in frames:
#         frames[im_name]=[]
#     frames[im_name].append([x1, y1, x2, y2])

# frames = list(frames.items())
# val_num = 100 #int(len(frames)*0.3)
# # print('val_num: ', val_num)
# annotations = []
# det_results = []
# cnt = 0
# for frame in frames:#[len(frames)-val_num:]:
#     # if cnt>20:
#     #     break
#     print(cnt)
#     k, v = frame
#     i_img = cv2.imread(osp.join(root_path, k))
#     dets = detect_face(i_img)
#     det_results.append([dets])

#     anno = {}
#     anno['bboxes'] = np.array(v)
#     anno['labels'] = np.ones(len(v))
#     annotations.append(anno)
#     cnt += 1
    
    

# mean_ap, eval_results = eval_map(det_results, annotations)
# print(mean_ap)