# -*- coding: UTF-8 -*-

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms

import collections
import numpy as np
import cv2
import pickle
import random
import math
import json
import scipy.io
import os
import h5py

from data.dataaugmentation import *
from utils.box import bbox_iou
from utils.progressbar import *


# <WiderFaceParser/>
class WiderFaceParser(object):
    """Parse the WIDER FACE annotation .mat files into plain Python lists.

    Each parsed image entry is a dict:
        {"name": str, "path": str, "event": str, "bboxes": [
            {"xywh": (cx, cy, w, h), "blur": int, "expression": int,
             "illumination": int, "invalid": int, "occlusion": int,
             "pose": int}, ...]}
    (test entries carry no "bboxes" key). Boxes are stored center-based;
    degenerate boxes (w <= 0 or h <= 0) are dropped during parsing.
    Parsing is best-effort: a failure prints a message and leaves whatever
    was parsed so far (usually an empty list).
    """

    # per-box attribute labels, in the order they appear in the .mat files
    _LABEL_KEYS = ('blur', 'expression', 'illumination', 'invalid', 'occlusion', 'pose')

    # <__init__/>
    def __init__(
        self, 
        path_to_train_images = './WIDER_train/images', 
        wider_face_train_bbx_gt_mat = './wider_face_train.mat', 
        path_to_val_images = './WIDER_val/images',
        wider_face_val_bbx_gt_mat = './wider_face_val.mat',
        # NOTE(review): the three difficulty-split defaults point at the same
        # file as the full val set -- presumably placeholders; pass the real
        # per-split .mat paths in actual use.
        wider_face_val_bbx_gt_hard_mat = './wider_face_val.mat',
        wider_face_val_bbx_gt_medium_mat = './wider_face_val.mat',
        wider_face_val_bbx_gt_easy_mat = './wider_face_val.mat',
        path_to_test_images = './WIDER_test/images',
        wider_face_test_filelist_mat = './wider_face_test.mat'
        ):
        super(WiderFaceParser, self).__init__()

        self.trainset = self._parse_annotated_mat(
            wider_face_train_bbx_gt_mat, path_to_train_images,
            use_gt_list=False, tag='wider_face_train_bbx_gt')
        self.valset = self._parse_annotated_mat(
            wider_face_val_bbx_gt_mat, path_to_val_images,
            use_gt_list=False, tag='wider_face_val_bbx_gt')
        # the hard/medium/easy mats additionally carry a 'gt_list' of 1-based
        # box indices selecting the boxes that belong to that difficulty split
        self.valset_hard = self._parse_annotated_mat(
            wider_face_val_bbx_gt_hard_mat, path_to_val_images,
            use_gt_list=True, tag='wider_face_val_bbx_gt hard')
        self.valset_medium = self._parse_annotated_mat(
            wider_face_val_bbx_gt_medium_mat, path_to_val_images,
            use_gt_list=True, tag='wider_face_val_bbx_gt medium')
        self.valset_easy = self._parse_annotated_mat(
            wider_face_val_bbx_gt_easy_mat, path_to_val_images,
            use_gt_list=True, tag='wider_face_val_bbx_gt easy')
        self.testset = self._parse_test_mat(wider_face_test_filelist_mat, path_to_test_images)
    # </__init__>

    # <_parse_annotated_mat/>
    def _parse_annotated_mat(self, mat_path, images_root, use_gt_list, tag):
        """Parse one annotated .mat file into a list of image-info dicts.

        mat_path: path to the annotation .mat file.
        images_root: directory prefix joined with event/image name.
        use_gt_list: read the 'gt_list' difficulty-split index array.
        tag: label used in the error message on failure.
        Returns the (possibly partial) list; prints and stops on any error.
        """
        dataset = []
        try:
            fmat = scipy.io.loadmat(mat_path)
            event_list = fmat.get('event_list')
            file_list = fmat.get('file_list')
            face_bbx_list = fmat.get('face_bbx_list')
            label_lists = {key: fmat.get(key + '_label_list') for key in self._LABEL_KEYS}
            gt_list = fmat.get('gt_list') if use_gt_list else None
            for event_idx, event in enumerate(event_list):
                event_name = event[0][0]
                for im_idx, im in enumerate(file_list[event_idx][0]):
                    im_name = im[0][0]
                    face_bbx = face_bbx_list[event_idx][0][im_idx][0]
                    box_labels = {key: lst[event_idx][0][im_idx][0] for key, lst in label_lists.items()}
                    if use_gt_list:
                        # gt_list holds 1-based MATLAB indices of the boxes
                        # kept for this difficulty split
                        box_indices = [int(k) - 1 for k in gt_list[event_idx][0][im_idx][0]]
                    else:
                        box_indices = range(face_bbx.shape[0])
                    image_info_dict = {
                        'name': im_name,
                        'path': os.path.join(images_root, event_name, im_name + '.jpg'),
                        'event': event_name,
                        'bboxes': [],
                    }
                    for i in box_indices:
                        # face_bbx rows are (x1, y1, w, h); convert to center-based
                        w = float(face_bbx[i][2])
                        h = float(face_bbx[i][3])
                        if w <= 0 or h <= 0:
                            continue  # drop degenerate boxes
                        bbox = {'xywh': (float(face_bbx[i][0]) + w / 2, float(face_bbx[i][1]) + h / 2, w, h)}
                        for key, labels in box_labels.items():
                            bbox[key] = int(labels[i][0])
                        image_info_dict['bboxes'].append(bbox)
                    dataset.append(image_info_dict)
        except Exception:
            # best-effort: keep whatever was parsed before the failure
            print('Error in parsing %s mat' % tag)
        return dataset
    # </_parse_annotated_mat>

    # <_parse_test_mat/>
    def _parse_test_mat(self, mat_path, images_root):
        """Parse the test file-list .mat (image names only, no boxes)."""
        dataset = []
        try:
            fmat = scipy.io.loadmat(mat_path)
            event_list = fmat.get('event_list')
            file_list = fmat.get('file_list')
            for event_idx, event in enumerate(event_list):
                event_name = event[0][0]
                for im in file_list[event_idx][0]:
                    im_name = im[0][0]
                    dataset.append({
                        'name': im_name,
                        'path': os.path.join(images_root, event_name, im_name + '.jpg'),
                        'event': event_name,
                    })
        except Exception:
            print('Error in parsing wider_face_test_filelist mat')
        return dataset
    # </_parse_test_mat>

    # <_save_json/>
    def _save_json(self, dataset, json_file, caller):
        """Write `dataset` to `json_file`; return True on success, False otherwise."""
        try:
            with open(json_file, 'w') as f:
                json.dump(dataset, f)
        except Exception:
            print("Error in %s" % caller)
            return False
        return True
    # </_save_json>

    # <_save_pickle/>
    def _save_pickle(self, dataset, pickle_file, caller):
        """Write `dataset` to `pickle_file`; return True on success, False otherwise."""
        try:
            # BUGFIX: pickle requires a binary-mode file handle; the original
            # text mode 'w' made every pickle save raise and return False
            with open(pickle_file, 'wb') as f:
                pickle.dump(dataset, f)
        except Exception:
            print("Error in %s" % caller)
            return False
        return True
    # </_save_pickle>

    # <save_to_trainset_json/>
    def save_to_trainset_json(self, json_file = './wider_face_trainset.json'):
        return self._save_json(self.trainset, json_file, 'save_to_trainset_json')
    # </save_to_trainset_json>

    # <save_to_trainset_pickle/>
    def save_to_trainset_pickle(self, pickle_file = './wider_face_trainset.pickle'):
        # BUGFIX: the error message previously named save_to_trainset_json
        return self._save_pickle(self.trainset, pickle_file, 'save_to_trainset_pickle')
    # </save_to_trainset_pickle>

    # <save_to_valset_json/>
    def save_to_valset_json(self, json_file = './wider_face_valset.json'):
        return self._save_json(self.valset, json_file, 'save_to_valset_json')
    # </save_to_valset_json>

    # <save_to_valset_pickle/>
    def save_to_valset_pickle(self, pickle_file = './wider_face_valset.pickle'):
        return self._save_pickle(self.valset, pickle_file, 'save_to_valset_pickle')
    # </save_to_valset_pickle>

    # <save_to_valset_hard_json/>
    def save_to_valset_hard_json(self, json_file = './wider_face_valset_hard.json'):
        return self._save_json(self.valset_hard, json_file, 'save_to_valset_hard_json')
    # </save_to_valset_hard_json>

    # <save_to_valset_hard_pickle/>
    def save_to_valset_hard_pickle(self, pickle_file = './wider_face_valset_hard.pickle'):
        return self._save_pickle(self.valset_hard, pickle_file, 'save_to_valset_hard_pickle')
    # </save_to_valset_hard_pickle>

    # <save_to_valset_medium_json/>
    def save_to_valset_medium_json(self, json_file = './wider_face_valset_medium.json'):
        return self._save_json(self.valset_medium, json_file, 'save_to_valset_medium_json')
    # </save_to_valset_medium_json>

    # <save_to_valset_medium_pickle/>
    def save_to_valset_medium_pickle(self, pickle_file = './wider_face_valset_medium.pickle'):
        return self._save_pickle(self.valset_medium, pickle_file, 'save_to_valset_medium_pickle')
    # </save_to_valset_medium_pickle>

    # <save_to_valset_easy_json/>
    def save_to_valset_easy_json(self, json_file = './wider_face_valset_easy.json'):
        return self._save_json(self.valset_easy, json_file, 'save_to_valset_easy_json')
    # </save_to_valset_easy_json>

    # <save_to_valset_easy_pickle/>
    def save_to_valset_easy_pickle(self, pickle_file = './wider_face_valset_easy.pickle'):
        return self._save_pickle(self.valset_easy, pickle_file, 'save_to_valset_easy_pickle')
    # </save_to_valset_easy_pickle>

    # <save_to_testset_json/>
    def save_to_testset_json(self, json_file = './wider_face_testset.json'):
        # BUGFIX: the default filename previously collided with the valset json
        return self._save_json(self.testset, json_file, 'save_to_testset_json')
    # </save_to_testset_json>

    # <save_to_testset_pickle/>
    def save_to_testset_pickle(self, pickle_file = './wider_face_testset.pickle'):
        # BUGFIX: the default filename previously collided with the valset pickle
        return self._save_pickle(self.testset, pickle_file, 'save_to_testset_pickle')
    # </save_to_testset_pickle>

    # <save_to_testset_hdf5/>
    def save_to_testset_hdf5(self, hdf5_file = './wider_face_testset.hdf5'):
        # TODO: not implemented yet; kept (and returning True) for interface compatibility
        return True
    # </save_to_testset_hdf5>    
# </WiderFaceParser>


# <class WiderFaceDataRandomCrop(object)>
# <class WiderFaceDataRandomCrop(object)>
class WiderFaceDataRandomCrop(object):
    """Randomly crop a sample {"image": HxWxC array, "target": (N, 10) array}.

    Box rows are (cy, cx, h, w, conf, ...attributes...). A box is kept only
    when its center falls inside the crop; kept boxes get their center
    translated into crop coordinates. If no box survives, an empty (0, 10)
    float32 array is returned.
    """
    # <method __init__>
    def __init__(self, min_crop_size=(512, 512)):
        # (w, h) lower bound for the crop; clamped to the image size so
        # images smaller than the minimum are returned (near) whole
        self._min_crop_size = min_crop_size
    # <method __init__>

    # <method __call__>
    def __call__(self, *args, **kwargs):
        image_np = args[0]["image"]
        boxes_np = args[0]["target"]
        img_h = image_np.shape[0]
        img_w = image_np.shape[1]
        # pick a crop size between the (clamped) minimum and the full image
        crop_w = random.randint(min(img_w, self._min_crop_size[0]), img_w)
        crop_h = random.randint(min(img_h, self._min_crop_size[1]), img_h)
        # BUGFIX: the upper bound was `dim - crop - 1`, so the crop could
        # never sit flush against the right/bottom edge of the image
        crop_x1 = random.randint(0, img_w - crop_w)
        crop_y1 = random.randint(0, img_h - crop_h)
        crop_x2 = crop_x1 + crop_w
        crop_y2 = crop_y1 + crop_h
        cropped_img_np = image_np[crop_y1:crop_y2, crop_x1:crop_x2, :]
        # translate surviving boxes into crop coordinates; mark the rest invalid
        cropped_boxes_np = boxes_np.copy()
        for box_idx in range(cropped_boxes_np.shape[0]):
            y = cropped_boxes_np[box_idx, 0]
            x = cropped_boxes_np[box_idx, 1]
            h = cropped_boxes_np[box_idx, 2]
            w = cropped_boxes_np[box_idx, 3]
            if x < crop_x1 or x >= crop_x2 or y < crop_y1 or y >= crop_y2 or h <= 0 or w <= 0:
                cropped_boxes_np[box_idx, 4] = -1  # conf < 0 marks an invalid box
                continue
            cropped_boxes_np[box_idx, 0] = y - crop_y1
            cropped_boxes_np[box_idx, 1] = x - crop_x1
        # end-for
        mask = cropped_boxes_np[:, 4] >= 0
        if mask.sum() > 0:
            cropped_boxes_np = cropped_boxes_np[mask]
        else:
            cropped_boxes_np = np.zeros((0, 10)).astype(np.float32)
        # end-if
        return {"image": cropped_img_np, "target": cropped_boxes_np}
    # <method __call__>
# <class WiderFaceDataRandomCrop(object)>


# <class WiderFaceDataFilterBoxes>
# <class WiderFaceDataFilterBoxes>
class WiderFaceDataFilterBoxes(object):
    """
    Drop ground-truth boxes that are too small relative to the image, or whose
    difficulty attributes exceed the configured levels. Box rows are
    (cy, cx, h, w, conf, blur, expression, illumination, occlusion, pose);
    the image is returned as float32. If nothing survives, an empty (0, 10)
    float32 array is returned.
    """
    # <method __init__>    
    def __init__(
        self, 
        min_h_norm = 0.04, 
        min_w_norm = 0.03, 
        blur_level = 1, 
        expression_level = 1, 
        illumination_level = 1, 
        occlusion_level = 1, 
        pose_level = 1
        ):
        self._min_h_norm = min_h_norm          # min box height as a fraction of image height
        self._min_w_norm = min_w_norm          # min box width as a fraction of image width
        self._blur_level = blur_level          # max accepted blur label
        self._expression_level = expression_level
        self._illumination_level = illumination_level
        self._occlusion_level = occlusion_level
        self._pose_level = pose_level
    # <method __init__>

    # <method __call__>
    def __call__(self, *args, **kwargs):
        sample = args[0]
        image_np = sample["image"]
        boxes_np = sample["target"]
        # absolute size thresholds for this image
        min_h = self._min_h_norm * image_np.shape[0]
        min_w = self._min_w_norm * image_np.shape[1]
        # a box survives when it is valid (conf >= 0), large enough, and all
        # of its difficulty attributes are at or below the configured levels
        keep = (
            (boxes_np[:, 4] >= 0)
            & (boxes_np[:, 2] > min_h)
            & (boxes_np[:, 3] > min_w)
            & (boxes_np[:, 5] <= self._blur_level)
            & (boxes_np[:, 6] <= self._expression_level)
            & (boxes_np[:, 7] <= self._illumination_level)
            & (boxes_np[:, 8] <= self._occlusion_level)
            & (boxes_np[:, 9] <= self._pose_level)
        )
        if keep.sum() > 0:
            boxes_np = boxes_np[keep]
        else:
            boxes_np = np.zeros((0, 10)).astype(np.float32)
        # end-if
        return {"image": image_np.astype(np.float32), "target": boxes_np}
    # <method __call__>
# <class WiderFaceDataFilterBoxes>


# <class WiderFaceDataImgResizeNormalize(object)>
# <class WiderFaceDataImgResizeNormalize(object)>
class WiderFaceDataImgResizeNormalize(object):
    """
    Resize the image to a fixed size, rescale the box coordinates to match,
    and normalize pixels as (pixel - mean) * norm. Box rows are
    (cy, cx, h, w, conf, ...); centers use the (size - 1)/(shape - 1)
    rescaling convention, sizes the plain size/shape ratio.
    """
    # <method __init__>
    def __init__(self, size, norm = (1./255., 1./255., 1./255.), mean = (128, 128, 128)):
        self._size = size            # target size handed to cv2.resize
        self._debug_show = False     # flip on to visualize the rescaled boxes
        self._mean = mean            # per-channel shift applied before scaling
        self._norm = norm            # per-channel scale applied after the shift
    # <method __init__>

    # <method __call__>
    def __call__(self, *args, **kwargs):
        sample = args[0]
        image_np = sample["image"]
        boxes_np = sample["target"]
        src_h = image_np.shape[0]
        src_w = image_np.shape[1]
        dst_h = self._size[0]
        dst_w = self._size[1]
        # NOTE(review): cv2.resize expects dsize as (width, height) while the
        # box math treats _size[0] as the height axis -- consistent only for
        # square sizes; confirm before using non-square sizes.
        if boxes_np.shape[0] > 0:
            boxes_np[:, 0] = boxes_np[:, 0] * (dst_h - 1) / (src_h - 1)  # center y
            boxes_np[:, 1] = boxes_np[:, 1] * (dst_w - 1) / (src_w - 1)  # center x
            boxes_np[:, 2] = boxes_np[:, 2] * dst_h / src_h              # box height
            boxes_np[:, 3] = boxes_np[:, 3] * dst_w / src_w              # box width
        # end-if
        image_np = cv2.resize(image_np, self._size, interpolation=cv2.INTER_LINEAR)
        image_np = (image_np - self._mean) * self._norm
        if self._debug_show:
            canvas = image_np.copy()
            for row_idx in range(boxes_np.shape[0]):
                if boxes_np[row_idx][4] > 0:
                    cy = boxes_np[row_idx][0]
                    cx = boxes_np[row_idx][1]
                    bh = boxes_np[row_idx][2]
                    bw = boxes_np[row_idx][3]
                    canvas = cv2.rectangle(
                        canvas,
                        (int(cx - bw // 2), int(cy - bh // 2)),
                        (int(cx + bw // 2), int(cy + bh // 2)),
                        [0, 256, 0], 1)
                # end-if
            # end-for
            cv2.imshow("WiderFaceDataImgResizeNormalize", canvas)
            cv2.waitKey(1)
        # end-if
        return {"image": image_np.astype(np.float32), "target": boxes_np}
    # <method __call__>
# <class WiderFaceDataImgResizeNormalize(object)>


# <class WiderFaceDataToTensor(object)>
# <class WiderFaceDataToTensor(object)>
class WiderFaceDataToTensor(object):
    """Convert a {"image", "target"} pair of numpy arrays to torch tensors,
    moving the image from HWC to CHW layout."""
    # <method __init__>
    def __init__(self):
        pass
    # <method __init__>

    # <method __call__>
    def __call__(self, *args, **kwargs):
        sample = args[0]
        chw_image = sample["image"].transpose(2, 0, 1)  # HWC -> CHW
        return {
            "image": torch.from_numpy(chw_image),
            "target": torch.from_numpy(sample["target"]),
        }
    # <method __call__>
# <class WiderFaceDataToTensor(object)>


# <class WiderFaceDataBoxEncode(object)>
# <class WiderFaceDataBoxEncode(object)>
class WiderFaceDataBoxEncode(object):
    """
    Encode ground-truth boxes into multi-scale, YOLO-style anchor targets. \n
    grids (tuple of tuple): output feature-map sizes, one (w, h) per detection level. \n
    anchors (tuple of tuple of tuple): anchor (h, w) sizes per grid level, in input-image pixels. \n
    num_classes (int): number of object classes (index 0 is treated as background below). \n
    ignore_threshold (float): anchors whose IoU with a ground-truth box exceeds this are removed from the negative mask. \n
    """
    # <method __init__>
    def __init__(
        self,
        grids,
        anchors,
        num_classes,
        ignore_threshold,
        ):
        # one anchor set is required per feature-map level
        assert(len(grids) == len(anchors))
        self._grids = grids
        self._anchors = np.array(anchors).astype(np.float32)
        self._num_classes = num_classes
        self._ignore_threshold = ignore_threshold
        self._debug_show = False  # flip on to visualize the encoded boxes with OpenCV
    # <method __init__>
    
    # <method __call__>
    def __call__(self, *args, **kwargs):
        # args[0]: {"image": CHW tensor, "target": (nT, >=5) tensor}; each
        # target row is (cy, cx, h, w, class, ...) in input-image pixels, and
        # a value < 0 in column 4 marks an invalid / padding row
        image = args[0]["image"]
        boxes = args[0]["target"]
        data_dict = { "image": image, "target": [ ] }
        # target[0]: raw boxes padded into a fixed 4096-row tensor (unused
        # rows stay at -1) so samples can be batched by the default collate.
        # NOTE(review): more than 4096 valid boxes would raise an IndexError
        # in the loop below -- confirm nT <= 4096 upstream.
        target_boxes = torch.zeros(4096, boxes.size(1)) - 1
        nT = boxes.size(0)
        for t in range(nT):
            if boxes[t, 4] < 0:
                continue
            target_boxes[t] = boxes[t]
        # end-for
        data_dict['target'].append(target_boxes)

        nI_c, nI_h, nI_w = image.size()
        # normalize coordinates to [0, 1] relative to the input image
        # (in-place: this mutates the caller's target tensor)
        boxes[:, 0] = boxes[:, 0] / (nI_h - 1)
        boxes[:, 1] = boxes[:, 1] / (nI_w - 1)
        boxes[:, 2] = boxes[:, 2] / nI_h
        boxes[:, 3] = boxes[:, 3] / nI_w
        # per-level accumulators, concatenated across levels at the end
        wh_scales = []
        pos_obj_masks = []
        neg_obj_masks = []
        conf_masks = []  # (unused)
        fm_t_yxhw_conf_cls = []
        if self._debug_show:
            image_show = image.numpy().transpose(1, 2, 0) / 255.0
        # endif self._debug_show
        for nL in range(len(self._grids)):            
            # this level's geometry: anchors, classes, grid width/height
            nA = len(self._anchors[nL])
            nC = self._num_classes
            nG_w = self._grids[nL][0]
            nG_h = self._grids[nL][1]
            # per-cell, per-anchor regression targets (y, x, h, w)
            fmty = torch.zeros(nG_h, nG_w, nA)
            fmtx = torch.zeros(nG_h, nG_w, nA)
            fmth = torch.zeros(nG_h, nG_w, nA)
            fmtw = torch.zeros(nG_h, nG_w, nA)
            # wh loss weight, objectness target and one-hot class target
            wh_scale = torch.zeros(nG_h, nG_w, nA) 
            tconf = torch.zeros(nG_h, nG_w, nA)
            tcls =  torch.zeros(nG_h, nG_w, nA, nC)
            tcls[:, :, :, 0] = 1 # set all is background ...
            # positive anchors get pos_obj_mask=1; neg_obj_mask starts all-ones
            pos_obj_mask =  torch.zeros(nG_h, nG_w, nA)
            neg_obj_mask =  torch.ones(nG_h, nG_w, nA)
            # drop invalid rows (column 4 < 0); after the first level the
            # remaining rows are all valid
            boxes = boxes[boxes[..., 4] >= 0]
            nT = boxes.size(0) # number of targets
            # 
            for t in range(nT): # "t" is target index
                if boxes[t, 4] < 0:
                    continue
                # box geometry back in input-image pixels
                iy = boxes[t, 0] * (nI_h - 1) # center y of the ground-truth box
                ix = boxes[t, 1] * (nI_w - 1) # center x of the ground-truth box
                ih = boxes[t, 2] * nI_h # height of the ground-truth box
                iw = boxes[t, 3] * nI_w # width of the ground-truth box
                # box geometry in feature-map (grid) units
                gy = boxes[t, 0] * (nG_h - 1) # center y on the grid
                gx = boxes[t, 1] * (nG_w - 1) # center x on the grid
                gh = boxes[t, 2] * nG_h # height on the grid
                gw = boxes[t, 3] * nG_w # width on the grid
                # get grid position(gj, gi) that the target box is belong to.
                gj = int(gy)
                gi = int(gx)
                # pick the best-matching anchor by IoU of shapes only: the gt
                # box and all anchors are placed at a common (0, 0) center
                gt_box = torch.FloatTensor([(0, 0, ih, iw) for _ in range(nA)])
                anchor_shapes = torch.cat( ( torch.zeros(nA, 2), torch.from_numpy(self._anchors[nL]) ) , 1)
                anch_ious = bbox_iou(gt_box, anchor_shapes, x1y1x2y2=False) # centers of gt_box and anchors are coincident
                best_val, best_n = anch_ious.max(0)
                # weight: normalized area is in [0, 1], so smaller boxes get a
                # larger scale (up to 2)
                wh_scale[gj, gi, best_n] = 2 - boxes[t, 2] * boxes[t, 3]
                pos_obj_mask[gj, gi, best_n] = 1
                neg_obj_mask[gj, gi, best_n] = 0
                # anchors overlapping the gt above the threshold are neither
                # positive nor negative -- they are simply ignored
                neg_obj_mask[gj, gi, anch_ious > self._ignore_threshold] = 0
                # cell-relative center offsets and log-ratio size targets
                fmty[gj, gi, best_n] = gy - gj
                fmtx[gj, gi, best_n] = gx - gi
                fmth[gj, gi, best_n] = torch.log(ih / self._anchors[nL][best_n][0]) # anchor[0] is the height
                fmtw[gj, gi, best_n] = torch.log(iw / self._anchors[nL][best_n][1]) # anchor[1] is the width
                # objectness and one-hot class target.
                # NOTE(review): the background bit set above (tcls[..., 0] = 1)
                # is not cleared here, so a non-background box ends up with two
                # active class bits -- confirm this is intended.
                tconf[gj, gi, best_n] = 1 # nL + 1
                tcls[gj, gi, best_n, int(boxes[t, 4])] = 1 # boxes[t, 5]
                # debug: verify the targets decode back to the original box
                if self._debug_show:
                    assert(iy == int((fmty[gj, gi, best_n] + gj) / (nG_h-1) * (nI_h-1)))
                    assert(ix == int((fmtx[gj, gi, best_n] + gi) / (nG_w-1) * (nI_w-1)))
                    assert(ih == int((math.exp(fmth[gj, gi, best_n]) * self._anchors[nL][best_n][0])))
                    assert(iw == int((math.exp(fmtw[gj, gi, best_n]) * self._anchors[nL][best_n][1])))
                    image_show = cv2.rectangle(image_show, (int(ix - iw // 2), int(iy - ih // 2)), (int(ix + iw // 2), int(iy + ih // 2)), [0, 256, 0], 1)
                # end-if
            # end-for
            # flatten this level's targets to (nGh*nGw*nA, 4+1+nC)
            fm_t_yxhw_conf_cls.append(
                torch.cat(
                    (
                        fmty.unsqueeze(3), 
                        fmtx.unsqueeze(3), 
                        fmth.unsqueeze(3), 
                        fmtw.unsqueeze(3), 
                        tconf.unsqueeze(3), 
                        tcls
                    ), 
                    3
                ).view(-1, 4+1+nC)
            ) # [[nGh, nGw, nA, 4+1+nC]]
            pos_obj_masks.append(pos_obj_mask.view(-1))
            neg_obj_masks.append(neg_obj_mask.view(-1))
            wh_scales.append(wh_scale.view(-1))
        # endfor
        # target[1..4]: per-anchor targets, positive mask, negative mask and
        # wh loss scale, each concatenated over all levels
        data_dict['target'].append( torch.cat(fm_t_yxhw_conf_cls, 0) )
        data_dict['target'].append( torch.cat(pos_obj_masks, 0) )
        data_dict['target'].append( torch.cat(neg_obj_masks, 0) )
        data_dict['target'].append( torch.cat(wh_scales, 0) )
        if self._debug_show:
            cv2.imshow("WiderFaceDataBoxEncode", image_show)
            cv2.waitKey(1) 
        # endif
        return data_dict
    # <method __call__>
# <class WiderFaceDataBoxEncode(object)>


# <class WiderFaceDataset>
class WiderFaceDataset(torch.utils.data.Dataset):
    """
    PyTorch Dataset over WIDER FACE annotations.

    Loads a list of {"path": ..., "bboxes": [...]} records from a JSON or
    pickle annotation file (as produced by WiderFaceParser) and yields
    samples of the form {"image": ndarray, "target": (N, 10) float32 ndarray}
    where each target row is [y, x, h, w, class, blur, expression,
    illumination, occlusion, pose] (class is always 0: face only).
    """
    # <method __init__>
    def __init__(self, json_path = None, pickle_path = None, transform=None, pre_load_images_to_ram = False):
        """
        json_path (str): JSON annotation file (takes precedence over pickle_path).
        pickle_path (str): pickled annotation file, used when json_path is None.
        transform (callable): optional transform applied to every sample dict.
        pre_load_images_to_ram (bool): if True, decode every image once here
            and keep it in RAM (trades memory for per-item load time).

        Raises:
            RuntimeError: if both json_path and pickle_path are None.
        """
        if json_path:
            with open(json_path, 'r') as fr:
                self.data = json.load(fr)
        elif pickle_path:
            with open(pickle_path, 'rb') as fr:
                self.data = pickle.load(fr)
        else:
            # BUGFIX: the exception object was created but never raised,
            # letting execution continue and fail later with AttributeError.
            raise RuntimeError("Error in WiderFaceDataset.__init__, both json_path and pickle_path are None")
        # endif
        # informational only: the largest per-image box count in the split
        max_box = max((len(dat["bboxes"]) for dat in self.data), default=0)
        print("max boxes per image is: ", max_box)
        self._transform = transform
        self._pre_load_images_to_ram = pre_load_images_to_ram
        if self._pre_load_images_to_ram:
            pbar = ProgressBar(Title = "Pre-loading Images", Total=len(self.data))
            for index in range(len(self.data)):
                self.data[index]["image"] = cv2.imread(self.data[index]["path"], 1)
                pbar.update()
            # end-for
            pbar.close()
        # end-if
    # <method __init__>

    # <method __len__>
    def __len__(self):
        """Return the number of annotated images in the split."""
        return len(self.data)
    # <method __len__>

    # <method __getitem__>
    def __getitem__(self, index):
        """
        Return the sample at `index` as {"image": ndarray, "target": ndarray},
        passed through the configured transform (if any).
        """
        record = self.data[index]
        # get numpy array of image, from the RAM cache or from disk
        if self._pre_load_images_to_ram:
            image_np = record["image"]
        else:
            image_np = cv2.imread(record["path"], 1)
        # end-if
        # one 10-element row per box:
        # [y, x, h, w, class(=0, face only), blur, expression, illumination, occlusion, pose]
        # 'xywh' holds (center x, center y, width, height) in the original image
        rows = []
        for bbox in record["bboxes"]:
            x, y, w, h = bbox['xywh'][:4]
            rows.append(np.array(
                [y, x, h, w, 0,
                 bbox['blur'], bbox['expression'], bbox['illumination'],
                 bbox['occlusion'], bbox['pose']],
                dtype=np.float32))
        # endfor
        if rows:
            boxes_np = np.stack(rows, axis=0)
        else:
            boxes_np = np.zeros((0, 10), dtype=np.float32)
        # endif
        data = {"image": image_np, "target": boxes_np}
        if self._transform:
            data = self._transform(data)
        # endif
        return data
    # <method __getitem__>
# <class WiderFaceDataset>


# <function generateDataSetJsonFiles>
def generateDataSetJsonFiles():
    """
    Parse the raw WIDER FACE .mat split annotation files and dump each
    split (train / val / hard / medium / easy / test) to a JSON file.
    """
    parser = WiderFaceParser(
        path_to_train_images = './data/wider_face/WIDER_train/images',
        wider_face_train_bbx_gt_mat = './data/wider_face/wider_face_split/wider_face_train.mat',
        path_to_val_images = './data/wider_face/WIDER_val/images',
        wider_face_val_bbx_gt_mat = './data/wider_face/wider_face_split/wider_face_val.mat',
        wider_face_val_bbx_gt_hard_mat = './data/wider_face/wider_face_split/wider_hard_val.mat',
        wider_face_val_bbx_gt_medium_mat = './data/wider_face/wider_face_split/wider_medium_val.mat',
        wider_face_val_bbx_gt_easy_mat = './data/wider_face/wider_face_split/wider_easy_val.mat',
        path_to_test_images = './data/wider_face/WIDER_test/images',
        wider_face_test_filelist_mat = './data/wider_face/wider_face_split/wider_face_test.mat',
        )
    # one (writer, destination) pair per split, written in the original order;
    # the '//' in the destinations is kept verbatim (the OS collapses it)
    split_writers = (
        (parser.save_to_trainset_json,      './data/wider_face//WIDER_train/wider_face_train.json'),
        (parser.save_to_valset_json,        './data/wider_face//WIDER_val/wider_face_val.json'),
        (parser.save_to_valset_hard_json,   './data/wider_face//WIDER_val/wider_hard_val.json'),
        (parser.save_to_valset_medium_json, './data/wider_face//WIDER_val/wider_medium_val.json'),
        (parser.save_to_valset_easy_json,   './data/wider_face//WIDER_val/wider_easy_val.json'),
        (parser.save_to_testset_json,       './data/wider_face//WIDER_test/wider_face_test.json'),
        )
    for write_split, destination in split_writers:
        write_split(destination)
# <function generateDataSetJsonFiles>

# <function testWiderFaceDataset>
def testWiderFaceDataset():
    """
    Visual smoke test for the data pipeline: builds the val-set Dataset and
    DataLoader with the full augmentation/encode transform stack, then shows
    each image with its target boxes drawn. Press 'q' in the window to stop.
    """
    # grids = [[13,13],[26,26],[52,52]]
    # anchors = [[[116,90],[156,198],[373,326]],[[30,61],[62,45],[59,119]],[[10,13],[16,30],[33,23]]]
    grids = [[13,13]]
    anchors = [[[10,13],[16,30],[33,23],[30,61],[62,45],[59,119],[116,90],[156,198],[373,326]]]
    num_classes = 1
    ignore_threshold = 0.5
    # new a Dataset and DataLoader
    dataset = WiderFaceDataset(
        json_path='./data/wider_face/WIDER_val/wider_face_val.json', 
        transform=transforms.Compose([
            ObjDetDataAugRandomHorizontalFlip(),
            # ObjDetDataAugRandomVerticalFlip(),            
            ObjDetDataAugRandomRotate(max_rotate_angle=15),
            # WiderFaceDataRandomCrop(min_crop_size = (256, 256)), 
            WiderFaceDataFilterBoxes(min_h_norm = 0.02, min_w_norm = 0.02, blur_level = 2, expression_level = 1, illumination_level = 1, occlusion_level = 1, pose_level = 1),            
            WiderFaceDataImgResizeNormalize(size = (416, 416), norm = (1./255., 1./255., 1./255.), mean = (128, 128, 128)),
            WiderFaceDataToTensor(),
            WiderFaceDataBoxEncode(grids = grids, anchors = anchors, num_classes = num_classes, ignore_threshold = ignore_threshold)
            ]),
        pre_load_images_to_ram=False
        )
    dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1, shuffle=True, num_workers=0)
    # enumerate batches and visualize ...
    for i_batch, sample_batched in enumerate(dataloader):
        batch_imgs = sample_batched["image"]
        batch_targets = sample_batched["target"]
        # BUGFIX: removed a leftover `import pdb; pdb.set_trace()` debug
        # breakpoint that halted every run of this test.
        nB = batch_imgs.size(0)
        for b in range(nB):
            image = batch_imgs[b]
            # undo the mean-shift for display (presumably values are centered
            # around 0 by ImgResizeNormalize -- TODO confirm against transform)
            img_numpy = np.transpose(image.numpy(), (1, 2, 0)) + 0.5
            img_show = img_numpy.copy()
            target_boxes = batch_targets[0][b]
            target_boxes = target_boxes[target_boxes[..., 4] >= 0]
            # draw target boxes (rows are [y, x, h, w, ...]) ...
            for box in target_boxes:
                y = box[0]
                x = box[1]
                h = box[2]
                w = box[3]
                # BUGFIX: the bottom-right y used floor division (h // 2) while
                # the other three corner coordinates used true division.
                img_show = cv2.rectangle(img_show, (int(x - w / 2), int(y - h / 2)), (int(x + w / 2), int(y + h / 2)), [0, 256, 256], 1)
            # end-for 
            cv2.imshow("img_show", img_show)
            key = cv2.waitKey(0)
            if key & 0xFF == ord('q'):
                # BUGFIX: was `assert(0)` -- exit cleanly instead of raising
                # AssertionError (asserts are also stripped under -O).
                return
            # end-if
        # end-for
    # end-for
# <function testWiderFaceDataset>

# <__main__>
if __name__ == '__main__':
    # generateDataSetJsonFiles()  # one-off: regenerate the split JSON files
    testWiderFaceDataset()
# <__main__>


        

        
#     # for i_batch, sample_batched in enumerate(dataloader):        
#     #     image =         sample_batched["image"][0]
#     #     target_boxes =  sample_batched["target"][0][0]
#     #     fm_boxes =      sample_batched["target"][1][0]
#     #     pos_masks =     sample_batched["target"][2][0]
#     #     neg_masks =     sample_batched["target"][3][0]
#     #     # # import pdb; pdb.set_trace()
#     #     img_numpy = np.transpose(image.numpy(), (1, 2, 0)) + 0.5
#     #     img_show = img_numpy.copy()
#     #     # draw target boxes ...
#     #     for box in target_boxes:
#     #         if box[4] == 0:
#     #             continue
#     #         y = box[0]
#     #         x = box[1]
#     #         h = box[2]
#     #         w = box[3]
#     #         img_show = cv2.rectangle(img_show, (int(x - w / 2), int(y - h / 2)), (int(x + w / 2), int(y + h // 2)), [0, 256, 256], 2)
#     #     # end-for
#     #     nC = num_classes
#     #     nL = len(anchors)    
#     #     pred_box_index = int(0)
#     #     for l in range(nL):
#     #         nG_h = grids[l][0]
#     #         nG_w = grids[l][1]
#     #         nA = len(anchors[l])
#     #         for gj in range(nG_h):
#     #             for gi in range(nG_w):
#     #                 for n in range(nA):
#     #                     # # import pdb; pdb.set_trace()
#     #                     if pos_masks[pred_box_index] > 0:
#     #                         anchor = anchors[l][n]
#     #                         box_pred = fm_boxes[pred_box_index].clone()
#     #                         y = (box_pred[0] + gj) / (nG_h - 1) * (img_numpy.shape[0] - 1)
#     #                         x = (box_pred[1] + gi) / (nG_w - 1) * (img_numpy.shape[1] - 1)
#     #                         h = math.exp(box_pred[2]) * anchor[0]
#     #                         w = math.exp(box_pred[3]) * anchor[1]
#     #                         img_show = cv2.rectangle(
#     #                             img_show, 
#     #                             (int(x - w / 2), int(y - h / 2)), 
#     #                             (int(x + w / 2), int(y + h / 2)), 
#     #                             [256, 0, 0], 
#     #                             1
#     #                             )
#     #                     # end-if
#     #                     pred_box_index += 1
#     #                 # end-for
#     #             # end-for
#     #         # end-for
#     #     # end-for       
# # <function testWiderFaceDataset>



# if False:
#     dataset = WiderFaceDataset(
#         json_path='./data/wider_face/WIDER_val/wider_face_val.json', 
#         transform=transforms.Compose([
#             WiderFaceDataRandomCrop((512, 512)), 
#             WiderFaceDataImgResizeNormalize((416, 416)),
#             WiderFaceDataToTensor(),
#             WiderFaceDataBoxEncode(grids = grids, anchors = anchors, num_classes = num_classes, ignore_threshold = ignore_threshold)
#             ])
#         )
#     dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
#     for i_batch, sample_batched in enumerate(dataloader):
#         if i_batch > 100:
#             break
        
#         img = np.transpose(sample_batched["image"][0].numpy(), (1, 2, 0)) + 0.5
#         img_show = img.copy()
#         boxes = sample_batched["target"][0][0].numpy()
#         boxes_grids = []
#         offset = 0
#         for g in range(len(grids)):
#             nG_h = grids[g][0]
#             nG_w = grids[g][1]
#             num_in_this_grid_size = nG_h * nG_w * len(anchors[g])
#             if g == len(grids) - 1:
#                 boxes_grids.append( boxes[offset:, :].reshape( nG_h, nG_w, len(anchors[g]), 5 + num_classes) )    
#             else:            
#                 boxes_grids.append( boxes[offset:offset+num_in_this_grid_size, :].reshape( nG_h, nG_w, len(anchors[g]), 5 + num_classes) )
#             # endif
#             offset = offset+num_in_this_grid_size
#         # endfor
#         for g in range(len(grids)):
#             nG_h = grids[g][0]
#             nG_w = grids[g][1]
#             for gj in range(nG_h):
#                 for gi in range(nG_w):
#                     for a in range(len(anchors[g])):
#                         if boxes_grids[g][gj][gi][a][4] != 0:
#                             anchor = anchors[g][a]
#                             # 
#                             y = (boxes_grids[g][gj][gi][a][0] + gj) / nG_h * img.shape[0]
#                             x = (boxes_grids[g][gj][gi][a][1] + gi) / nG_w * img.shape[1]
#                             h = math.exp(boxes_grids[g][gj][gi][a][2]) * anchor[0] / nG_h * img.shape[0]
#                             w = math.exp(boxes_grids[g][gj][gi][a][3]) * anchor[1] / nG_w * img.shape[1]
#                             # 
#                             img_show = cv2.rectangle(img_show, (int(x - w / 2), int(y - h / 2)), (int(x + w / 2), int(y + h // 2)), [0, 256, 0], 1)
#         # endfor
#         cv2.imshow("img_show", img_show)
#         cv2.waitKey(0)
# else:
#     dataset_eval = WiderFaceDataset(
#         json_path='./data/wider_face/WIDER_val/wider_face_val.json', 
#         transform=transforms.Compose([
#             WiderFaceDataRandomCrop((512, 512)), 
#             WiderFaceDataImgResizeNormalize((416, 416)),
#             WiderFaceDataToTensor(),
#             WiderFaceDataBoxEncodeEval(grids = grids, anchors = anchors, num_classes = num_classes, ignore_threshold = ignore_threshold)
#             ])
#         )
#     dataloader_eval = torch.utils.data.DataLoader(dataset=dataset_eval, batch_size=1, shuffle=False, num_workers=0)
#     for i_batch, sample_batched in enumerate(dataloader_eval):
#         if i_batch > 100:
#             break
#         img = np.transpose(sample_batched["image"][0].numpy(), (1, 2, 0)) + 0.5
#         img_show = img.copy()
#         boxes = sample_batched["target"][0][0].numpy()
#         boxes_grids = []
#         offset = 0
#         for g in range(len(grids)):
#             nG_h = grids[g][0]
#             nG_w = grids[g][1]
#             num_in_this_grid_size = nG_h * nG_w * len(anchors[g])
#             if g == len(grids) - 1:
#                 boxes_grids.append( boxes[offset:, :].reshape( nG_h, nG_w, len(anchors[g]), 5 + num_classes) )    
#             else:            
#                 boxes_grids.append( boxes[offset:offset+num_in_this_grid_size, :].reshape( nG_h, nG_w, len(anchors[g]), 5 + num_classes) )
#             # endif
#             offset = offset+num_in_this_grid_size
#         # endfor
#         for g in range(len(grids)):
#             nG_h = grids[g][0]
#             nG_w = grids[g][1]
#             for gj in range(nG_h):
#                 for gi in range(nG_w):
#                     for a in range(len(anchors[g])):
#                         if boxes_grids[g][gj][gi][a][4] != 0:
#                             anchor = anchors[g][a]
#                             # 
#                             y = boxes_grids[g][gj][gi][a][0]
#                             x = boxes_grids[g][gj][gi][a][1]
#                             h = boxes_grids[g][gj][gi][a][2]
#                             w = boxes_grids[g][gj][gi][a][3]
#                             # 
#                             img_show = cv2.rectangle(img_show, (int(x - w / 2), int(y - h / 2)), (int(x + w / 2), int(y + h // 2)), [0, 256, 0], 1)
#         # endfor
#         cv2.imshow("img_show", img_show)
#         cv2.waitKey(0)  
# # endif
# cv2.destroyAllWindows()
        
        

# # <class WiderFaceDataBoxEncodeEval(object)>
# class WiderFaceDataBoxEncodeEval(object):
#     """
#     grids (tuple of tuple): output feature size. \n
#     anchors (tuple of tuple of tuple): anchor boxes size of each grid size. \n
#     num_classes (int): number of object classes. \n
#     ignore_threshold (float): iou threshold to ignoring the anchor boxes. \n
#     """
#     # <method __init__>
#     def __init__(
#         self,
#         grids = (
#             (13, 13),
#             (26, 26), 
#             (52, 52),
#             ), 
#         anchors = (
#             ((116,90), (156,198), (373,326)),
#             ((30, 61), (62,  45), (59, 119)), 
#             ((10, 13), (16,  30), (33, 23)), 
#             ),
#         num_classes = 2,
#         ignore_threshold = 0.5,
#         ):
#         # print(len(grids))
#         # print(len(anchors))
#         assert(len(grids) == len(anchors))
#         self._grids = grids
#         self._anchors = np.array(anchors).astype(np.float32)
#         self._num_classes = num_classes
#         self._ignore_threshold = ignore_threshold
#         self._debug_show = False
#     # <method __init__>
    
#     # <method __call__>
#     def __call__(self, *args, **kwargs):
#         image = args[0]["image"]
#         boxes = args[0]["target"]
#         # 
#         nI_c, nI_h, nI_w = image.size()
#         # 
#         boxes[:, 0] = boxes[:, 0] / (nI_h-1)
#         boxes[:, 1] = boxes[:, 1] / (nI_w-1)
#         boxes[:, 2] = boxes[:, 2] / nI_h
#         boxes[:, 3] = boxes[:, 3] / nI_w
#         # 
#         masks = []
#         conf_masks = []
#         fms = []
#         if self._debug_show:
#             image_show = image.numpy().transpose(1, 2, 0) / 255.0
#         # 
#         for nL in range(len(self._grids)):            
#             nT = boxes.size(0) # number of targets
#             nA = len(self._anchors[nL])
#             nC = self._num_classes
#             nG_w = self._grids[nL][0]
#             nG_h = self._grids[nL][1]
#             # 
#             ty =    torch.zeros(nG_h, nG_w, nA, 1)
#             tx =    torch.zeros(nG_h, nG_w, nA, 1)
#             th =    torch.zeros(nG_h, nG_w, nA, 1)
#             tw =    torch.zeros(nG_h, nG_w, nA, 1)
#             tconf = torch.zeros(nG_h, nG_w, nA, 1)
#             tcls =  torch.zeros(nG_h, nG_w, nA, nC)
#             # 
#             mask =  torch.zeros(nG_h, nG_w, nA)
#             conf_mask = torch.ones(nG_h, nG_w, nA)
#             # 
#             for t in range(nT): # "t" is target index
#                 if boxes[t, 4] < 0:
#                     continue
#                 gy = boxes[t, 0] * (nG_h-1) # Center y of the ground turth box
#                 gx = boxes[t, 1] * (nG_w-1) # Center x of the ground turth box
#                 gh = boxes[t, 2] * nG_h # Height of the ground turth box
#                 gw = boxes[t, 3] * nG_w # Width of the ground turth box
#                 # get grid position(gj, gi) that the target box is belong to.
#                 gj = int(gy)
#                 gi = int(gx)
#                 # 
#                 gt_box = torch.FloatTensor([(0, 0, gh, gw) for _ in range(nA)]) # gt_box = torch.FloatTensor([0, 0, gw, gh]).unsqueeze(0)
#                 anchor_shapes = torch.cat( ( torch.zeros(nA, 2), torch.from_numpy(self._anchors[0]) ) , 1)
#                 anch_ious = bbox_iou(gt_box, anchor_shapes)
#                 conf_mask[gj, gi, anch_ious > self._ignore_threshold] = 0 # Where the overlap is larger than threshold set mask to zero (ignore)
#                 best_n = torch.argmax(anch_ious)
#                 # 
#                 mask[gj, gi, best_n] = 1
#                 conf_mask[gj, gi, best_n] = 1
#                 # 
#                 ty[gj, gi, best_n] = gy / (nG_h-1) * (nI_h-1)
#                 tx[gj, gi, best_n] = gx / (nG_w-1) * (nI_w-1)
#                 th[gj, gi, best_n] = gh / nG_h * nI_h
#                 tw[gj, gi, best_n] = gw / nG_w * nI_w
#                 tconf[gj, gi, best_n] = 1 # nL + 1
#                 tcls[gj, gi, best_n, int(boxes[t, 4])] = 1
#                 # debug show ...
#                 if self._debug_show:
#                     y = ty[gj, gi, best_n]
#                     x = tx[gj, gi, best_n]
#                     h = th[gj, gi, best_n]
#                     w = tw[gj, gi, best_n]
#                     image_show = cv2.rectangle(image_show, (int(x - w // 2), int(y - h // 2)), (int(x + w // 2), int(y + h // 2)), [0, 256, 0], 1)
#                 # endif
#             # endfor
#             fm = torch.cat((ty, tx, th, tw, tconf, tcls), 3)
#             # 
#             fms.append(fm.view(-1, 4 + 1 + nC)) # [nGh * nGw * nA, 4+1+nC]
#             masks.append(mask.view(-1, 1))
#             conf_masks.append(conf_mask.view(-1, 1))
#         # endfor
#         if self._debug_show:
#             cv2.imshow("WiderFaceDataBoxEncode", image_show)
#             cv2.waitKey(1) 
#         # endif
#         return {"image": image, "target": (torch.cat(fms, 0), torch.cat(masks, 0), torch.cat(conf_masks, 0))}
#     # <method __call__>
# # <class WiderFaceDataBoxEncodeEval(object)>
