import os
import sys
import json

import cv2
import numpy as np
import scipy.io

import torch

from data.retinaface.dataaugmentation import *

# <Class: WiderFaceParser/>
class WiderFaceParser(object):
    """Converters for WIDER FACE annotations.

    Parses the official ``.mat`` annotation files and the RetinaFace
    ``label.txt`` format into a common JSON structure mapping
    ``"<event>/<image>.jpg" -> {'name', 'event', 'bboxes'}``, and can merge
    both sources into one enhanced annotation file that carries RetinaFace
    landmarks plus the official per-box attributes.
    """

    # <Method: ParseOfficalTrainValMat/>
    @staticmethod
    def ParseOfficalTrainValMat(matfile, jsonfile='./data/wider_face/retinaface_gt_v1.1/train_val.json'):
        """Parse an official WIDER FACE train/val ``.mat`` file into JSON.

        Args:
            matfile (str): Path to the official ``.mat`` annotation file.
            jsonfile (str): Path the parsed dataset is dumped to as JSON.

        Returns:
            dict: ``"<event>/<image>.jpg" -> image info dict`` where each
            bbox carries "x1y1x2y2" plus the official attributes
            (blur, expression, illumination, invalid, occlusion, pose).
        """
        dataset = { }
        fmat = scipy.io.loadmat(matfile)
        event_list = fmat.get('event_list')
        file_list = fmat.get('file_list')
        face_bbx_list = fmat.get('face_bbx_list')
        blur_label_list = fmat.get('blur_label_list')
        expression_label_list = fmat.get('expression_label_list')
        illumination_label_list = fmat.get('illumination_label_list')
        invalid_label_list = fmat.get('invalid_label_list')
        occlusion_label_list = fmat.get('occlusion_label_list')
        pose_label_list = fmat.get('pose_label_list')
        # 'gt_list' only exists in the easy/medium/hard subset .mat files.
        # dict.get simply returns None for a missing key, so the original
        # bare try/except around this lookup could never fire and the
        # diagnostic below was never printed.
        gt_list = fmat.get('gt_list')
        if gt_list is None:
            print("is not a subset.")
        # end-if
        for event_idx, event in enumerate(event_list):
            event_name = event[0][0]
            for im_idx, im in enumerate(file_list[event_idx][0]):
                im_name = im[0][0]
                face_bbx = face_bbx_list[event_idx][0][im_idx][0]
                face_bbx_blur = blur_label_list[event_idx][0][im_idx][0]
                face_bbx_expression = expression_label_list[event_idx][0][im_idx][0]
                face_bbx_illumination = illumination_label_list[event_idx][0][im_idx][0]
                face_bbx_invalid = invalid_label_list[event_idx][0][im_idx][0]
                face_bbx_occlusion = occlusion_label_list[event_idx][0][im_idx][0]
                face_bbx_pose = pose_label_list[event_idx][0][im_idx][0]
                image_info_dict = {
                    'name': im_name,
                    'event': event_name,
                    'bboxes': [],
                    }
                # Subset files carry the 1-based indices of the boxes to keep.
                keep_indies = gt_list[event_idx][0][im_idx][0] if gt_list is not None else None
                for i in range(face_bbx.shape[0]):
                    if keep_indies is not None and (i + 1) not in keep_indies:
                        continue
                    # end-if
                    x1 = float(face_bbx[i][0])
                    y1 = float(face_bbx[i][1])
                    w = float(face_bbx[i][2])
                    h = float(face_bbx[i][3])
                    # Skip degenerate boxes with non-positive width/height.
                    if w <= 0 or h <= 0:
                        continue
                    # end-if
                    image_info_dict['bboxes'].append({
                        "x1y1x2y2": [x1, y1, x1 + w, y1 + h],
                        'blur': int(face_bbx_blur[i][0]),
                        'expression': int(face_bbx_expression[i][0]),
                        'illumination': int(face_bbx_illumination[i][0]),
                        'invalid': int(face_bbx_invalid[i][0]),
                        'occlusion': int(face_bbx_occlusion[i][0]),
                        'pose': int(face_bbx_pose[i][0]),
                        })
                # end-for
                dataset[os.path.join(event_name, im_name + '.jpg')] = image_info_dict
            # end-for
        # end-for
        with open(jsonfile, 'w+') as fp:
            json.dump(dataset, fp, sort_keys=True, indent=4)
        # end-with
        return dataset
    # <Method: /ParseOfficalTrainValMat>

    # <Method: ParseRetinaTrainValTxt/>
    @staticmethod
    def ParseRetinaTrainValTxt(txtfile, jsonfile='./data/wider_face/retinaface_gt_v1.1/train_val.json'):
        """Parse a RetinaFace ``label.txt`` annotation file into JSON.

        Lines starting with ``#`` introduce an image path; the following
        numeric lines describe one face each: ``x y w h`` (val set),
        optionally followed by 5 landmark (x, y, flag) triplets (train set).

        Args:
            txtfile (str): Path to the RetinaFace label.txt file.
            jsonfile (str): Path the parsed dataset is dumped to as JSON.

        Returns:
            dict: ``path -> image info dict`` (name/event are unknown here).
        """
        with open(txtfile, 'r') as fp:
            lines = fp.readlines()
        # end-with
        imgs_path = []
        words = []
        labels = []
        isFirst = True
        for line in lines:
            line = line.rstrip()
            if line.startswith('#'):
                # New image header: flush the previous image's labels first.
                if isFirst is True:
                    isFirst = False
                else:
                    words.append(labels.copy())
                    labels.clear()
                # end-if
                imgs_path.append(line[2:])
            else:
                labels.append([float(x) for x in line.split(' ')])
            # end-if
        # end-for
        words.append(labels)  # flush the last image's labels
        dataset = { }
        for path, labels in zip(imgs_path, words):
            image_info_dict = {
                'name': "Unknow",
                'event': "Unknow",
                'bboxes': [],
                }
            for label in labels:
                bbox = {
                    "x1y1x2y2": [float(label[0]), float(label[1]), float(label[0]) + float(label[2]), float(label[1]) + float(label[3])],
                    }
                if len(label) > 4:
                    # Train-set row: 5 landmarks stored as (x, y, flag)
                    # triplets starting at index 4; the flag is dropped.
                    bbox["landmarks"] = [
                        [ label[ 4], label[ 5] ],
                        [ label[ 7], label[ 8] ],
                        [ label[10], label[11] ],
                        [ label[13], label[14] ],
                        [ label[16], label[17] ],
                        ]
                # end-if
                image_info_dict['bboxes'].append(bbox)
            # end-for
            dataset[path] = image_info_dict
        # end-for
        with open(jsonfile, 'w+') as fp:
            json.dump(dataset, fp, sort_keys=True, indent=4)
        # end-with
        return dataset
    # <Method: /ParseRetinaTrainValTxt>

    # <Method: MergeRetinaAndOfficalTrainValJson/>
    @staticmethod
    def MergeRetinaAndOfficalTrainValJson(retina_jsonfile, offical_jsonfile, merged_jsonfile='./data/wider_face/retinaface_gt_v1.1/merged_train_val.json'):
        """Merge RetinaFace landmarks with official per-box attributes.

        Boxes are paired by position and must match exactly; images where
        the two sources disagree are skipped (with a console message).

        Args:
            retina_jsonfile (str): JSON produced by ParseRetinaTrainValTxt.
            offical_jsonfile (str): JSON produced by ParseOfficalTrainValMat.
            merged_jsonfile (str): Output path for the merged JSON.

        Returns:
            dict: The merged annotations (also written to merged_jsonfile).

        Raises:
            AssertionError: If a retina image key is missing from the
                official annotations.
        """
        with open(retina_jsonfile, 'r') as fs:
            retina_dict = json.load(fs)
        # end-with
        with open(offical_jsonfile, 'r') as fs:
            offical_dict = json.load(fs)
        # end-with
        enhanced_dict = { }
        print("len(retina_dict) = {0}".format(len(retina_dict)))
        for key in retina_dict.keys():
            if key not in offical_dict:
                # Explicit raise instead of assert(0): not stripped under -O,
                # and it names the offending key.
                raise AssertionError("key missing from offical annotations: {0}".format(key))
            # end-if
            retina_dict[key]['name']  = offical_dict[key]['name']
            retina_dict[key]['event'] = offical_dict[key]['event']
            offical_rects = [bbox['x1y1x2y2'] for bbox in offical_dict[key]['bboxes']]
            retina_rects = [bbox['x1y1x2y2'] for bbox in retina_dict[key]['bboxes']]
            # A single all-zero retina box marks an image the official set
            # annotates with no (valid) faces; keep it with empty bboxes.
            no_face = retina_rects == [[0.0, 0.0, 0.0, 0.0]] and len(offical_rects) == 0
            if offical_rects != retina_rects:
                if no_face:
                    print("[len(offical_rects) == 0]: ", key)
                else:
                    # Irreconcilable disagreement: skip this image entirely.
                    print("[offical_rects != retina_rects]: ", key)
                    continue
                # end-if
            # end-if
            enhanced_dict[key] = {
                'name': offical_dict[key]['name'],
                'event': offical_dict[key]['event'],
                'bboxes': [ ],
                }
            if no_face:
                continue
            # end-if
            for retina_bbox, offical_bbox in zip(retina_dict[key]['bboxes'], offical_dict[key]['bboxes']):
                assert retina_bbox['x1y1x2y2'] == offical_bbox['x1y1x2y2']
                merged_bbox = {
                    "x1y1x2y2": retina_bbox['x1y1x2y2'],
                    "blur": offical_bbox['blur'],
                    "expression": offical_bbox['expression'],
                    "illumination": offical_bbox['illumination'],
                    "invalid": offical_bbox['invalid'],
                    "occlusion": offical_bbox['occlusion'],
                    "pose": offical_bbox['pose'],
                    }
                if 'landmarks' in retina_bbox:
                    merged_bbox["landmarks"] = retina_bbox['landmarks']
                # end-if
                enhanced_dict[key]['bboxes'].append(merged_bbox)
            # end-for
        # end-for
        with open(merged_jsonfile, 'w+') as fp:
            json.dump(enhanced_dict, fp, sort_keys=True, indent=4)
        # end-with
        return enhanced_dict
    # <Method: /MergeRetinaAndOfficalTrainValJson>

    # <Method: ParseOfficalTestMat/>
    @staticmethod
    def ParseOfficalTestMat(matfile, jsonfile='./data/wider_face/retinaface_gt_v1.1/test.json'):
        """Parse the official WIDER FACE test ``.mat`` into a path list.

        The test split has no box annotations, so the result is just a list
        of "<event>/<image>.jpg" paths, also dumped to ``jsonfile``.

        Returns:
            list[str]: Relative image paths of the test split.
        """
        dataset = [ ]
        fmat = scipy.io.loadmat(matfile)
        event_list = fmat.get('event_list')
        file_list = fmat.get('file_list')
        for event_idx, event in enumerate(event_list):
            event_name = event[0][0]
            for im_idx, im in enumerate(file_list[event_idx][0]):
                im_name = im[0][0]
                dataset.append(os.path.join(event_name, im_name + '.jpg'))
            # end-for
        # end-for
        with open(jsonfile, 'w+') as fs:
            json.dump(dataset, fs, sort_keys=True, indent=4)
        # end-with
        return dataset
    # <Method: /ParseOfficalTestMat>

# <Class: /WiderFaceParser>

# <Class: EnhancedRetinaWiderFaceDataset/>
class EnhancedRetinaWiderFaceDataset(torch.utils.data.Dataset):
    """
    WIDER FACE dataset backed by a parsed/merged annotation json.

    - dataset_images_path(str): Path to folder of wider face images.
    - dataset_annotations_jsonfile(str): Path to json file of annotations.
    - preproc(Function|object): Function or Object of pre-processing.

    Each per-image label is an (N, 20) array with columns:
        [0:4]   box rect as (x1, y1, x2, y2)
        [4:14]  5 landmark (x, y) pairs
        [14]    1 if landmarks valid, -1 if invalid (all-(-1) landmarks)
        [15]    blur level
        [16]    expression level
        [17]    illumination level
        [18]    occlusion level
        [19]    pose level
    """
    # <Method: __init__/>
    def __init__(
        self, 
        dataset_images_path,
        dataset_annotations_jsonfile, 
        preproc=TrainPreprocessor((256, 256), (0, 0, 0))
        ):
        """Load all image paths and annotation arrays into memory.

        Raises:
            FileNotFoundError: If an annotated image is missing on disk.
            ValueError: If the annotation json holds no data.
        """
        self._preproc = preproc
        self._images = [ ]
        self._lables = [ ]
        with open(dataset_annotations_jsonfile, 'r') as fp:
            json_dict = json.load(fp)
        # end-with
        # Explicit raise instead of assert: not stripped under -O.
        if json_dict is None:
            raise ValueError("empty annotation json: " + dataset_annotations_jsonfile)
        # end-if
        for k, v in json_dict.items():
            img_path = os.path.join(dataset_images_path, k)
            if not os.path.exists(img_path):
                raise FileNotFoundError(img_path)
            # end-if
            self._images.append(img_path)
            # Build all (1, 20) rows first and stack once: np.append inside
            # the loop copies the whole array each iteration (O(n^2)).
            rows = [self._annotation_from_bbox(bbox) for bbox in v['bboxes']]
            annotations = np.vstack(rows) if rows else np.zeros((0, 20))
            self._lables.append(annotations)
        # end-for
    # <Method: /__init__>

    # <Method: _annotation_from_bbox/>
    @staticmethod
    def _annotation_from_bbox(bbox):
        """Convert one bbox dict from the annotation json into a (1, 20) row."""
        annotation = np.zeros((1, 20))
        annotation[:, 0:4] = np.array([bbox['x1y1x2y2'], ])
        if 'landmarks' in bbox:
            # Train-set entry: landmarks present; all-(-1) marks them invalid.
            annotation[:, 4:14] = np.array(bbox['landmarks']).reshape(1, -1)
            no_landmarks = bbox['landmarks'] == [[-1.0, -1.0]] * 5
            annotation[:, 14] = -1 if no_landmarks else 1
        else:
            # Val-set entry: no landmarks recorded, treat as valid.
            annotation[:, 14] = 1
        # end-if
        annotation[:, 15] = bbox['blur']
        annotation[:, 16] = bbox['expression']
        annotation[:, 17] = bbox['illumination']
        annotation[:, 18] = bbox['occlusion']
        annotation[:, 19] = bbox['pose']
        return annotation
    # <Method: /_annotation_from_bbox>

    # <Method: __len__/>
    def __len__(self):
        """Number of images in the dataset."""
        assert(len(self._lables) == len(self._images))
        return len(self._lables)
    # <Method: /__len__>

    # <Method: __getitem__/>
    def __getitem__(self, index):
        """Return (image tensor, label array) for ``index``.

        NOTE(review): cv2.imread returns None for unreadable files; paths
        were existence-checked in __init__, and the preprocessor is assumed
        to handle the image/labels pair — confirm against TrainPreprocessor.
        """
        image = cv2.imread(self._images[index])
        lable = self._lables[index]
        if self._preproc is not None:
            image, lable = self._preproc(image, lable)
        # end-if
        return torch.from_numpy(image), lable
    # <Method: /__getitem__>
# <Class: /EnhancedRetinaWiderFaceDataset>


# <Class: OfficalWiderFaceDataset/>
class OfficalWiderFaceDataset(torch.utils.data.Dataset):
    """
    WIDER FACE dataset backed by an official annotation json.

    - dataset_images_path(str): Path to folder of wider face images.
    - dataset_annotations_jsonfile(str): Path to json file of annotations.
    - preproc(Function|object): Function or Object of pre-processing.

    Each per-image label is an (N, 20) array with columns:
        [0:4]   box rect as (x1, y1, x2, y2)
        [4:14]  5 landmark (x, y) pairs
        [14]    1 if landmarks valid, -1 if invalid (all-(-1) landmarks)
        [15]    blur level
        [16]    expression level
        [17]    illumination level
        [18]    occlusion level
        [19]    pose level
    """
    # <Method: __init__/>
    def __init__(
        self, 
        dataset_images_path,
        dataset_annotations_jsonfile, 
        preproc=TrainPreprocessor((256, 256))
        ):
        """Load all image paths and annotation arrays into memory.

        Raises:
            FileNotFoundError: If an annotated image is missing on disk.
            ValueError: If the annotation json holds no data.
        """
        self._preproc = preproc
        self._images = [ ]
        self._lables = [ ]
        with open(dataset_annotations_jsonfile, 'r') as fp:
            json_dict = json.load(fp)
        # end-with
        # Explicit raise instead of assert: not stripped under -O.
        if json_dict is None:
            raise ValueError("empty annotation json: " + dataset_annotations_jsonfile)
        # end-if
        for k, v in json_dict.items():
            img_path = os.path.join(dataset_images_path, k)
            if not os.path.exists(img_path):
                raise FileNotFoundError(img_path)
            # end-if
            self._images.append(img_path)
            # Build all (1, 20) rows first and stack once: np.append inside
            # the loop copies the whole array each iteration (O(n^2)).
            rows = [self._annotation_from_bbox(bbox) for bbox in v['bboxes']]
            annotations = np.vstack(rows) if rows else np.zeros((0, 20))
            self._lables.append(annotations)
        # end-for
    # <Method: /__init__>

    # <Method: _annotation_from_bbox/>
    @staticmethod
    def _annotation_from_bbox(bbox):
        """Convert one bbox dict from the annotation json into a (1, 20) row."""
        annotation = np.zeros((1, 20))
        annotation[:, 0:4] = np.array([bbox['x1y1x2y2'], ])
        if 'landmarks' in bbox:
            # Train-set entry: landmarks present; all-(-1) marks them invalid.
            annotation[:, 4:14] = np.array(bbox['landmarks']).reshape(1, -1)
            no_landmarks = bbox['landmarks'] == [[-1.0, -1.0]] * 5
            annotation[:, 14] = -1 if no_landmarks else 1
        else:
            # Val-set entry: no landmarks recorded, treat as valid.
            annotation[:, 14] = 1
        # end-if
        annotation[:, 15] = bbox['blur']
        annotation[:, 16] = bbox['expression']
        annotation[:, 17] = bbox['illumination']
        annotation[:, 18] = bbox['occlusion']
        annotation[:, 19] = bbox['pose']
        return annotation
    # <Method: /_annotation_from_bbox>

    # <Method: __len__/>
    def __len__(self):
        """Number of images in the dataset."""
        assert(len(self._lables) == len(self._images))
        return len(self._lables)
    # <Method: /__len__>

    # <Method: __getitem__/>
    def __getitem__(self, index):
        """Return (image tensor, label array) for ``index``.

        NOTE(review): cv2.imread returns None for unreadable files; paths
        were existence-checked in __init__, and the preprocessor is assumed
        to handle the image/labels pair — confirm against TrainPreprocessor.
        """
        image = cv2.imread(self._images[index])
        lable = self._lables[index]
        if self._preproc is not None:
            image, lable = self._preproc(image, lable)
        # end-if
        return torch.from_numpy(image), lable
    # <Method: /__getitem__>
# <Class: /OfficalWiderFaceDataset>


if __name__ == "__main__":  
    # python -m data.retinaface.widerfacedataset 
    
    # One-off conversion of the raw WIDER FACE / RetinaFace annotation files
    # into json, plus a merge of both sources. Flip `if 0` to `if 1` to
    # regenerate the json files (requires the raw .mat/.txt files on disk).
    if 0:
        WiderFaceParser.ParseOfficalTrainValMat(matfile='./data/wider_face/wider_face_split/wider_face_train.mat', jsonfile='./data/wider_face/wider_face_split/wider_face_train_offical.json')
        WiderFaceParser.ParseOfficalTrainValMat(matfile='./data/wider_face/wider_face_split/wider_face_val.mat', jsonfile='./data/wider_face/wider_face_split/wider_face_val_offical.json')
        WiderFaceParser.ParseOfficalTrainValMat(matfile='./data/wider_face/wider_face_split/wider_easy_val.mat', jsonfile='./data/wider_face/wider_face_split/wider_easy_val_offical.json')
        WiderFaceParser.ParseOfficalTrainValMat(matfile='./data/wider_face/wider_face_split/wider_medi_val.mat', jsonfile='./data/wider_face/wider_face_split/wider_medi_val_offical.json')
        WiderFaceParser.ParseOfficalTrainValMat(matfile='./data/wider_face/wider_face_split/wider_hard_val.mat', jsonfile='./data/wider_face/wider_face_split/wider_hard_val_offical.json')        
        WiderFaceParser.ParseOfficalTestMat(matfile='./data/wider_face/wider_face_split/wider_face_test.mat', jsonfile='./data/wider_face/wider_face_split/wider_face_test_offical.json')
        WiderFaceParser.ParseRetinaTrainValTxt(txtfile='./data/wider_face/retinaface_gt_v1.1/train/label.txt', jsonfile='./data/wider_face/wider_face_split/wider_face_train_retina.json')
        WiderFaceParser.ParseRetinaTrainValTxt(txtfile='./data/wider_face/retinaface_gt_v1.1/val/label.txt', jsonfile='./data/wider_face/wider_face_split/wider_face_val_retina.json')
        print("WiderFaceParser.MergeRetinaAndOfficalTrainValJson, Train:")
        WiderFaceParser.MergeRetinaAndOfficalTrainValJson(
            retina_jsonfile = './data/wider_face/wider_face_split/wider_face_train_retina.json', 
            offical_jsonfile = './data/wider_face/wider_face_split/wider_face_train_offical.json', 
            merged_jsonfile = './data/wider_face/wider_face_split/wider_face_train_retina_enhanced.json'
            )
        print("WiderFaceParser.MergeRetinaAndOfficalTrainValJson, Val:")
        WiderFaceParser.MergeRetinaAndOfficalTrainValJson(
            retina_jsonfile = './data/wider_face/wider_face_split/wider_face_val_retina.json', 
            offical_jsonfile = './data/wider_face/wider_face_split/wider_face_val_offical.json', 
            merged_jsonfile = './data/wider_face/wider_face_split/wider_face_val_retina_enhanced.json'
            )
    # end-if

    # Normalization constants shared by the preprocessors and by the
    # de-normalization done below for display (image * std + mean).
    NormStd = (0.5, 0.5, 0.5)
    NormMean = (0.5, 0.5, 0.5)
    image_size = 512
    batch_size = 32

    # TrainPreprocessor comes from the star import of
    # data.retinaface.dataaugmentation.
    trainset = EnhancedRetinaWiderFaceDataset(
        dataset_images_path='./data/wider_face/WIDER_train/images', 
        dataset_annotations_jsonfile='./data/annotations/wider_face_train_retina_enhanced.json', 
        preproc=TrainPreprocessor((image_size, image_size), NormMean, NormStd)
        )

    # Visual spot-check of single train samples (disabled by default).
    if 0:
        for idx in range(len(trainset)):
            tensor_img, lables = trainset[idx]
            # Tensor is CHW; convert back to HWC and de-normalize for display.
            image = tensor_img.permute(1, 2, 0).numpy() * NormStd + NormMean
            for lable in lables:
                # NOTE(review): assumes the preprocessor normalized box and
                # landmark coordinates to [0, 1] — scale back to pixels.
                # This mutates the label row in place; confirm the preproc
                # returns a fresh array per call.
                lable[0:14] *= image_size
                image = cv2.rectangle(image, (int(lable[0]), int(lable[1])), (int(lable[2]), int(lable[3])), (0, 255, 0), 1)
                image = cv2.circle(image, (int(lable[4]), int(lable[5])), 2, (0, 0, 255))
                image = cv2.circle(image, (int(lable[6]), int(lable[7])), 2, (0, 0, 255))
                image = cv2.circle(image, (int(lable[8]), int(lable[9])), 2, (0, 0, 255))
                image = cv2.circle(image, (int(lable[10]), int(lable[11])), 2, (0, 0, 255))
                image = cv2.circle(image, (int(lable[12]), int(lable[13])), 2, (0, 0, 255))
            # end-for
            cv2.imshow("image", image)
            cv2.waitKey(1)
        # end-for
    # end-if

    # Visual spot-check of batched train samples via DataLoader
    # (disabled by default). detection_collate comes from the star import.
    if 0:
        trainloader = torch.utils.data.DataLoader(trainset, batch_size, shuffle=True, num_workers=1, collate_fn=detection_collate)
        for batch_idx, batch_data in enumerate(trainloader):
            print('load ', batch_idx, '/', len(trainloader))        
            print('batch_data[0].size()[0]: ', batch_data[0].size()[0])
            batch_images = batch_data[0]
            batch_labels = batch_data[1]
            for i in range(batch_images.size()[0]):
                image = batch_images[i].permute(1, 2, 0).numpy() * NormStd + NormMean
                # copy() makes the array contiguous/writable for cv2 drawing.
                image = image.copy()
                labels = batch_labels[i]
                for lable in labels:
                    lable[0:14] *= image_size
                    image = cv2.rectangle(image, (int(lable[0]), int(lable[1])), (int(lable[2]), int(lable[3])), (0, 255, 0), 1)
                    image = cv2.circle(image, (int(lable[4]), int(lable[5])), 2, (0, 0, 255), thickness=-1, lineType=8)
                    image = cv2.circle(image, (int(lable[6]), int(lable[7])), 2, (0, 0, 255), thickness=-1, lineType=8)
                    image = cv2.circle(image, (int(lable[8]), int(lable[9])), 2, (0, 0, 255), thickness=-1, lineType=8)
                    image = cv2.circle(image, (int(lable[10]), int(lable[11])), 2, (0, 0, 255), thickness=-1, lineType=8)
                    image = cv2.circle(image, (int(lable[12]), int(lable[13])), 2, (0, 0, 255), thickness=-1, lineType=8)
                # end-for
                cv2.imshow("image", image)
                cv2.waitKey(1)
            # end-for
        # end-for
    # end-if

    # Validation set uses the official "easy" split (no landmarks) and the
    # evaluation-time preprocessor.
    valset = EnhancedRetinaWiderFaceDataset(
        dataset_images_path='./data/wider_face/WIDER_val/images', 
        dataset_annotations_jsonfile='./data/annotations/wider_easy_val_offical.json', 
        preproc=EvalPreprocessor((image_size, image_size), NormMean, NormStd)
        )

    # Visual spot-check of single val samples (disabled by default).
    if 0:
        for idx in range(len(valset)):
            tensor_img, lables = valset[idx]
            image = tensor_img.permute(1, 2, 0).numpy() * NormStd + NormMean
            for lable in lables:
                lable[0:14] *= image_size
                image = cv2.rectangle(image, (int(lable[0]), int(lable[1])), (int(lable[2]), int(lable[3])), (0, 255, 0), 1)
            # end-for
            cv2.imshow("image", image)
            cv2.waitKey(0)
        # end-for
    # end-if

    # Batched val visualization (enabled): draw ground-truth boxes only.
    if 1:
        valloader = torch.utils.data.DataLoader(valset, batch_size, shuffle=False, num_workers=1, collate_fn=detection_collate)
        for batch_idx, batch_data in enumerate(valloader):
            batch_images = batch_data[0]
            batch_labels = batch_data[1]
            for i in range(batch_images.size()[0]):
                image = batch_images[i].permute(1, 2, 0).numpy() * NormStd + NormMean
                image = image.copy()
                labels = batch_labels[i]
                for lable in labels:
                    lable[0:14] *= image_size
                    image = cv2.rectangle(image, (int(lable[0]), int(lable[1])), (int(lable[2]), int(lable[3])), (0, 255, 0), 1)
                # end-for
                cv2.imshow("image", image)
                cv2.waitKey(1)
            # end-for
        # end-for
    # end-if    