import numpy as np
import cv2
import os
import pickle
import random
from pycocotools.coco import COCO
from tqdm import tqdm
from .build import DATASET_OUTPUT_DIR, DATASET_CONFIG_DIR, DATASET_REGISTRY, DATASET_INFO_REGISTRY
from .utils import get_dataset_info_cfg, setup_dataset_info, convert_to_one_category, map_category, get_plugin_annotations
import itertools
import copy
import hashlib

# Dataset identity and on-disk location.
DATASET = 'FLIR'
DATASET_BASE_DIR = 'dataset/FLIR'


# Categories kept from the FLIR annotations; everything else is dropped.
CLASSES_LIST = ['person', 'car']
CLASSES_INDEX = {name: idx for idx, name in enumerate(CLASSES_LIST)}
FAKE_CLASSES_LIST = ['person', 'car']

# Identity category-id remapping (kept for interface uniformity with other datasets).
map_dict = {
    0: 0,
    1: 1,
}


def get_FLIR_dicts(DATASET_BASE_DIR, dataset_name):
    """Load one FLIR split into a list of uniform dataset-dict records.

    Reads ``<DATASET_BASE_DIR>/<dataset_name>/thermal_annotations.json`` via
    pycocotools and converts every image into a record of the form::

        {'file_name': ..., 'image_id': ..., 'annotations': [...],
         'params': {'height': ..., 'width': ...}}

    where each annotation is ``{'bbox': [x0, y0, x1, y1], 'category_id': int}``.
    Crowd annotations, degenerate boxes (< 1px wide or tall after clipping to
    the image), and categories outside CLASSES_LIST are skipped.

    Args:
        DATASET_BASE_DIR: root directory containing the split subdirectories.
        dataset_name: split subdirectory name (e.g. 'train' or 'val').

    Returns:
        List of record dicts, one per image (images may have empty annotations).
    """
    dataset_dicts = []

    coco = COCO(os.path.join(DATASET_BASE_DIR, dataset_name, 'thermal_annotations.json'))

    for image_id in coco.getImgIds():
        image_info = coco.loadImgs(image_id)[0]
        image_path = os.path.join(DATASET_BASE_DIR, dataset_name, image_info['file_name'])
        height = image_info['height']
        width = image_info['width']

        annotations_ids = coco.getAnnIds(imgIds=image_id, iscrowd=False)
        coco_annotations = coco.loadAnns(annotations_ids)

        # Single pass: convert, clip, filter, and emit the uniform-interface
        # annotation dicts directly (previously built in two redundant loops).
        objects = []
        for annotation in coco_annotations:
            # COCO boxes are [x, y, w, h]; convert to [x0, y0, x1, y1]
            # and clip to the image extent.
            x, y, w, h = annotation['bbox']
            bbox = [max(0, x), max(0, y), min(width, x + w), min(height, y + h)]

            # some annotations have basically no width / height, skip them
            if bbox[2] - bbox[0] < 1 or bbox[3] - bbox[1] < 1:
                continue

            label = coco.loadCats(annotation['category_id'])[0]['name']
            if label not in CLASSES_LIST:
                continue

            objects.append({
                'bbox': [float(v) for v in bbox],
                'category_id': CLASSES_INDEX[label],
            })

        record = {
            'file_name': image_path,
            'image_id': image_id,
            'annotations': objects,
            'params': {
                'height': height,
                'width': width,
            }
        }
        dataset_dicts.append(record)

    return dataset_dicts


def remove_empty(dataset_dicts):
    """Return only the records that carry at least one annotation."""
    return [record for record in dataset_dicts if record['annotations']]


def remove_too_small(dataset_dicts, area_threshold):
    """Drop annotations whose box area falls below ``area_threshold``.

    Each record's 'annotations' list is replaced in place (the record dicts
    are mutated); the same ``dataset_dicts`` list is returned for chaining.

    Args:
        dataset_dicts: list of record dicts with [x0, y0, x1, y1] 'bbox' entries.
        area_threshold: minimum (inclusive) box area to keep.

    Returns:
        The input list, with small annotations filtered out of every record.
    """
    # Iterate records directly instead of indexing by range(len(...)).
    for record in dataset_dicts:
        record['annotations'] = [
            obj for obj in record['annotations']
            if (obj['bbox'][2] - obj['bbox'][0]) * (obj['bbox'][3] - obj['bbox'][1]) >= area_threshold
        ]
    return dataset_dicts


def get_dataset_dicts(dataset_name, **kwargs):
    """Return (optionally filtered) FLIR dataset dicts, cached as a pickle.

    Supported kwargs:
        remove_empty: drop images with no annotations (value is ignored).
        rm_small: {'area_threshold': T} — drop annotations with area < T.

    Each filter combination gets its own cache file under DATASET_OUTPUT_DIR.
    Filters are applied by recursing with the remaining kwargs, so every
    intermediate result is cached as well.
    """
    assert set(kwargs).issubset({'remove_empty', 'rm_small'}), 'Invalid kwargs'

    # Encode the active filters into the cache-file name.
    content = ''
    if 'remove_empty' in kwargs:
        content += '-[remove_empty]'
    if 'rm_small' in kwargs:
        content += '-[rm_small]-' + str(kwargs['rm_small']['area_threshold'])
    pickle_file_name = os.path.join(
        DATASET_OUTPUT_DIR, DATASET + '-' + dataset_name + content + '-dataset.pkl')

    # Cache hit: load and return immediately.
    if os.path.exists(pickle_file_name):
        with open(pickle_file_name, 'rb') as f:
            return pickle.load(f)

    # Cache miss: peel one filter off and recurse, or load the raw split.
    # remove_empty is peeled first so it runs AFTER rm_small on the result.
    if 'remove_empty' in kwargs:
        kwargs.pop('remove_empty')
        dataset_dicts = remove_empty(get_dataset_dicts(dataset_name, **kwargs))
    elif 'rm_small' in kwargs:
        params = kwargs.pop('rm_small')
        dataset_dicts = remove_too_small(
            get_dataset_dicts(dataset_name, **kwargs), params['area_threshold'])
    else:
        assert not kwargs
        dataset_dicts = get_FLIR_dicts(DATASET_BASE_DIR, dataset_name)

    os.makedirs(os.path.dirname(pickle_file_name), exist_ok=True)
    with open(pickle_file_name, 'wb') as f:
        pickle.dump(dataset_dicts, f)
    return dataset_dicts


dataset_name_train = 'train'
dataset_name_test = 'val'

# All dataset-info variants share the same FLIR.yaml config; register one
# freshly-loaded cfg per variant name (mirrors the per-name calls below).
for _info_name in (DATASET, DATASET + '-remove-empty', DATASET + '-remove-small-empty'):
    DATASET_INFO_REGISTRY.register(
        _info_name, get_dataset_info_cfg(os.path.join(DATASET_CONFIG_DIR, 'FLIR.yaml')))


def _make_flir_loader(split, load_kwargs, dataset_info):
    """Return a loader with arguments bound now (avoids late-binding closures)."""
    def _loader(dataset_name=split, kwargs=load_kwargs, dataset_info=dataset_info):
        return setup_dataset_info(
            map_category(get_dataset_dicts(dataset_name, **kwargs), map_dict),
            dataset_info)
    return _loader


_FLIR_VARIANTS = {
    '': {},
    '-remove-empty': {
        'remove_empty': None,
    },
    '-remove-small-empty': {
        'rm_small': {
            'area_threshold': 400,
        },
        'remove_empty': None,
    },
}
for postfix, variant_kwargs in _FLIR_VARIANTS.items():
    dataset_full_name = DATASET + postfix
    _info = DATASET_INFO_REGISTRY.get(dataset_full_name)
    DATASET_REGISTRY.register(
        dataset_full_name + '-train',
        _make_flir_loader(dataset_name_train, variant_kwargs, _info))
    DATASET_REGISTRY.register(
        dataset_full_name + '-test',
        _make_flir_loader(dataset_name_test, variant_kwargs, _info))