import numpy as np
import cv2
import os
import pickle
import random
from tqdm import tqdm
from .build import DATASET_OUTPUT_DIR, DATASET_CONFIG_DIR, DATASET_REGISTRY, DATASET_INFO_REGISTRY
from .utils import get_dataset_info_cfg, setup_dataset_info, map_category

DATASET = 'RIFIR'
DATASET_BASE_DIR = 'dataset/RIFIR'


# Class labels that appear in the RIFIR annotation files, and their indices.
CLASSES_LIST = ['pedestrian', 'crowd', 'ped']
CLASSES_INDEX = {name: index for index, name in enumerate(CLASSES_LIST)}
# Label list used after category mapping collapses everything to one class.
FAKE_CLASSES_LIST = ['person']

# Category remapping: all three source classes collapse to category 0.
map_dict = {
    0: 0,
    1: 0,
    2: 0,
}

def get_RIFIR_dicts(DATASET_BASE_DIR, image_set_name):
    """Parse one RIFIR annotation file into a list of per-image record dicts.

    Parameters
    ----------
    DATASET_BASE_DIR : str
        Root directory of the RIFIR dataset.
    image_set_name : str
        Image-set name (e.g. 'Train_1' or 'Test'); selects both
        'Annotations/<name>.txt' and the '<name>_FarInfrared' image folder.

    Returns
    -------
    list of dict
        One record per image with keys 'file_name', 'image_id',
        'annotations' (list of {'bbox': [x0, y0, x1, y1], 'category_id'})
        and 'params' ({'height', 'width'}).

    Raises
    ------
    FileNotFoundError
        If an expected image file cannot be loaded.
    ValueError
        If the OUTSIDE policy constant is misconfigured.
    """
    annotations_full_path = os.path.join(DATASET_BASE_DIR, 'Annotations', image_set_name + '.txt')
    with open(annotations_full_path, 'r') as f:
        lines = f.readlines()

    # Header line carries two "key=value" tokens: image count and object count.
    num_images = int(lines[0].strip().split()[0].split('=')[-1].strip())
    num_objects = int(lines[0].strip().split()[1].split('=')[-1].strip())

    image_info_list = [[] for _ in range(num_images)]

    # Policy for boxes whose x/y origin lies outside the image:
    # 'remove' drops the box, 'truncate' clips the coordinate to 0.
    OUTSIDE = 'remove'

    # Each object track occupies a 10-line stanza starting at line 2.
    for line_index in range(num_objects):
        start_line = line_index * 10 + 2

        line_split = lines[start_line].strip().split()
        object_class = line_split[0].split('=')[-1].strip("'")
        begin_frame = int(line_split[2].split('=')[-1])
        # line_split[3] holds the track's end frame; it is not needed here
        # because the per-frame bbox list below already covers the whole span.

        if object_class not in CLASSES_LIST:
            continue

        # Per-frame boxes for the track, formatted "[x y w h; x y w h; ...]".
        line_split = lines[start_line + 2].split('=')[-1].strip()[1:-1].split(';')
        bbox_list = []
        for bbox in line_split:
            bbox = [int(item) for item in bbox.strip().split()]
            bbox_list.append(bbox)

        for i, bbox in enumerate(bbox_list):
            if bbox[-1] <= 0 or bbox[-2] <= 0:
                # Non-positive width/height: degenerate box, drop it.
                bbox_list[i] = None
            else:
                for j in range(0, 2):
                    if bbox[j] < 0:
                        if OUTSIDE == 'truncate':
                            bbox_list[i][j] = 0
                        elif OUTSIDE == 'remove':
                            bbox_list[i] = None
                            break
                        else:
                            # BUG FIX: the original raised a bare string, which
                            # is a TypeError in Python 3; raise a real exception.
                            raise ValueError("Invalid OUTSIDE value")

        for i, bbox in enumerate(bbox_list):
            if bbox is not None:
                object_dict = {
                    'label': object_class,
                    # Convert [x, y, w, h] to corner form [x0, y0, x1, y1].
                    'bounding_box': [float(bbox[0]), float(bbox[1]),
                                     float(bbox[0] + bbox[2]), float(bbox[1] + bbox[3])]
                }
                image_info_list[begin_frame + i].append(object_dict)

    # Convert to the uniform dataset-dict interface shared by all datasets.
    dataset_dicts = []
    for image_id, image_info in enumerate(image_info_list):
        image_full_path = os.path.join(DATASET_BASE_DIR, image_set_name + '_FarInfrared', '%06d.jpg' % image_id)
        # BUG FIX: dropped the redundant os.path.join() around an already
        # joined path (it was a no-op but misleading).
        img = cv2.imread(image_full_path)
        if img is None:
            # BUG FIX: message read "Cannot not load image" (double negative).
            raise FileNotFoundError("Cannot load image " + image_full_path)

        height, width, channels = img.shape

        objects = []
        for obj in image_info:  # renamed from `object` (shadowed the builtin)
            objects.append({
                'bbox': obj['bounding_box'],
                'category_id': CLASSES_INDEX[obj['label']],
            })

        dataset_dicts.append({
            'file_name': image_full_path,
            'image_id': image_id,
            'annotations': objects,
            'params': {
                'height': height,
                'width': width,
            }
        })

    return dataset_dicts


def crop_dataset_dicts(dataset_dicts, size):
    """Apply a reproducible random crop of `size` ((w, h)) to every record.

    The RNG is reseeded with 0 so the same crops are produced on every run.
    Annotations covered less than 50% by the crop window are discarded; the
    rest are shifted and clipped to the new image extent. Records are
    modified in place and returned in a new list.
    """
    random.seed(0)
    crop_w, crop_h = size
    cropped = []
    for record in dataset_dicts:
        img_h = record['params']['height']
        img_w = record['params']['width']
        # Top-left corner of the crop window, uniform over valid placements.
        x0 = int(random.random() * (img_w - crop_w))
        y0 = int(random.random() * (img_h - crop_h))

        kept = []
        for ann in record['annotations']:
            bx0, by0, bx1, by1 = ann['bbox']
            # Intersection of the box with the crop window.
            inter_w = max(min(x0 + crop_w, bx1) - max(x0, bx0), 0)
            inter_h = max(min(y0 + crop_h, by1) - max(y0, by0), 0)
            coverage = inter_h * inter_w / ((by1 - by0) * (bx1 - bx0))
            if coverage < 0.5:
                continue

            # Shift into crop coordinates and clip to the new image bounds.
            ann['bbox'] = [
                float(min(max(bx0 - x0, 0), crop_w)),
                float(min(max(by0 - y0, 0), crop_h)),
                float(min(max(bx1 - x0, 0), crop_w)),
                float(min(max(by1 - y0, 0), crop_h)),
            ]
            kept.append(ann)

        record['annotations'] = kept
        record['params']['height'] = crop_h
        record['params']['width'] = crop_w
        record['params']['crop'] = [x0, y0, x0 + crop_w, y0 + crop_h]
        cropped.append(record)
    return cropped


def select_classes(dataset_dicts, classes_list):
    """Keep only annotations whose label is listed in `classes_list`.

    `classes_list` contains class names from CLASSES_LIST; they are resolved
    to category ids before filtering. Records are modified in place and
    returned in a new list.
    """
    wanted_ids = {CLASSES_INDEX[name] for name in classes_list}
    filtered = []
    for record in dataset_dicts:
        record['annotations'] = [
            ann for ann in record['annotations']
            if ann['category_id'] in wanted_ids
        ]
        filtered.append(record)
    return filtered

def remove_empty(dataset_dicts):
    """Return a new list containing only records that have annotations."""
    return [record for record in dataset_dicts if record['annotations']]

def bind_dataset_type(dataset_dicts, dataset_type):
    """Tag every record's 'params' dict with the given dataset_type.

    Records are modified in place; the same list object is returned for
    convenient chaining. (Replaces the non-idiomatic
    `for i in range(len(...))` index loop with direct iteration.)
    """
    for record in dataset_dicts:
        record['params']['dataset_type'] = dataset_type
    return dataset_dicts

def get_dataset_dicts(dataset_name, **kwargs):
    """Load (and pickle-cache) the RIFIR dataset dicts for `dataset_name`.

    Optional transforms are passed as keyword arguments:

    - 'sep':  {'classes_list': [...]}            -> keep only these classes
    - 'crop': {'size': (w, h)}                    -> deterministic random crop
    - 'remove_empty': any value                   -> drop annotation-less records
    - 'bind_dataset_type': {'dataset_type': ...}  -> tag records' params

    Transforms are peeled off one per recursive call; the elif chain below
    pops them in reverse application order, so the effective pipeline is
    sep -> crop -> remove_empty -> bind_dataset_type. Every intermediate
    result is pickled under DATASET_OUTPUT_DIR using a file name that
    encodes the requested transforms, and reloaded on subsequent calls.
    """
    available_set = set(['sep', 'crop', 'remove_empty', 'bind_dataset_type'])
    assert set(kwargs.keys()).issubset(available_set), 'Invalid kwargs'

    # Build a cache-file suffix that uniquely encodes the transform options.
    content = ''
    if 'sep' in kwargs:
        content += '-[sep]-' + '-'.join(kwargs['sep']['classes_list'])
    if 'crop' in kwargs:
        content += '-[crop]-' + '-'.join([str(i) for i in kwargs['crop']['size']])
    if 'remove_empty' in kwargs:
        content += '-[remove_empty]'
    if 'bind_dataset_type' in kwargs:
        content += '-[bdt]-' + str(kwargs['bind_dataset_type']['dataset_type'])
    pickle_file_name = os.path.join(DATASET_OUTPUT_DIR, DATASET + '-' + dataset_name + content + '-dataset.pkl')
    dataset_dicts = None
    if os.path.exists(pickle_file_name):
        # Cache hit: reuse previously computed (possibly transformed) dicts.
        with open(pickle_file_name, 'rb') as f:
            dataset_dicts = pickle.load(f)
    else:
        # Pop the outermost transform, recurse for the rest, then apply it.
        # (kwargs is this call's own dict, so pop() does not affect callers.)
        if 'bind_dataset_type' in kwargs:
            params = kwargs.pop('bind_dataset_type')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = bind_dataset_type(dataset_dicts, params['dataset_type'])
        elif 'remove_empty' in kwargs:
            kwargs.pop('remove_empty')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = remove_empty(dataset_dicts)
        elif 'crop' in kwargs:
            params = kwargs.pop('crop')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = crop_dataset_dicts(dataset_dicts, params['size'])
        elif 'sep' in kwargs:
            params = kwargs.pop('sep')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = select_classes(dataset_dicts, params['classes_list'])
        else:
            # Base case: no transforms left, parse the raw annotations.
            assert len(kwargs) == 0
            dataset_dicts = get_RIFIR_dicts(DATASET_BASE_DIR, dataset_name)
        os.makedirs(os.path.dirname(pickle_file_name), exist_ok=True)
        with open(pickle_file_name, 'wb') as f:
            pickle.dump(dataset_dicts, f)
    return dataset_dicts

dataset_name_train = 'Train_1'
dataset_name_test = 'Test'

# Register one dataset-info config per published dataset name, in the same
# order as the original hand-written list. The plain and '-g' families share
# RIFIR.yaml / RIFIR-g.yaml across their '', '-remove-empty' and
# '-person-remove-empty' variants; each cluster family uses its own
# RIFIR-cluster*.yaml for all three variants.
for _variant in ('', '-remove-empty', '-person-remove-empty'):
    for _base, _cfg_file in (('', 'RIFIR.yaml'), ('-g', 'RIFIR-g.yaml')):
        DATASET_INFO_REGISTRY.register(
            DATASET + _base + _variant,
            get_dataset_info_cfg(os.path.join(DATASET_CONFIG_DIR, _cfg_file)))
for _cluster in ('', '2', '3', '4', '5'):
    for _variant in ('', '-remove-empty', '-person-remove-empty'):
        DATASET_INFO_REGISTRY.register(
            DATASET + '-cluster' + _cluster + _variant,
            get_dataset_info_cfg(os.path.join(DATASET_CONFIG_DIR, 'RIFIR-cluster' + _cluster + '.yaml')))

def _family_variants(base):
    """Return the (postfix, get_dataset_dicts kwargs) variants of one family."""
    return [
        (base, {}),
        (base + '-remove-empty', {'remove_empty': None}),
        (base + '-person-remove-empty', {
            'sep': {'classes_list': ['pedestrian', 'ped']},
            'remove_empty': None,
        }),
    ]


# Enumerate every dataset variant in the original registration order: the
# plain and '-g' families interleaved variant-by-variant, then each cluster
# family in full.
_dataset_variants = []
for _pair in zip(_family_variants(''), _family_variants('-g')):
    _dataset_variants.extend(_pair)
for _cluster_id in ('', '2', '3', '4', '5'):
    _dataset_variants.extend(_family_variants('-cluster' + _cluster_id))

for postfix, load_kwargs in _dataset_variants:
    dataset_full_name = DATASET + postfix
    # Register a train and a test loader for each variant. The lambda binds
    # the split name, the load kwargs and the dataset info as default
    # arguments so each registered callable captures its own values.
    for _split_postfix, _split_name in (('-train', dataset_name_train),
                                        ('-test', dataset_name_test)):
        DATASET_REGISTRY.register(
            dataset_full_name + _split_postfix,
            lambda dataset_name=_split_name,
                   kwargs=load_kwargs,
                   dataset_info=DATASET_INFO_REGISTRY.get(dataset_full_name):
                setup_dataset_info(
                    map_category(get_dataset_dicts(dataset_name, **kwargs), map_dict),
                    dataset_info))

