import numpy as np
import cv2
import os
import pickle
import random
from pycocotools.coco import COCO
from tqdm import tqdm
from .build import DATASET_OUTPUT_DIR, DATASET_CONFIG_DIR, DATASET_REGISTRY, DATASET_INFO_REGISTRY
from .utils import get_dataset_info_cfg, setup_dataset_info, convert_to_one_category, map_category, get_plugin_annotations
import itertools
import copy
import hashlib

DATASET = 'aaurainsnow'
DATASET_BASE_DIR = 'dataset/aaurainsnow'


# Ordered label names; a label's list position doubles as its category id.
CLASSES_LIST = ['car', 'bus', 'truck', 'person']
CLASSES_INDEX = {class_name: idx for idx, class_name in enumerate(CLASSES_LIST)}
FAKE_CLASSES_LIST = ['car']

# Remaps raw category ids onto the final label set used downstream:
# car/bus/truck (0-2) collapse to id 1, person (3) becomes id 0.
map_dict = {
    0: 1,
    1: 1,
    2: 1,
    3: 0,
}

def get_aaurainsnow_dicts(DATASET_BASE_DIR, dataset_name):
    """Load the AAU RainSnow COCO annotations for *dataset_name*.

    Reads ``aauRainSnow-<dataset_name>.json`` under *DATASET_BASE_DIR* and
    returns a list of records in the project's uniform format::

        {'file_name': str, 'image_id': int,
         'annotations': [{'bbox': [x0, y0, x1, y1], 'category_id': int}, ...],
         'params': {'height': int, 'width': int}}

    Annotations with (near-)zero width/height or with a label outside
    CLASSES_LIST are skipped.
    """
    dataset_dicts = []

    coco = COCO(os.path.join(DATASET_BASE_DIR, 'aauRainSnow-{}.json'.format(dataset_name)))

    for image_id in coco.getImgIds():
        image_info = coco.loadImgs(image_id)[0]
        image_path = os.path.join(DATASET_BASE_DIR, image_info['file_name'])
        height = image_info['height']
        width = image_info['width']

        annotations_ids = coco.getAnnIds(imgIds=image_id, iscrowd=False)
        coco_annotations = coco.loadAnns(annotations_ids)

        # Single pass: emit records directly in the uniform interface format
        # (previously an intermediate label/bounding_box list was built and
        # converted in a second loop, with the builtin name `object` shadowed).
        annotations = []
        for annotation in coco_annotations:
            # COCO boxes are [x, y, w, h]; convert to [x0, y0, x1, y1] and
            # clip to the image bounds.
            x, y, box_w, box_h = annotation['bbox']
            bbox = [max(0, x), max(0, y), min(width, x + box_w), min(height, y + box_h)]

            # some annotations have basically no width / height, skip them
            if bbox[2] - bbox[0] < 1 or bbox[3] - bbox[1] < 1:
                continue

            label = coco.loadCats(annotation['category_id'])[0]['name']
            if label not in CLASSES_LIST:
                continue

            annotations.append({
                'bbox': [float(v) for v in bbox],
                'category_id': CLASSES_INDEX[label],
            })

        dataset_dicts.append({
            'file_name': image_path,
            'image_id': image_id,
            'annotations': annotations,
            'params': {
                'height': height,
                'width': width,
            },
        })

    return dataset_dicts



def crop_dataset_dicts(dataset_dicts, size):
    """Randomly crop every record to *size* and remap its annotations.

    size: (w, h) of the crop window.

    For each record a crop origin is drawn from a seeded RNG (so the result
    is deterministic across runs).  Annotations whose intersection with the
    crop window covers less than half their own area are dropped; the rest
    are translated into crop coordinates and clamped to the window.  Records
    are modified in place and returned in a new list.

    NOTE(review): assumes every image is at least `size` large — otherwise
    the crop origin can go negative; confirm against the dataset.
    """
    random.seed(0)

    def clamp(value, lower, upper):
        # Was an inline lambda rebuilt on every image whose parameter
        # shadowed the crop origin `x`; hoisted to a named helper.
        return min(max(value, lower), upper)

    new_dataset_dicts = []
    for dataset_dict in dataset_dicts:
        h, w = dataset_dict['params']['height'], dataset_dict['params']['width']
        new_w, new_h = size
        # Two random() draws per record, in this order, to keep the RNG
        # stream (and therefore the cached pickles) identical to before.
        x = int(random.random() * (w - new_w))
        y = int(random.random() * (h - new_h))

        new_objects = []
        for obj in dataset_dict['annotations']:
            x0, y0, x1, y1 = obj['bbox']
            inter_w = max(min(x + new_w, x1) - max(x, x0), 0)
            inter_h = max(min(y + new_h, y1) - max(y, y0), 0)
            # Fraction of the box area that survives the crop.
            inter_area_ratio = inter_h * inter_w / ((y1 - y0) * (x1 - x0))
            if inter_area_ratio < 0.5:
                continue

            # Translate into crop coordinates and clamp to the window.
            obj['bbox'] = [
                float(clamp(x0 - x, 0, new_w)),
                float(clamp(y0 - y, 0, new_h)),
                float(clamp(x1 - x, 0, new_w)),
                float(clamp(y1 - y, 0, new_h)),
            ]
            new_objects.append(obj)
        dataset_dict['annotations'] = new_objects
        dataset_dict['params']['height'] = new_h
        dataset_dict['params']['width'] = new_w
        dataset_dict['params']['crop'] = [x, y, x + new_w, y + new_h]
        new_dataset_dicts.append(dataset_dict)
    return new_dataset_dicts


def select_classes(dataset_dicts, classes_list):
    """Keep only annotations whose label name is in *classes_list*.

    Label names are translated to category ids via CLASSES_INDEX; records
    are filtered in place and returned in a new list.
    """
    wanted_ids = {CLASSES_INDEX[label] for label in classes_list}
    new_dataset_dicts = []
    for record in dataset_dicts:
        record['annotations'] = [
            obj for obj in record['annotations']
            if obj['category_id'] in wanted_ids
        ]
        new_dataset_dicts.append(record)
    return new_dataset_dicts

def remove_empty(dataset_dicts):
    """Drop records that carry no annotations at all."""
    return [record for record in dataset_dicts if record['annotations']]

def train_test_split(dataset_dicts, train_ratio=0.7, shuffle=True):
    random.seed(0)
    if shuffle:
        random.shuffle(dataset_dicts)
    split_size = int(len(dataset_dicts) * train_ratio)
    
    train_dataset_dicts = dataset_dicts[:split_size]
    test_dataset_dicts = dataset_dicts[split_size:]
    return train_dataset_dicts, test_dataset_dicts

def pixel_mean_select(dataset_dicts, low, high):
    """Keep records whose image's mean pixel intensity lies in [low, high).

    Records whose image file cannot be read are dropped silently.
    """
    kept = []
    for record in dataset_dicts:
        img = cv2.imread(record['file_name'])
        if img is None:
            continue
        if img.ndim > 2:
            # Drop any extra (e.g. alpha) channel before averaging.
            img = img[:, :, :3]
        if low <= img.mean() < high:
            kept.append(record)
    return kept

def repeat(dataset_dicts, u_repeat, v_repeat):
    """Tile each record's annotations onto a u_repeat x v_repeat image grid.

    Every annotation is deep-copied once per tile and shifted by whole image
    widths/heights; params width/height are scaled accordingly and a
    ``repeat`` entry is recorded.  Records are mutated in place.
    """
    # Seed kept for parity with the other transforms (no randomness is
    # actually drawn in this function).
    random.seed(0)
    for record in dataset_dicts:
        w = record['params']['width']
        h = record['params']['height']
        base_objs = record['annotations']
        tiled = []
        for u, v in itertools.product(range(u_repeat), range(v_repeat)):
            for obj in copy.deepcopy(base_objs):
                x0, y0, x1, y1 = obj['bbox']
                obj['bbox'] = [x0 + u * w, y0 + v * h, x1 + u * w, y1 + v * h]
                tiled.append(obj)
        record['annotations'] = tiled
        record['params']['repeat'] = [u_repeat, v_repeat]
        record['params']['height'] *= v_repeat
        record['params']['width'] *= u_repeat
    return dataset_dicts

def remove_too_small(dataset_dicts, area_threshold):
    """Filter out annotations whose box area is below *area_threshold* (in place)."""
    for record in dataset_dicts:
        kept = []
        for obj in record['annotations']:
            x0, y0, x1, y1 = obj['bbox']
            if (x1 - x0) * (y1 - y0) >= area_threshold:
                kept.append(obj)
        record['annotations'] = kept
    return dataset_dicts
    
def bind_dataset_type(dataset_dicts, dataset_type):
    """Stamp every record's params with *dataset_type* (mutates in place)."""
    for record in dataset_dicts:
        record['params']['dataset_type'] = dataset_type
    return dataset_dicts

def select_sub_dirs(dataset_dicts, sub_dirs):
    """Keep records whose file path contains any of the *sub_dirs* substrings."""
    return [
        record for record in dataset_dicts
        if any(sub_dir in record['file_name'] for sub_dir in sub_dirs)
    ]

def add_plugin_annotations(dataset_dicts, plugin_annotations_file):
    """Merge externally maintained ("plugin") annotations into each record.

    Plugin annotations are keyed by the record's file name; records without
    an entry are left untouched.  Mutates records in place.
    """
    annotations_dict = get_plugin_annotations(plugin_annotations_file)
    for record in dataset_dicts:
        key = record['file_name']
        if key in annotations_dict:
            record['annotations'] += annotations_dict[key]
    return dataset_dicts

def remove_plugin_annotations(dataset_dicts, plugin_annotations_file):
    """Drop records that have at least one plugin annotation on file."""
    annotations_dict = get_plugin_annotations(plugin_annotations_file)
    kept = []
    for record in dataset_dicts:
        filename = record['file_name']
        has_plugin = filename in annotations_dict and len(annotations_dict[filename]) > 0
        if not has_plugin:
            kept.append(record)
    return kept

def get_dataset_dicts(dataset_name, **kwargs):
    """Build (or load from a pickle cache) the dataset dicts for *dataset_name*.

    Each recognised kwarg describes one transformation step.  Steps are
    applied recursively: one kwarg is popped per call (in the fixed priority
    order of the elif chain below), the remaining kwargs are resolved first,
    and the popped transform is applied last.  The result of every distinct
    kwargs combination is cached on disk under a file name derived from the
    kwargs, so repeated calls are cheap.

    NOTE(review): this function mutates *kwargs* via pop(); callers rely on
    ``**`` unpacking producing a fresh dict at each call site.
    """
    available_set = set(['subset', 'sep', 'crop', 'remove_empty', 'split', 'pixel_mean_select', 'repeat', 'rm_small', 'bind_dataset_type', 'select_sub_dirs', 'add_plugin', 'remove_plugin'])
    assert set(kwargs.keys()).issubset(available_set), 'Invalid kwargs'

    # Encode every requested transform into the cache-file name so each
    # combination of transforms gets its own pickle.  The encoding here must
    # stay stable or existing caches become unreachable.
    content = ''
    if 'subset' in kwargs:
        content += '-[subset]-' + kwargs['subset']['name']
    if 'sep' in kwargs:
        content += '-[sep]-' + '-'.join(kwargs['sep']['classes_list'])
    if 'crop' in kwargs:
        content += '-[crop]-' + '-'.join([str(i) for i in kwargs['crop']['size']])
    if 'remove_empty' in kwargs:
        content += '-[remove_empty]'
    if 'split' in kwargs:
        content += '-[split]-' + str(kwargs['split']['train_ratio']) + ('-shuffle' if kwargs['split']['shuffle'] else '')
    if 'pixel_mean_select' in kwargs:
        content += '-[pixel_mean_select]-' + str(kwargs['pixel_mean_select']['low']) + '-' + str(kwargs['pixel_mean_select']['high'])
    if 'repeat' in kwargs:
        content += '-[repeat]-' + str(kwargs['repeat']['u_repeat']) + '-' + str(kwargs['repeat']['v_repeat'])
    if 'rm_small' in kwargs:
        content += '-[rm_small]-' + str(kwargs['rm_small']['area_threshold'])
    if 'bind_dataset_type' in kwargs:
        content += '-[bdt]-' + str(kwargs['bind_dataset_type']['dataset_type'])
    if 'select_sub_dirs' in kwargs:
        # Long sub-dir lists are hashed (short blake2b digest) to keep the
        # cache file name a manageable length.
        content += '-[ssd]-' + hashlib.blake2b(bytes(str(kwargs['select_sub_dirs']['sub_dirs']), encoding='utf-8'), digest_size=2).hexdigest()
    if 'add_plugin' in kwargs:
        content += '-[add_plu]-{}'.format(hashlib.blake2b(bytes(kwargs['add_plugin']['file'], encoding='utf-8'), digest_size=2).hexdigest())
    if 'remove_plugin' in kwargs:
        content += '-[rm_plu]-{}'.format(hashlib.blake2b(bytes(kwargs['remove_plugin']['file'], encoding='utf-8'), digest_size=2).hexdigest())
    pickle_file_name = os.path.join(DATASET_OUTPUT_DIR, DATASET + '-' + dataset_name + content + '-dataset.pkl')
    dataset_dicts = None
    if os.path.exists(pickle_file_name):
        # Cache hit: load the previously computed result.
        with open(pickle_file_name, 'rb') as f:
            dataset_dicts = pickle.load(f)
    else:
        # Cache miss: peel off ONE transform (priority = order of this elif
        # chain, highest first), recurse for the rest, then apply it.
        if 'bind_dataset_type' in kwargs:
            params = kwargs.pop('bind_dataset_type')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = bind_dataset_type(dataset_dicts, params['dataset_type'])
        elif 'split' in kwargs:
            params = kwargs.pop('split')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            # The split transform is the only one that interprets the dataset
            # name itself: 'train' takes the first partition, 'test' the second.
            assert dataset_name in ['train', 'test']
            if dataset_name == 'train':
                dataset_dicts = train_test_split(dataset_dicts, params['train_ratio'], params['shuffle'])[0]
            else:
                dataset_dicts = train_test_split(dataset_dicts, params['train_ratio'], params['shuffle'])[1]
        elif 'remove_empty' in kwargs:
            kwargs.pop('remove_empty')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = remove_empty(dataset_dicts)
        elif 'rm_small' in kwargs:
            params = kwargs.pop('rm_small')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = remove_too_small(dataset_dicts, params['area_threshold'])
        elif 'repeat' in kwargs:
            params = kwargs.pop('repeat')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = repeat(dataset_dicts, params['u_repeat'], params['v_repeat'])
        elif 'crop' in kwargs:
            params = kwargs.pop('crop')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = crop_dataset_dicts(dataset_dicts, params['size'])
        elif 'sep' in kwargs:
            params = kwargs.pop('sep')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = select_classes(dataset_dicts, params['classes_list'])
        elif 'pixel_mean_select' in kwargs:
            params = kwargs.pop('pixel_mean_select')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = pixel_mean_select(dataset_dicts, params['low'], params['high'])
        elif 'select_sub_dirs' in kwargs:
            params = kwargs.pop('select_sub_dirs')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = select_sub_dirs(dataset_dicts, params['sub_dirs'])
        elif 'add_plugin' in kwargs:
            params = kwargs.pop('add_plugin')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = add_plugin_annotations(dataset_dicts, params['file'])
        elif 'remove_plugin' in kwargs:
            params = kwargs.pop('remove_plugin')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = remove_plugin_annotations(dataset_dicts, params['file'])
        else:
            # Recursion base case: only 'subset' remains; load raw COCO dicts.
            assert len(kwargs) == 1
            params = kwargs.pop('subset')
            dataset_dicts = get_aaurainsnow_dicts(DATASET_BASE_DIR, params['name'])
        # Persist the freshly computed result for next time.
        os.makedirs(os.path.dirname(pickle_file_name), exist_ok=True)
        with open(pickle_file_name, 'wb') as f:
            pickle.dump(dataset_dicts, f)
    return dataset_dicts

dataset_name_train = 'train'
dataset_name_test = 'test'

# One dataset-info registration per variant: (name postfix, yaml config file).
# Registration order matches the kwargs table below.
_INFO_SPECS = [
    ('-thermal-g', 'aaurainsnow-thermal-g.yaml'),
    ('-thermal-g-remove-empty', 'aaurainsnow-thermal-g.yaml'),
    ('-thermal-split-g', 'aaurainsnow-thermal-g.yaml'),
    ('-thermal-split-g-remove-empty', 'aaurainsnow-thermal-g.yaml'),
    ('-thermal-split-g-add-plugin-remove-empty', 'aaurainsnow-thermal-g.yaml'),
    ('-thermal-split-g-rm-noano-remove-empty', 'aaurainsnow-thermal-g.yaml'),
    ('-thermal-split-g-only-noano-remove-empty', 'aaurainsnow-thermal-g.yaml'),
    ('-thermal-split-g-remove-empty-t1', 'aaurainsnow-thermal-g.yaml'),
    ('-thermal-split-g-crop2-remove-empty', 'aaurainsnow-thermal-g.yaml'),
    ('-thermal-split-g-remove-small', 'aaurainsnow-thermal-g.yaml'),
    ('-thermal-split-g-car-remove-small', 'aaurainsnow-thermal-g.yaml'),
    ('-thermal-split-g-remove-small2', 'aaurainsnow-thermal-g.yaml'),
    ('-thermal-split-g-remove-small-empty', 'aaurainsnow-thermal-g.yaml'),
    ('-thermal-split-g-repeat-remove-empty', 'aaurainsnow-thermal-g-repeat.yaml'),
    ('-thermal-split-g-high', 'aaurainsnow-thermal-g-high.yaml'),
    ('-thermal-split-g-low', 'aaurainsnow-thermal-g-low.yaml'),
    ('-thermal-split-g-high-remove-empty', 'aaurainsnow-thermal-g-high.yaml'),
    ('-thermal-split-g-low-remove-empty', 'aaurainsnow-thermal-g-low.yaml'),
]
for _postfix, _cfg_file in _INFO_SPECS:
    # get_dataset_info_cfg is called once per registration so each entry
    # receives its own cfg object, as before.
    DATASET_INFO_REGISTRY.register(DATASET + _postfix, get_dataset_info_cfg(os.path.join(DATASET_CONFIG_DIR, _cfg_file)))

# Register one train and one test dataset per variant.  The dict maps a name
# postfix to the get_dataset_dicts() kwargs describing its transform pipeline
# (see the elif chain in get_dataset_dicts for the order transforms apply in,
# which is independent of the order they are listed here).
for postfix, kwargs in {
        '-thermal-g': {
            'subset': {
                'name': 'thermal',
            },
        },
        '-thermal-g-remove-empty': {
            'subset': {
                'name': 'thermal',
            },
            'remove_empty': None
        },
        '-thermal-split-g': {
            'subset': {
                'name': 'thermal',
            },
            'split': {
                'train_ratio': 0.7,
                'shuffle': True,
            },
        },
        '-thermal-split-g-remove-empty': {
            'subset': {
                'name': 'thermal',
            },
            'split': {
                'train_ratio': 0.7,
                'shuffle': True,
            },
            'remove_empty': None
        },
        '-thermal-split-g-add-plugin-remove-empty': {
            'subset': {
                'name': 'thermal',
            },
            'split': {
                'train_ratio': 0.7,
                'shuffle': True,
            },
            'remove_empty': None,
            'add_plugin': {
                'file': 'dataset/aaurainsnow-plugin-annotations.txt'
            },
        },
        '-thermal-split-g-rm-noano-remove-empty': {
            'subset': {
                'name': 'thermal',
            },
            'split': {
                'train_ratio': 0.7,
                'shuffle': True,
            },
            'remove_empty': None,
            # keep only these recording sites (substring match on file path)
            'select_sub_dirs': {
                'sub_dirs': [
                    'Egensevej',
                    'Hasserisvej',
                    'Hjorringvej',
                    'Hobrovej',
                    'Ostre',
                    'Ringvej',
                ]
            }
        },
        '-thermal-split-g-only-noano-remove-empty': {
            'subset': {
                'name': 'thermal',
            },
            'split': {
                'train_ratio': 0.7,
                'shuffle': True,
            },
            'remove_empty': None,
            'select_sub_dirs': {
                'sub_dirs': [
                    'Hadsundvej',
                ]
            }
        },
        '-thermal-split-g-remove-empty-t1': {
            'subset': {
                'name': 'thermal',
            },
            'split': {
                'train_ratio': 0.7,
                'shuffle': True,
            },
            'remove_empty': None,
            'bind_dataset_type': {
                'dataset_type': 1,
            },
        },
        '-thermal-split-g-crop2-remove-empty': {
            'subset': {
                'name': 'thermal',
            },
            'split': {
                'train_ratio': 0.7,
                'shuffle': True,
            },
            'crop': {'size': (320, 240)},
            'remove_empty': None
        },
        '-thermal-split-g-remove-small': {
            'subset': {
                'name': 'thermal',
            },
            'split': {
                'train_ratio': 0.7,
                'shuffle': True,
            },
            'rm_small': {
                'area_threshold': 400,
            },
        },
        '-thermal-split-g-car-remove-small': {
            'subset': {
                'name': 'thermal',
            },
            'sep': {
                'classes_list': ['car', 'bus', 'truck'],
            },
            'split': {
                'train_ratio': 0.7,
                'shuffle': True,
            },
            'rm_small': {
                'area_threshold': 400,
            },
        },
        '-thermal-split-g-remove-small2': {
            'subset': {
                'name': 'thermal',
            },
            'split': {
                'train_ratio': 0.7,
                'shuffle': True,
            },
            'rm_small': {
                'area_threshold': 1024,
            },
        },
        '-thermal-split-g-remove-small-empty': {
            'subset': {
                'name': 'thermal',
            },
            'split': {
                'train_ratio': 0.7,
                'shuffle': True,
            },
            'rm_small': {
                'area_threshold': 400,
            },
            'remove_empty': None
        },
        '-thermal-split-g-repeat-remove-empty': {
            'subset': {
                'name': 'thermal',
            },
            'split': {
                'train_ratio': 0.7,
                'shuffle': True,
            },
            'repeat': {
                'u_repeat': 2,
                'v_repeat': 2,
            },
            'remove_empty': None
        },
        '-thermal-split-g-high': {
            'subset': {
                'name': 'thermal',
            },
            # bright images only (mean pixel value in [110, 256))
            'pixel_mean_select': {
                'low': 110,
                'high': 256,
            },
            'split': {
                'train_ratio': 0.7,
                'shuffle': True,
            },
        },
        '-thermal-split-g-high-remove-empty': {
            'subset': {
                'name': 'thermal',
            },
            'pixel_mean_select': {
                'low': 110,
                'high': 256,
            },
            'split': {
                'train_ratio': 0.7,
                'shuffle': True,
            },
            'remove_empty': None
        },
        '-thermal-split-g-low': {
            'subset': {
                'name': 'thermal',
            },
            # dark images only (mean pixel value in [0, 110))
            'pixel_mean_select': {
                'low': 0,
                'high': 110,
            },
            'split': {
                'train_ratio': 0.7,
                'shuffle': True,
            },
        },
        '-thermal-split-g-low-remove-empty': {
            'subset': {
                'name': 'thermal',
            },
            'pixel_mean_select': {
                'low': 0,
                'high': 110,
            },
            'split': {
                'train_ratio': 0.7,
                'shuffle': True,
            },
            'remove_empty': None
        },
    }.items():
    dataset_full_name = DATASET + postfix
    
    # Default arguments on the lambdas bind dataset_name/kwargs/dataset_info
    # at registration time (avoiding the late-binding closure pitfall).  The
    # ** unpacking inside get_dataset_dicts copies kwargs per call, so the
    # shared dict above is never mutated by the pipeline's pop() calls.
    DATASET_REGISTRY.register(dataset_full_name +'-train', lambda dataset_name=dataset_name_train, kwargs=kwargs, dataset_info=DATASET_INFO_REGISTRY.get(dataset_full_name): setup_dataset_info(map_category(get_dataset_dicts(dataset_name, **kwargs), map_dict), dataset_info))
    DATASET_REGISTRY.register(dataset_full_name +'-test', lambda dataset_name=dataset_name_test, kwargs=kwargs, dataset_info=DATASET_INFO_REGISTRY.get(dataset_full_name): setup_dataset_info(map_category(get_dataset_dicts(dataset_name, **kwargs), map_dict), dataset_info))

