import numpy as np
import cv2
import os
import pickle
import random
import xml.dom.minidom
from tqdm import tqdm
import hashlib
from .build import DATASET_OUTPUT_DIR, DATASET_CONFIG_DIR, DATASET_REGISTRY, DATASET_INFO_REGISTRY
from .utils import get_dataset_info_cfg, setup_dataset_info, convert_to_one_category, map_category

DATASET = 'OURS'
DATASET_BASE_DIR = 'dataset/OURS'


# Raw annotation categories, listed in category-id order.
CLASSES_LIST = ['pedestrian', 'person', 'car', 'track']
# Label name -> contiguous integer id (position in CLASSES_LIST).
CLASSES_INDEX = {name: idx for idx, name in enumerate(CLASSES_LIST)}
FAKE_CLASSES_LIST = ['person']

# Collapse the four raw categories into two coarse ones:
# pedestrian/person -> 0, car/track -> 1.
map_dict = {
    0: 0,
    1: 0,
    2: 1,
    3: 1,
}

def get_OURS_dicts(DATASET_BASE_DIR):
    """Scan every sub-directory of ``DATASET_BASE_DIR`` for Pascal-VOC style
    XML annotation files and build one dataset record per XML file.

    Each record is a dict with keys:
        file_name   -- full path to the image (same directory as its XML,
                       name taken from the XML <filename> element)
        image_id    -- the XML <filename> value
        annotations -- list of {'bbox': [xmin, ymin, xmax, ymax] (floats),
                                'category_id': int index into CLASSES_LIST}
        params      -- {'height': int, 'width': int}

    Objects whose <name> is not in CLASSES_LIST are silently dropped.
    """

    def _text(node, tag):
        # Text content of the first matching descendant element.
        return node.getElementsByTagName(tag)[0].childNodes[0].data

    dataset_dicts = []

    for image_dir in os.listdir(DATASET_BASE_DIR):
        image_dir_path = os.path.join(DATASET_BASE_DIR, image_dir)
        if not os.path.isdir(image_dir_path):
            continue

        for xml_filename in os.listdir(image_dir_path):
            if not xml_filename.endswith('.xml'):
                continue

            xml_path = os.path.join(image_dir_path, xml_filename)
            collection = xml.dom.minidom.parse(xml_path).documentElement

            image_filename = _text(collection, 'filename')
            width = int(_text(collection, 'width'))
            height = int(_text(collection, 'height'))

            # Single pass: extract, filter, and convert annotations directly
            # into the uniform {'bbox', 'category_id'} interface.
            annotations = []
            for obj_node in collection.getElementsByTagName('object'):
                label = _text(obj_node, 'name')
                if label not in CLASSES_LIST:
                    continue  # category we do not model
                bndbox = obj_node.getElementsByTagName('bndbox')[0]
                bbox = [float(_text(bndbox, tag))
                        for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
                annotations.append({
                    'bbox': bbox,
                    'category_id': CLASSES_INDEX[label],
                })

            dataset_dicts.append({
                'file_name': os.path.join(image_dir_path, image_filename),
                'image_id': image_filename,
                'annotations': annotations,
                'params': {
                    'height': height,
                    'width': width,
                },
            })

    return dataset_dicts



def crop_dataset_dicts(dataset_dicts, size):
    """Crop every record to ``size`` (w, h) at a deterministic pseudo-random
    position (seed 0 is reset on every call).

    Annotations keeping at least half of their area inside the crop window
    survive; they are translated into window coordinates and clamped to it.
    The crop window is recorded in ``params['crop']`` as [x0, y0, x1, y1].

    NOTE: records and their annotation dicts are mutated in place (the
    returned list contains the same dict objects).
    Assumes image width >= new_w and height >= new_h — TODO confirm callers.
    """
    random.seed(0)

    def _clamp(v, lower, upper):
        return min(max(v, lower), upper)

    new_dataset_dicts = []
    for dataset_dict in dataset_dicts:
        h, w = dataset_dict['params']['height'], dataset_dict['params']['width']
        new_w, new_h = size
        # Top-left corner of the crop window.
        x = int(random.random() * (w - new_w))
        y = int(random.random() * (h - new_h))

        new_objects = []
        for obj in dataset_dict['annotations']:
            x0, y0, x1, y1 = obj['bbox']
            inter_w = max(min(x + new_w, x1) - max(x, x0), 0)
            inter_h = max(min(y + new_h, y1) - max(y, y0), 0)
            area = (x1 - x0) * (y1 - y0)
            # Degenerate (zero-area) boxes would divide by zero below and
            # carry no usable signal — drop them.
            if area <= 0:
                continue
            if inter_w * inter_h / area < 0.5:
                continue

            obj['bbox'] = [
                float(_clamp(x0 - x, 0, new_w)),
                float(_clamp(y0 - y, 0, new_h)),
                float(_clamp(x1 - x, 0, new_w)),
                float(_clamp(y1 - y, 0, new_h)),
            ]
            new_objects.append(obj)

        dataset_dict['annotations'] = new_objects
        dataset_dict['params']['height'] = new_h
        dataset_dict['params']['width'] = new_w
        dataset_dict['params']['crop'] = [x, y, x + new_w, y + new_h]
        new_dataset_dicts.append(dataset_dict)
    return new_dataset_dicts


def select_classes(dataset_dicts, classes_list):
    """Drop every annotation whose category is not one of ``classes_list``
    (given as label names). Records are mutated in place; a new list holding
    the same record dicts is returned."""
    keep_ids = {CLASSES_INDEX[label] for label in classes_list}
    result = []
    for record in dataset_dicts:
        record['annotations'] = [
            obj for obj in record['annotations'] if obj['category_id'] in keep_ids
        ]
        result.append(record)
    return result

def have_classes(dataset_dicts, classes_list):
    """Keep only the records that contain at least one annotation of one of
    ``classes_list`` (given as label names)."""
    wanted = {CLASSES_INDEX[label] for label in classes_list}
    return [
        record for record in dataset_dicts
        if any(obj['category_id'] in wanted for obj in record['annotations'])
    ]

def remove_empty(dataset_dicts):
    """Drop records that carry no annotations at all."""
    return [record for record in dataset_dicts if record['annotations']]

def train_test_split(dataset_dicts, train_ratio=0.7, shuffle=True):
    """Split into ``(train, test)`` lists by ``train_ratio``.

    The split is deterministic: the RNG is reseeded with 0 on every call, and
    ``shuffle`` permutes ``dataset_dicts`` in place before slicing.
    """
    random.seed(0)
    if shuffle:
        random.shuffle(dataset_dicts)
    n_train = int(len(dataset_dicts) * train_ratio)
    return dataset_dicts[:n_train], dataset_dicts[n_train:]

def bind_dataset_type(dataset_dicts, dataset_type):
    """Stamp ``dataset_type`` into every record's ``params``; returns the
    same (mutated) list."""
    for record in dataset_dicts:
        record['params']['dataset_type'] = dataset_type
    return dataset_dicts

def select_sub_dirs(dataset_dicts, sub_dirs):
    """Keep only records whose ``file_name`` contains any of the given
    sub-directory names (plain substring match)."""
    return [
        record for record in dataset_dicts
        if any(sub_dir in record['file_name'] for sub_dir in sub_dirs)
    ]

def select_count(dataset_dicts, count, is_train=True):
    """Deterministically (seed 0, in-place shuffle) pick ``count`` records.

    Train takes the first ``count`` of the shuffled list, test takes the last
    ``count`` — so with ``len == train_count + test_count`` the two selections
    are disjoint. If ``count`` exceeds the list length the whole list is
    returned.
    """
    random.seed(0)
    random.shuffle(dataset_dicts)
    if is_train:
        return dataset_dicts[:count]
    # BUG FIX: dataset_dicts[-count:] returned the WHOLE list when count == 0
    # (a -0 slice start is just 0). Slice from an explicit start index;
    # max(..., 0) keeps the count > len behavior (return everything).
    return dataset_dicts[max(len(dataset_dicts) - count, 0):]

def select_class_boxes_count(dataset_dicts, class_ids, start_level, count, test_count, is_train=True):
    """Select records stratified by the number of boxes of ``class_ids`` each
    record contains ("level" = that per-record box count).

    A test split of roughly ``test_count`` records is first reserved
    proportionally from every level bucket (taken from the front of each
    bucket), so the train and test selections never overlap.

    ``is_train=False`` returns the reserved test records. ``is_train=True``
    concatenates the remaining buckets in ascending level order and returns
    ``count`` consecutive records starting at the first record of the
    ``start_level`` bucket.
    """
    # Bucket records by how many target-class boxes they contain.
    count_map = {}
    for dataset_dict in dataset_dicts:
        num_objs = len([obj for obj in dataset_dict['annotations'] if obj['category_id'] in class_ids])
        if num_objs not in count_map:
            count_map[num_objs] = []
        count_map[num_objs].append(dataset_dict)

    num_dataset_dicts = len(dataset_dicts)

    # Reserve each bucket's proportional share of test_count from the front
    # of the bucket; the remainder of the bucket stays available for train.
    test_count_map = {}
    for num_objs, sub_dataset_dicts in count_map.items():
        num_test = test_count * len(sub_dataset_dicts) // num_dataset_dicts
        test_count_map[num_objs] = count_map[num_objs][:num_test]
        count_map[num_objs] = count_map[num_objs][num_test:]

    if not is_train:
        # Test split: concatenation of all reserved bucket slices.
        test_dataset_dicts = []
        for _, sub_dataset_dicts in test_count_map.items():
            test_dataset_dicts += sub_dataset_dicts
        return test_dataset_dicts

    # Cumulative start offset of each bucket within the train concatenation
    # ordered by ascending level.
    sorted_num_objs = sorted(list(count_map.keys()))
    count_start_map = {}
    for i in range(len(sorted_num_objs)):
        num_objs = sorted_num_objs[i]
        if i == 0:
            count_start_map[num_objs] = 0
        else:
            num_pre_objs = sorted_num_objs[i-1]
            count_start_map[num_objs] = count_start_map[num_pre_objs] + len(count_map[num_pre_objs])

    # start_level must be a level that actually occurs in the data.
    assert start_level in count_start_map.keys()

    train_dataset_dicts = []
    for num_objs in sorted_num_objs:
        train_dataset_dicts += count_map[num_objs]

    # `count` consecutive records beginning at the start_level bucket.
    train_dataset_dicts = train_dataset_dicts[count_start_map[start_level]: count_start_map[start_level] + count]
    return train_dataset_dicts


def get_dataset_dicts(dataset_name, **kwargs):
    """Build (or load from the pickle cache) the dataset dicts for
    ``dataset_name`` with the transforms requested via ``kwargs``.

    Every supported kwarg names one transform plus its parameters. The
    function recurses: each call pops exactly one transform (first match in
    the fixed elif order below), obtains the dicts for the remaining kwargs,
    then applies the popped transform — so the elif order is the REVERSE of
    the application order. The base case (no kwargs left) parses the raw XML
    annotations via get_OURS_dicts.

    Every intermediate result is cached as a pickle under DATASET_OUTPUT_DIR;
    the file name encodes the dataset name and each transform's parameters
    (long values shortened to a 2-byte blake2b digest).
    """
    available_set = set(['sep', 'crop', 'remove_empty', 'split', 'bind_dataset_type', 'select_sub_dirs', 'select_count', 'have', 'class_boxes_count'])
    assert set(kwargs.keys()).issubset(available_set), 'Invalid kwargs'

    # Build the cache-key suffix; checked in a fixed order so equivalent
    # kwargs always map to the same file name.
    content = ''
    if 'sep' in kwargs:
        content += '-[sep]-' + '-'.join(kwargs['sep']['classes_list'])
    if 'crop' in kwargs:
        content += '-[crop]-' + '-'.join([str(i) for i in kwargs['crop']['size']])
    if 'remove_empty' in kwargs:
        content += '-[remove_empty]'
    if 'split' in kwargs:
        content += '-[split]-' + str(kwargs['split']['train_ratio']) + ('-shuffle' if kwargs['split']['shuffle'] else '')
    if 'bind_dataset_type' in kwargs:
        content += '-[bdt]-' + str(kwargs['bind_dataset_type']['dataset_type'])
    if 'select_sub_dirs' in kwargs:
        content += '-[ssd]-' + hashlib.blake2b(bytes(str(kwargs['select_sub_dirs']['sub_dirs']), encoding='utf-8'), digest_size=2).hexdigest()
    if 'select_count' in kwargs:
        content += '-[sc]-' + hashlib.blake2b(bytes(str(kwargs['select_count']['count']) + str(kwargs['select_count']['is_train']), encoding='utf-8'), digest_size=2).hexdigest()
    if 'have' in kwargs:
        content += '-[have]-' + '-'.join(kwargs['have']['classes_list'])
    if 'class_boxes_count' in kwargs:
        content += '-[cbc]-' + '-'.join([str(item) for item in kwargs['class_boxes_count']['class_ids']]) + '&' + str(kwargs['class_boxes_count']['start_level']) + '-' + str(kwargs['class_boxes_count']['count']) + '-' + str(kwargs['class_boxes_count']['test_count']) + '-' + str(kwargs['class_boxes_count']['is_train'])[0]
    pickle_file_name = os.path.join(DATASET_OUTPUT_DIR, DATASET + '-' + dataset_name + content + '-dataset.pkl')
    dataset_dicts = None
    if os.path.exists(pickle_file_name):
        # Cache hit: skip the whole recursion.
        with open(pickle_file_name, 'rb') as f:
            dataset_dicts = pickle.load(f)
    else:
        # Pop one transform, recurse for the rest, then apply it.
        if 'bind_dataset_type' in kwargs:
            params = kwargs.pop('bind_dataset_type')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = bind_dataset_type(dataset_dicts, params['dataset_type'])
        elif 'class_boxes_count' in kwargs:
            params = kwargs.pop('class_boxes_count')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = select_class_boxes_count(dataset_dicts, class_ids=params['class_ids'], start_level=params['start_level'], count=params['count'], test_count=params['test_count'], is_train=params['is_train'])
        elif 'select_count' in kwargs:
            params = kwargs.pop('select_count')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = select_count(dataset_dicts, count=params['count'], is_train=params['is_train'])
        elif 'split' in kwargs:
            params = kwargs.pop('split')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            # 'split' derives the split from the dataset name itself.
            assert dataset_name in ['train', 'test']
            if dataset_name == 'train':
                dataset_dicts = train_test_split(dataset_dicts, params['train_ratio'], params['shuffle'])[0]
            else:
                dataset_dicts = train_test_split(dataset_dicts, params['train_ratio'], params['shuffle'])[1]
        elif 'remove_empty' in kwargs:
            kwargs.pop('remove_empty')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = remove_empty(dataset_dicts)
        elif 'crop' in kwargs:
            params = kwargs.pop('crop')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = crop_dataset_dicts(dataset_dicts, params['size'])
        elif 'sep' in kwargs:
            params = kwargs.pop('sep')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = select_classes(dataset_dicts, params['classes_list'])
        elif 'have' in kwargs:
            params = kwargs.pop('have')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = have_classes(dataset_dicts, params['classes_list'])
        elif 'select_sub_dirs' in kwargs:
            params = kwargs.pop('select_sub_dirs')
            dataset_dicts = get_dataset_dicts(dataset_name, **kwargs)
            dataset_dicts = select_sub_dirs(dataset_dicts, params['sub_dirs'])
        else:
            # Base case: parse the raw annotations from disk.
            assert len(kwargs) == 0
            dataset_dicts = get_OURS_dicts(DATASET_BASE_DIR)
        os.makedirs(os.path.dirname(pickle_file_name), exist_ok=True)
        with open(pickle_file_name, 'wb') as f:
            pickle.dump(dataset_dicts, f)
    return dataset_dicts

dataset_name_train = 'train'
dataset_name_test = 'test'

# Every OURS variant shares the same dataset-info config file. Register the
# info once per variant name in a loop instead of 33 copy-pasted calls
# (registration order matches the original call order).
_OURS_G_INFO_CFG_PATH = os.path.join(DATASET_CONFIG_DIR, 'OURS-g.yaml')
for _info_postfix in [
        '-g',
        '-g-cctv',
        '-g-no-cctv',
        '-g-remove-empty',
        '-g-remove-empty-numtest',
        '-g-remove-empty-num1000',
        '-g-remove-empty-num900',
        '-g-remove-empty-num800',
        '-g-remove-empty-num700',
        '-g-remove-empty-num600',
        '-g-remove-empty-num500',
        '-g-remove-empty-num400',
        '-g-remove-empty-num300',
        '-g-remove-empty-num200',
        '-g-remove-empty-num100',
        '-g-remove-empty-num50',
        '-g-person',
        '-g-car',
        '-g-have-car',
        '-g-have-car-numtest',
        '-g-have-car-num50',
        '-g-have-car-num100',
        '-g-have-car-num200',
        '-g-have-car-num300',
        '-g-have-car-cbc-test',
        '-g-have-car-cbc-1-100',
        '-g-have-car-cbc-2-100',
        '-g-have-car-cbc-3-100',
        '-g-have-car-cbc-4-100',
        '-g-car-remove-empty',
        '-split-g',
        '-split-g-remove-empty',
        '-g-no-cctv-car-remove-empty',
]:
    DATASET_INFO_REGISTRY.register(DATASET + _info_postfix, get_dataset_info_cfg(_OURS_G_INFO_CFG_PATH))


_CAR_CLASSES = ['car', 'track']


def _select_count_cfg(count, is_train=True):
    # 'select_count' kwargs fragment: keep a fixed number of shuffled records.
    return {'count': count, 'is_train': is_train}


def _cbc_cfg(start_level, is_train=True):
    # 'class_boxes_count' kwargs fragment over the car/track category ids.
    return {
        'class_ids': [2, 3],
        'start_level': start_level,
        'count': 100,
        'test_count': 100,
        'is_train': is_train,
    }


# Variant name postfix -> get_dataset_dicts kwargs. Dict insertion order
# deliberately matches the original registration order.
_VARIANT_KWARGS = {
    '-g': {},
    '-g-cctv': {'select_sub_dirs': {'sub_dirs': ['20200515']}},
    '-g-no-cctv': {'select_sub_dirs': {'sub_dirs': ['20200508', '20200510']}},
    '-g-remove-empty': {'remove_empty': None},
    '-g-remove-empty-numtest': {
        'remove_empty': None,
        'select_count': _select_count_cfg(489, is_train=False),
    },
}
for _n in (1000, 900, 800, 700, 600, 500, 400, 300, 200, 100, 50):
    _VARIANT_KWARGS['-g-remove-empty-num' + str(_n)] = {
        'remove_empty': None,
        'select_count': _select_count_cfg(_n),
    }
_VARIANT_KWARGS.update({
    '-split-g': {'split': {'train_ratio': 0.7, 'shuffle': True}},
    '-split-g-remove-empty': {
        'split': {'train_ratio': 0.7, 'shuffle': True},
        'remove_empty': None,
    },
    '-g-person': {'sep': {'classes_list': ['pedestrian', 'person']}},
    '-g-car': {'sep': {'classes_list': list(_CAR_CLASSES)}},
    '-g-no-cctv-car-remove-empty': {
        'sep': {'classes_list': list(_CAR_CLASSES)},
        'select_sub_dirs': {'sub_dirs': ['20200508', '20200510']},
        'remove_empty': None,
    },
    '-g-car-remove-empty': {
        'sep': {'classes_list': list(_CAR_CLASSES)},
        'remove_empty': None,
    },
    '-g-have-car': {'have': {'classes_list': list(_CAR_CLASSES)}},
    '-g-have-car-numtest': {
        'have': {'classes_list': list(_CAR_CLASSES)},
        'select_count': _select_count_cfg(153, is_train=False),
    },
})
for _n in (50, 100, 200, 300):
    _VARIANT_KWARGS['-g-have-car-num' + str(_n)] = {
        'have': {'classes_list': list(_CAR_CLASSES)},
        'select_count': _select_count_cfg(_n),
    }
_VARIANT_KWARGS['-g-have-car-cbc-test'] = {
    'have': {'classes_list': list(_CAR_CLASSES)},
    'class_boxes_count': _cbc_cfg(1, is_train=False),
}
for _lvl in (1, 2, 3, 4):
    _VARIANT_KWARGS['-g-have-car-cbc-' + str(_lvl) + '-100'] = {
        'have': {'classes_list': list(_CAR_CLASSES)},
        'class_boxes_count': _cbc_cfg(_lvl),
    }

for postfix, kwargs in _VARIANT_KWARGS.items():
    dataset_full_name = DATASET + postfix

    # Loop variables are bound as lambda default arguments so each registered
    # loader keeps its own name/kwargs/info (avoids late-binding closures).
    # get_dataset_dicts pops from its **kwargs copy, so the registered dicts
    # themselves are never mutated.
    DATASET_REGISTRY.register(
        dataset_full_name + '-train',
        lambda dataset_name=dataset_name_train, kwargs=kwargs, dataset_info=DATASET_INFO_REGISTRY.get(dataset_full_name): setup_dataset_info(map_category(get_dataset_dicts(dataset_name, **kwargs), map_dict), dataset_info),
    )
    DATASET_REGISTRY.register(
        dataset_full_name + '-test',
        lambda dataset_name=dataset_name_test, kwargs=kwargs, dataset_info=DATASET_INFO_REGISTRY.get(dataset_full_name): setup_dataset_info(map_category(get_dataset_dicts(dataset_name, **kwargs), map_dict), dataset_info),
    )

