from torch.utils.data import DataLoader, Dataset
import json
import os
from PIL import Image
from transforms import Resize, RandomFlip, ToTensor, Normalize, Compose
from utils import collate_fn
from tqdm import tqdm


class CocoDetection(Dataset):
    """COCO-style detection dataset that parses the annotation JSON once,
    up front, and keeps the whole index in memory.

    Each sample is ``(img, (boxes, labels), img_id, (w, h))`` where ``boxes``
    are COCO ``[x, y, w, h]`` lists, ``labels`` are category ids remapped to a
    contiguous 0-based range via the module-level ``category_mscoco_remap``,
    and ``(w, h)`` is the image size BEFORE any transform is applied.

    Images that have no annotation at all are dropped from the index.
    """

    def __init__(self, anno_root, image_root, transforms=None):
        """Build the in-memory index of ``(image_id, path, (boxes, cats))``.

        Args:
            anno_root: path to a COCO ``instances_*.json`` annotation file.
            image_root: directory containing the image files named in the JSON.
            transforms: optional callable ``(img, boxes) -> (img, boxes)``
                applied in ``__getitem__``.
        """
        print('start loading into memory!')
        self.dataset = []
        self.transforms = transforms
        with open(anno_root, 'r') as f:
            data = json.load(f)

        # {image_id: ([bbox, ...], [category_id, ...])} — one entry per
        # annotated image, accumulated with setdefault instead of an
        # if-in-keys/else branch.
        label_dic = {}
        for anno in data['annotations']:
            boxes, cats = label_dic.setdefault(anno['image_id'], ([], []))
            boxes.append(anno['bbox'])
            cats.append(anno['category_id'])

        # {image_id: full path to the image file}
        file_dic = {
            img['id']: os.path.join(image_root, img['file_name'])
            for img in data['images']
        }

        # Index images in ascending id order, keeping only those that carry
        # at least one annotation (un-annotated images are skipped).
        for img_id in sorted(file_dic):
            if img_id in label_dic:
                self.dataset.append((img_id, file_dic[img_id], label_dic[img_id]))
        print('dataset completed!')

    def __len__(self):
        """Return the number of annotated images in the index."""
        return len(self.dataset)

    def __getitem__(self, idx):
        """Load and return one sample.

        Returns:
            ``(img, (boxes, category_remap), img_id, (w, h))`` — ``(w, h)``
            is the original PIL size, captured before transforms run.
        """
        img_id, img_path, (boxes, category) = self.dataset[idx]
        # Remap sparse COCO ids (1..90 with gaps) to contiguous 0-based labels.
        category_remap = [category_mscoco_remap[k] for k in category]
        img = Image.open(img_path).convert("RGB")
        w, h = img.size
        if self.transforms:
            # NOTE(review): `boxes` here aliases the cached list inside
            # self.dataset — if a transform mutates it in place, the cache is
            # silently corrupted across epochs. Confirm transforms copy.
            img, boxes = self.transforms(img, boxes)
        return img, (boxes, category_remap), img_id, (w, h)

# Original (sparse) MS-COCO category id -> human-readable class name.
# COCO's 80 object classes use ids in 1..90 with gaps (12, 26, 29, 30, 45,
# 66, 68, 69, 71 and 83 are unused), so these ids are NOT contiguous —
# training labels are derived by re-indexing the keys into 0..79.
mscoco_category2name = {
    1: 'person',
    2: 'bicycle',
    3: 'car',
    4: 'motorcycle',
    5: 'airplane',
    6: 'bus',
    7: 'train',
    8: 'truck',
    9: 'boat',
    10: 'traffic light',
    11: 'fire hydrant',
    13: 'stop sign',
    14: 'parking meter',
    15: 'bench',
    16: 'bird',
    17: 'cat',
    18: 'dog',
    19: 'horse',
    20: 'sheep',
    21: 'cow',
    22: 'elephant',
    23: 'bear',
    24: 'zebra',
    25: 'giraffe',
    27: 'backpack',
    28: 'umbrella',
    31: 'handbag',
    32: 'tie',
    33: 'suitcase',
    34: 'frisbee',
    35: 'skis',
    36: 'snowboard',
    37: 'sports ball',
    38: 'kite',
    39: 'baseball bat',
    40: 'baseball glove',
    41: 'skateboard',
    42: 'surfboard',
    43: 'tennis racket',
    44: 'bottle',
    46: 'wine glass',
    47: 'cup',
    48: 'fork',
    49: 'knife',
    50: 'spoon',
    51: 'bowl',
    52: 'banana',
    53: 'apple',
    54: 'sandwich',
    55: 'orange',
    56: 'broccoli',
    57: 'carrot',
    58: 'hot dog',
    59: 'pizza',
    60: 'donut',
    61: 'cake',
    62: 'chair',
    63: 'couch',
    64: 'potted plant',
    65: 'bed',
    67: 'dining table',
    70: 'toilet',
    72: 'tv',
    73: 'laptop',
    74: 'mouse',
    75: 'remote',
    76: 'keyboard',
    77: 'cell phone',
    78: 'microwave',
    79: 'oven',
    80: 'toaster',
    81: 'sink',
    82: 'refrigerator',
    84: 'book',
    85: 'clock',
    86: 'vase',
    87: 'scissors',
    88: 'teddy bear',
    89: 'hair drier',
    90: 'toothbrush',
    # 91: 'background',
}

category_mscoco_remap = {k:i for i, k in enumerate(mscoco_category2name.keys())}

# if __name__ == '__main__':
#     transforms = Compose([Resize((640, 640)),
#                             RandomFlip(),
#                             ToTensor(),
#                             Normalize()])
#     anno_root = '/mnt/sdb2/ray/mmdetection-main/data/coco/annotations/instances_val2017.json'
#     img_root = '/mnt/sdb2/ray/mmdetection-main/data/coco/val2017/'
#     dataset = CocoDetection(anno_root=anno_root, image_root=img_root, transforms=transforms)
#     dataloader = DataLoader(dataset, batch_size=2, collate_fn=collate_fn, num_workers=8)
#     a = 0
#     for samples, targets, img_ids, img_ori_size in dataloader:
#         for i in targets:
#             if i['target_box'].numel() == 0:
#                 a += 1
#     print(a)


