"""
V-COCO dataset in Python3

Fred Zhang <frederic.zhang@anu.edu.au>

The Australian National University
Australian Centre for Robotic Vision
"""

import os
import cv2
import json
import random
import logging
import itertools
import numpy as np
from PIL import Image

import torch
from torch.utils.data import Dataset
import torchvision
import torchvision.transforms.functional as F
from torchvision.ops.boxes import box_iou
from typing import Optional, List, Callable, Tuple, Any, Dict

from utils.utils import to_tensor
from utils import ddp_opx
from utils.vis import show_ori_image_with_joints
logger = logging.getLogger(__name__)

class VCOCO(Dataset):
    """
    V-COCO dataset

    Loads per-image HOI annotations from
    ``annotations/fredzzhang/instances_vcoco_{image_set}.json`` and images
    from the corresponding COCO 2014 directory. Optionally attaches cached
    detector outputs and pose annotations depending on ``cfg.MODEL.NAME``.
    """
    def __init__(self, cfg, root: str, image_set: str, is_train: bool = False, transforms: Optional[Callable] = None):
        super().__init__()
        self.cfg = cfg
        # train/val/trainval all draw images from COCO train2014;
        # the held-out test split uses val2014.
        image_dir = dict(
            train='images/train2014',
            val='images/train2014',
            trainval='images/train2014',
            test='images/val2014'
        )
        anno_file = os.path.join(root , f'annotations/fredzzhang/instances_vcoco_{image_set}.json')
        with open(anno_file, 'r') as f:
            anno = json.load(f)

        self._anno_file = anno_file

        # Compute metadata: populates self._anno, self._keep, self._actions,
        # etc. (the method name carries a historical typo).
        self._compute_metatdata(anno)

        # Directory of cached per-image detector outputs (JSON, one per image).
        self.detection_root = os.path.join(root, 'detections', image_set)
        self._root = os.path.join(root, image_dir[image_set])
        self.image_set = image_set
        self.transforms = transforms
        self.is_train = is_train
        self._init_pose_()

        # NOTE(review): num_object_cls is never set here — presumably filled
        # in elsewhere or unused; confirm before relying on it.
        self.num_object_cls = None
        self.num_action_cls = 24
        # Candidate shorter-side lengths for multi-scale training (DETR-style).
        self.scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
    def __len__(self) -> int:
        """Number of images retained after filtering out pair-less entries."""
        kept_indices = self._keep
        return len(kept_indices)

    def __getitem__(self, i: int) -> Tuple[Any, Any]:
        """
        Parameters:
        -----------
        i: int
            The index to an image (position in the filtered ``self._keep`` list).
        
        Returns:
        --------
        image: Any
            Input image. Loaded as a PIL.Image, augmented/resized, then passed
            through ``self.transforms``.
        target: Any
            The annotation associated with the given image: a dict with the
            following keys
                boxes_h: List[list]
                    Human bounding boxes in a human-object pair encoded as the top
                    left and bottom right corners
                boxes_o: List[list]
                    Object bounding boxes corresponding to the human boxes
                actions (aliased as 'labels'): List[int]
                    Ground truth action class for each human-object pair
                object: List[int]
                    Object category index for each object in human-object pairs. The
                    indices follow the 80-class standard, where 0 means background and
                    1 means person.
            plus 'image_id', 'ori_size' (w, h before resizing), 'size' (h, w
            after resizing), and optionally 'detections' and 'pose'.
        """
        image_id = self.image_id(i)
        image = self.load_image(os.path.join(
            self._root, self.filename(i)
        ))
        # Shallow-copy so the cached annotation dict is not mutated below.
        target = self._anno[self._keep[i]].copy()
        target.pop('file_name')
        target = to_tensor(target)

        # Alias/rename keys to what the downstream model expects.
        target['labels'] = target['actions']
        target['object'] = target.pop('objects')
        target['image_id'] = int(image_id)
        ow, oh = image.size
        target['ori_size'] = torch.tensor((ow, oh))
        
        if 'fasterrcnn' in self.cfg.MODEL.NAME:
            # Two-stage models consume pre-computed detections cached as JSON
            # files named after the image.
            detection_path = os.path.join(
                self.detection_root,
                self.filename(i).replace('jpg', 'json')
            )
            target['detections'] = to_tensor(self.get_detect_box(detection_path))
        if self.pose_data and self.pose_data._has_pose(image_id):
            pose_anno = self.pose_data._get_pose_anno(image_id)
            target['pose'] = pose_anno
            # target['pose'] = self.match_by_box(pose_anno, target['boxes_h'])  # alternative: match pose GT boxes against DETR boxes later to decide which pose GT boxes to keep
        
        # --------------- data augmentation ---------------
        if self.is_train:
            # Flip
            if random.random() < 0.5:
                image, target = self.hflip(image, target)
            # ColorJitter
            image = torchvision.transforms.ColorJitter(.4, .4, .4)(image)
            # DETR-style multi-scale: either resize directly to a sampled
            # scale, or first shrink to 400/500/600 then rescale.
            if random.random() < 0.5:
                size = random.choice(self.scales)
                image, target = self.resize(image, target, size, max_size=1333)
            else:
                size = random.choice([400, 500, 600])
                image, target = self.resize(image, target, size, max_size=None)
                size = random.choice(self.scales)
                image, target = self.resize(image, target, size, max_size=1333)
        else:   # test eval — singleton choice, i.e. always 800
            size = random.choice([800])
            image, target = self.resize(image, target, size, max_size=1333)

        # NOTE(review): assumes self.transforms is callable (never None) and
        # returns a CHW tensor — confirm against the dataloader setup.
        image = self.transforms(image)
        h, w = image.shape[-2:]
        target["size"] = torch.tensor([h, w])
        return image, target

    def _compute_metatdata(self, f: dict) -> None:
        """Cache annotation tables and filter images without human-object pairs.

        Parameters:
        -----------
        f: dict
            Parsed annotation file with keys 'annotations', 'classes',
            'objects', 'images' and 'action_to_object'.

        Populates:
            self._anno, self._actions, self._objects, self._image_ids,
            self._action_to_object: raw tables from the annotation file.
            self._keep: indices of images with at least one human-object pair.
            self._num_instances: number of pairs per action class.
            self._present_objects: sorted unique object classes present.
        """
        self._anno = f['annotations']
        self._actions = f['classes']
        self._objects = f['objects']
        self._image_ids = f['images']
        self._action_to_object = f['action_to_object']

        # Indices of images without human-object pairs. Building the keep
        # list in one pass replaces the original keep.remove(i) inside the
        # loop, which was O(n^2) over the number of images.
        empty = {
            i for i, anno_in_image in enumerate(f['annotations'])
            if len(anno_in_image['actions']) == 0
        }
        keep = [i for i in range(len(f['images'])) if i not in empty]

        num_instances = [0 for _ in range(len(f['classes']))]
        valid_objects = [set() for _ in range(len(f['classes']))]
        for anno_in_image in f['annotations']:
            for act, obj in zip(anno_in_image['actions'], anno_in_image['objects']):
                num_instances[act] += 1
                valid_objects[act].add(obj)

        # Unique object classes present anywhere in this partition (sorted
        # by np.unique, matching the original behavior).
        objects = list(itertools.chain.from_iterable(valid_objects))
        self._present_objects = np.unique(np.asarray(objects)).tolist()
        self._num_instances = num_instances
        self._keep = keep
    
    def _init_pose_(self):
        if 'pose' in self.cfg.MODEL.NAME:
            from dataset.vcoco_pose import VCOCOPoseDataset
            self.pose_data = VCOCOPoseDataset(self.cfg, self.cfg.DATASET.ROOT, self.image_set, self.is_train)
        else:
            self.pose_data = None

    def get_detect_box(self, detection_path):
        """Load cached detector output from a JSON file and convert the
        contained fields to tensors."""
        with open(detection_path, 'r') as fp:
            raw = json.load(fp)
        return to_tensor(raw)

    def match_by_box(self, pose_anno, boxes_h):
        """Keep only pose entries whose person box coincides (IoU >= 0.99)
        with a ground-truth human box from a human-object pair.

        (Original comment, translated: retain only the pose data that has
        an associated HOI.)
        """
        img_numpy, boxes_pose, joints, joints_vis = pose_anno
        unique_h = torch.unique(boxes_h, dim=0, sorted=False)
        overlaps = box_iou(unique_h, boxes_pose)
        # Column indices of (human, pose) pairs that are effectively the
        # same box.
        matched_pairs = torch.nonzero(torch.ge(overlaps, 0.99))
        keep_idx = matched_pairs[:, 1]

        return {
            'img': img_numpy,
            'joints': joints[keep_idx],
            'joints_vis': joints_vis[keep_idx],
            'boxes_pose': boxes_pose[keep_idx]
        }

    def __repr__(self) -> str:
        """Executable-style representation (optional ctor args omitted)."""
        return (
            f'{self.__class__.__name__}'
            f'(root={self._root!r}, anno_file={self._anno_file!r})'
        )

    def __str__(self) -> str:
        """Human-readable multi-line summary of the dataset."""
        return (
            f'Dataset: {self.__class__.__name__}\n'
            f'\tNumber of images: {len(self)}\n'
            f'\tImage directory: {self._root}\n'
            f'\tAnnotation file: {self._anno_file}\n'
        )

    @property
    def annotations(self) -> List[dict]:
        """Return the raw per-image annotation dicts (unfiltered; includes
        images dropped from ``self._keep``)."""
        return self._anno

    @property
    def actions(self) -> List[str]:
        """Return the list of action (verb) class names"""
        return self._actions

    @property
    def objects(self) -> List[str]:
        """Return the list of object class names"""
        return self._objects

    @property
    def present_objects(self) -> List[int]:
        """Return the list of objects that are present in the dataset partition"""
        return self._present_objects

    @property
    def num_instances(self) -> List[int]:
        """Return the number of human-object pairs for each action class"""
        return self._num_instances

    @property
    def action_to_object(self) -> List[list]:
        """Return the list of valid object classes for each action"""
        return self._action_to_object

    @property
    def object_to_action(self) -> Dict[int, list]:
        """Invert the action->object table into object->actions.

        Keys cover the 80 COCO object classes (1..80); values are the
        action indices each object can participate in (deduplicated,
        in ascending action order). Recomputed on every access.
        """
        inverted = {obj_idx: [] for obj_idx in range(1, 81)}
        for action_idx, obj_list in enumerate(self._action_to_object):
            for obj_idx in obj_list:
                if action_idx not in inverted[obj_idx]:
                    inverted[obj_idx].append(action_idx)
        return inverted

    def image_id(self, idx: int) -> int:
        """Map a dataset index to the underlying COCO image ID."""
        kept = self._keep[idx]
        return self._image_ids[kept]

    def filename(self, idx: int) -> str:
        """Return the image file name for dataset index *idx*."""
        anno_entry = self._anno[self._keep[idx]]
        return anno_entry['file_name']

    def load_image(self, path: str, mode='PIL'):
        """Read an image from disk.

        mode='opencv' returns an RGB numpy.ndarray (orientation EXIF tags
        ignored); any other mode returns an RGB PIL.Image.
        """
        if mode != 'opencv':
            return Image.open(path).convert('RGB')
        bgr = cv2.imread(
            path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION
        )
        return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

    def image_size(self, idx: int) -> Tuple[int, int]:
        """Return the size (width, height) of the image at dataset index *idx*."""
        full_path = os.path.join(self._root, self.filename(idx))
        return self.load_image(full_path, mode='PIL').size

    def hflip(self, image, target):
        if isinstance(image, np.ndarray):   # OpenCV
            flipped_image = flipped_image[:, ::-1, :]
        else:
            flipped_image = F.hflip(image)

        w, h = image.size

        target = target.copy()
        if "boxes" in target:
            boxes = target["boxes"]
            boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
            target["boxes"] = boxes

        # Flip human and object boxes
        if "boxes_h" in target:
            boxes = target["boxes_h"]
            boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
            target["boxes_h"] = boxes
        if "boxes_o" in target:
            boxes = target["boxes_o"]
            boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
            target["boxes_o"] = boxes

        if 'detections' in target:
            boxes = target['detections']['boxes']
            boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
            target['detections']['boxes'] = boxes
        
        if 'pose' in target:
            boxes, joints, img = target['pose']['boxes_pose'], target['pose']['joints'], target['pose']['img']
            # TODO 提升flip偏移12个像素的小问题 
            # CVPR 2020 UDP： https://github.com/HuangJunJie2017/mmpose/commit/12a0e1e67c25667af0f6c97ecdc679ed6d2bacbd#diff-6f7db4195f9b076c0397f3e806a1d54846e879457f4c56d5ba72f47b550dfcdf
            flip_joints = joints[:, :, [0, 1, 2]] * torch.as_tensor([-1, 1, 1])+ torch.as_tensor([w, 0, 0])
            flip_boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
            target['pose']['boxes_pose'] = flip_boxes
            target['pose']['joints'] = flip_joints
            target['pose']['img'] = img[:, ::-1, :]

        if "masks" in target:
            target['masks'] = target['masks'].flip(-1)

        return flipped_image, target
    
    def resize(self, image, target, size, max_size=None):
        # size can be min_size (scalar) or (w, h) tuple
        #
        # Resizes the image and rescales every box/joint annotation in
        # `target` by the same per-axis factors. Returns (image, target).

        def get_size_with_aspect_ratio(image_size, size, max_size=None):
            # Choose (h, w) so the shorter side equals `size`, capping the
            # longer side at `max_size` when given (shrinking `size` if needed).
            w, h = image_size
            if max_size is not None:
                min_original_size = float(min((w, h)))
                max_original_size = float(max((w, h)))
                if max_original_size / min_original_size * size > max_size:
                    size = int(round(max_size * min_original_size / max_original_size))

            # Shorter side already matches: keep the original size.
            if (w <= h and w == size) or (h <= w and h == size):
                return (h, w)

            if w < h:
                ow = size
                oh = int(size * h / w)
            else:
                oh = size
                ow = int(size * w / h)

            return (oh, ow)

        def get_size(image_size, size, max_size=None):
            # Explicit (w, h) tuples are passed through reversed as (h, w).
            if isinstance(size, (list, tuple)):
                return size[::-1]
            else:
                return get_size_with_aspect_ratio(image_size, size, max_size)

        # NOTE(review): `image.size` is the (w, h) pair only for PIL images;
        # for an ndarray it is the total element count (an int), so the
        # OpenCV path below appears broken — confirm it is never exercised
        # (load_image defaults to PIL).
        size = get_size(image.size, size, max_size)
        
        if isinstance(image, np.ndarray):   # OpenCV
            # NOTE(review): cv2.resize expects dsize as (w, h) but `size` is
            # (h, w) here — cf. the `size[::-1]` used for the pose image below.
            rescaled_image = cv2.resize(image, size)
        else:
            rescaled_image = F.resize(image, size)

        if target is None:
            return rescaled_image, None

        # Per-axis scale factors from the (w, h) of new and old PIL images.
        ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
        ratio_width, ratio_height = ratios

        # Shallow copy so the caller's dict is not mutated.
        target = target.copy()
        if "boxes" in target:
            boxes = target["boxes"]
            scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
            target["boxes"] = scaled_boxes

        # Resize human and object boxes
        if "boxes_h" in target:
            boxes = target["boxes_h"]
            scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
            target["boxes_h"] = scaled_boxes
        if "boxes_o" in target:
            boxes = target["boxes_o"]
            scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
            target["boxes_o"] = scaled_boxes

        if "detections" in target:
            boxes = target['detections']['boxes']
            scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
            target['detections']['boxes'] = scaled_boxes
        
        if 'pose' in target:
            boxes, joints, img = target['pose']['boxes_pose'], target['pose']['joints'], target['pose']['img']
            # Joints are (..., 3): scale x and y, keep the visibility score.
            scaled_joints = joints * torch.as_tensor([ratio_width, ratio_height, 1])
            scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
            target['pose']['boxes_pose'], target['pose']['joints'] = scaled_boxes, scaled_joints
            # cv2.resize takes dsize as (w, h), hence the reversal.
            target['pose']['img'] = cv2.resize(img, size[::-1])

        if "area" in target:
            area = target["area"]
            scaled_area = area * (ratio_width * ratio_height)
            target["area"] = scaled_area

        h, w = size
        target["size"] = torch.tensor([h, w])

        if "masks" in target:
            # Nearest-neighbour resize, then back to boolean masks.
            target['masks'] = torchvision.ops.misc.interpolate(target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5

        return rescaled_image, target