# uncompyle6 version 3.8.0
# Python bytecode 3.7.0 (3394)
# Decompiled from: Python 3.9.12 (main, Apr  4 2022, 05:22:27) [MSC v.1916 64 bit (AMD64)]
# Embedded file name: /media/psdz/yw-02/project/PEHO/tools/../lib/dataset/vcoco_upt.py
# Compiled at: 2022-12-12 15:25:15
# Size of source mod 2**32: 12924 bytes
"""
V-COCO dataset in Python3

Fred Zhang <frederic.zhang@anu.edu.au>

The Australian National University
Australian Centre for Robotic Vision
"""
import os, json, itertools, numpy as np
from typing import Optional, List, Callable, Tuple, Any, Dict
from PIL import Image
from torch.utils.data import Dataset
import torch, torchvision

class StandardTransform:
    """https://github.com/pytorch/vision/blob/master/torchvision/datasets/vision.py"""

    def __init__(self, transform: Optional[Callable]=None, target_transform: Optional[Callable]=None) -> None:
        self.transform = transform
        self.target_transform = target_transform

    def __call__(self, inputs: Any, target: Any) -> Tuple[(Any, Any)]:
        # Apply each transform only when it was actually supplied.
        if self.transform is not None:
            inputs = self.transform(inputs)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return inputs, target

    def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
        # Label the first repr line with `head`; pad continuation lines to align.
        first, *rest = repr(transform).splitlines()
        pad = ' ' * len(head)
        return [head + first] + [pad + line for line in rest]

    def __repr__(self) -> str:
        parts = [self.__class__.__name__]
        if self.transform is not None:
            parts.extend(self._format_transform_repr(self.transform, 'Transform: '))
        if self.target_transform is not None:
            parts.extend(self._format_transform_repr(self.target_transform, 'Target transform: '))
        return '\n'.join(parts)


class ImageDataset(Dataset):
    """Base class for image datasets.

    Parameters:
    -----------
    root: str
        Root directory containing the images.
    transform: callable, optional
        Transform applied to the input image only.
    target_transform: callable, optional
        Transform applied to the target only.
    transforms: callable, optional
        Joint transform applied to (image, target). When given, the two
        individual transforms above are ignored.
    """

    def __init__(self, root: str, transform: Optional[Callable]=None, target_transform: Optional[Callable]=None, transforms: Optional[Callable]=None) -> None:
        self._root = root
        self._transform = transform
        self._target_transform = target_transform
        if transforms is None:
            # Compose the two individual transforms into a joint transform.
            self._transforms = StandardTransform(transform, target_transform)
        else:
            # A joint transform wins over the individual ones; warn if both given.
            if transform is not None or target_transform is not None:
                print('WARNING: Argument transforms is given, transform/target_transform are ignored.')
            self._transforms = transforms

    def __len__(self):
        raise NotImplementedError

    def __getitem__(self, i):
        raise NotImplementedError

    def __repr__(self) -> str:
        """Return the executable string representation"""
        return '{}(root={})'.format(self.__class__.__name__, repr(self._root))

    def __str__(self) -> str:
        """Return the readable string representation"""
        reprstr = 'Dataset: ' + self.__class__.__name__ + '\n'
        reprstr += '\tNumber of images: {}\n'.format(self.__len__())
        reprstr += '\tRoot path: {}\n'.format(self._root)
        return reprstr

    def load_image(self, path: str) -> "Image.Image":
        """Load an image as PIL.Image (forced to RGB)."""
        return Image.open(path).convert('RGB')


class VCOCO(ImageDataset):
    """V-COCO dataset.

    Parameters:
    -----------
    root: str
        Directory that contains the images.
    anno_file: str
        Path to the JSON annotation file.
    transform: callable, optional
        Transform applied to the input image only.
    target_transform: callable, optional
        Transform applied to the target annotation only.
    transforms: callable, optional
        Joint transform applied to (image, target); overrides the two above.
    """

    def __init__(self, root, anno_file, transform=None, target_transform=None, transforms=None):
        super().__init__(root, transform, target_transform, transforms)
        with open(anno_file, 'r') as f:
            anno = json.load(f)
        # Number of object classes is not fixed here; kept as None by design.
        self.num_object_cls = None
        self.num_action_cls = 24
        self._anno_file = anno_file
        # NOTE: the misspelled method name is kept for backward compatibility.
        self._compute_metatdata(anno)

    def __len__(self) -> int:
        """Return the number of images"""
        return len(self._keep)

    def __getitem__(self, i: int) -> Tuple[Any, Any]:
        """
        Parameters:
        -----------
        i: int
            The index to an image.

        Returns:
        --------
        image: Any
            Input Image. By default, when relevant transform arguments are None,
            the image is in the form of PIL.Image.
        target: Any
            The annotation associated with the given image. By default, when
            relevant transform arguments are None, the target is a dict with the
            following keys:
                boxes_h: List[list]
                    Human bounding boxes in a human-object pair encoded as the top
                    left and bottom right corners
                boxes_o: List[list]
                    Object bounding boxes corresponding to the human boxes
                actions: List[int]
                    Ground truth action class for each human-object pair
                objects: List[int]
                    Object category index for each object in human-object pairs. The
                    indices follow the 80-class standard, where 0 means background and
                    1 means person.
        """
        image_id = self.image_id(i)
        image = self.load_image(os.path.join(self._root, self.filename(i)))
        # Copy so the cached annotation list is not mutated by the pop below.
        target = self._anno[self._keep[i]].copy()
        target.pop('file_name')
        target['image_id'] = int(image_id)
        image, target = self._transforms(image, target)
        return (image, target)

    def __repr__(self) -> str:
        """Return the executable string representation"""
        reprstr = self.__class__.__name__ + '(root=' + repr(self._root)
        reprstr += ', anno_file='
        reprstr += repr(self._anno_file)
        reprstr += ')'
        return reprstr

    def __str__(self) -> str:
        """Return the readable string representation"""
        reprstr = 'Dataset: ' + self.__class__.__name__ + '\n'
        reprstr += '\tNumber of images: {}\n'.format(self.__len__())
        reprstr += '\tImage directory: {}\n'.format(self._root)
        reprstr += '\tAnnotation file: {}\n'.format(self._anno_file)
        return reprstr

    @property
    def annotations(self) -> List[dict]:
        """Return the raw per-image annotation dicts"""
        return self._anno

    @property
    def actions(self) -> List[str]:
        """Return the list of actions"""
        return self._actions

    @property
    def objects(self) -> List[str]:
        """Return the list of objects"""
        return self._objects

    @property
    def present_objects(self) -> List[int]:
        """Return the list of objects that are present in the dataset partition"""
        return self._present_objects

    @property
    def num_instances(self) -> List[int]:
        """Return the number of human-object pairs for each action class"""
        return self._num_instances

    @property
    def action_to_object(self) -> List[list]:
        """Return the list of objects for each action"""
        return self._action_to_object

    @property
    def object_to_action(self) -> Dict[int, list]:
        """Return the list of actions for each object"""
        # Invert the action -> object mapping. Object indices follow the
        # 80-class standard (1..80); duplicates per object are suppressed.
        object_to_action = {obj: [] for obj in range(1, 81)}
        for act, obj in enumerate(self._action_to_object):
            for o in obj:
                if act not in object_to_action[o]:
                    object_to_action[o].append(act)
        return object_to_action

    def image_id(self, idx: int) -> int:
        """Return the COCO image ID"""
        return self._image_ids[self._keep[idx]]

    def filename(self, idx: int) -> str:
        """Return the image file name given the index"""
        return self._anno[self._keep[idx]]['file_name']

    def image_size(self, idx: int) -> Tuple[int, int]:
        """Return the size (width, height) of an image"""
        return self.load_image(os.path.join(self._root, self.filename(idx))).size

    def _compute_metatdata(self, f: dict) -> None:
        """Parse the raw annotation dict and cache dataset metadata.

        NOTE: the misspelled method name is kept for backward compatibility.
        """
        self._anno = f['annotations']
        self._actions = f['classes']
        self._objects = f['objects']
        self._image_ids = f['images']
        self._action_to_object = f['action_to_object']
        # Build the list of kept indices directly instead of the original
        # list(range(...)) + keep.remove(i), which was O(n^2).
        keep = []
        num_instances = [0 for _ in range(len(f['classes']))]
        valid_objects = [[] for _ in range(len(f['classes']))]
        for i, anno_in_image in enumerate(f['annotations']):
            # Skip images without any annotated human-object pairs.
            if len(anno_in_image['actions']) == 0:
                continue
            keep.append(i)
            for act, obj in zip(anno_in_image['actions'], anno_in_image['objects']):
                num_instances[act] += 1
                if obj not in valid_objects[act]:
                    valid_objects[act].append(obj)

        objects = list(itertools.chain.from_iterable(valid_objects))
        self._present_objects = np.unique(np.asarray(objects)).tolist()
        self._num_instances = num_instances
        self._keep = keep
        # Cache ground-truth HOI targets keyed by COCO image id.
        self._target_hoi = dict()
        for i in keep:
            image_id = self._image_ids[i]
            anno = self._anno[i].copy()

            self._target_hoi[image_id] = {'file_name': anno['file_name'],
             'boxes_h': to_tensor(anno['boxes_h']),
             'boxes_o': to_tensor(anno['boxes_o']),
             'actions': to_tensor(anno['actions']),
             # 'labels' deliberately duplicates 'actions' for consumers
             # that expect that key name.
             'labels': to_tensor(anno['actions']),
             'object': to_tensor(anno['objects'])}


def _to_list_of_tensor(x, dtype=None, device=None):
    return [torch.as_tensor(item, dtype=dtype, device=device) for item in x]


def _to_tuple_of_tensor(x, dtype=None, device=None):
    return tuple((torch.as_tensor(item, dtype=dtype, device=device) for item in x))


def _to_dict_of_tensor(x, dtype=None, device=None):
    return dict([(k, torch.as_tensor(v, dtype=dtype, device=device)) for k, v in x.items()])


def to_tensor(x, input_format='tensor', dtype=None, device=None):
    """Convert input data to tensor based on its format.

    Raises ValueError when `input_format` is not one of
    'tensor', 'pil', 'list', 'tuple', 'dict'.
    """
    if input_format == 'tensor':
        result = torch.as_tensor(x, dtype=dtype, device=device)
    elif input_format == 'pil':
        result = torchvision.transforms.functional.to_tensor(x).to(dtype=dtype, device=device)
    elif input_format == 'list':
        result = _to_list_of_tensor(x, dtype=dtype, device=device)
    elif input_format == 'tuple':
        result = _to_tuple_of_tensor(x, dtype=dtype, device=device)
    elif input_format == 'dict':
        result = _to_dict_of_tensor(x, dtype=dtype, device=device)
    else:
        raise ValueError(f'Unsupported format {input_format}')
    return result


class ToTensor:
    """Convert to tensor"""

    def __init__(self, input_format='tensor', dtype=None, device=None):
        # Remember the conversion settings so the instance can be reused
        # as a transform callable.
        self.input_format = input_format
        self.dtype = dtype
        self.device = device

    def __call__(self, x):
        return to_tensor(x,
          input_format=self.input_format,
          dtype=self.dtype,
          device=self.device)

    def __repr__(self):
        return '{}(input_format={}, dtype={}, device={})'.format(
            self.__class__.__name__,
            repr(self.input_format),
            repr(self.dtype),
            repr(self.device))


def budild_vcoco_upt(data_root, image_set):
    """Build the V-COCO dataset for the given image set.

    Any image set containing 'train' maps to the 'trainval' partition;
    everything else maps to 'test'. The misspelled function name is kept
    so existing callers keep working.
    """
    if 'train' in image_set:
        partition = 'trainval'
    else:
        partition = 'test'
    image_dir = dict(trainval='images/train2014',
      test='images/val2014')
    root = os.path.join(data_root, image_dir[partition])
    anno_file = os.path.join(data_root, 'instances_vcoco_{}.json'.format(partition))
    return VCOCO(root=root,
      anno_file=anno_file,
      target_transform=ToTensor(input_format='dict'))