# Copyright (c) OpenMMLab. All rights reserved.
from typing import List

import mat4py
from mmengine import get_file_backend

from mmpretrain.registry import DATASETS
from .base_dataset import BaseDataset
from .categories import DTD_CATEGORIES


@DATASETS.register_module()
class DTD(BaseDataset):
    """The Describable Texture Dataset (DTD).

    Support the `Describable Texture Dataset <https://www.robots.ox.ac.uk/~vgg/data/dtd/>`_.
    After downloading and decompressing the archive, the dataset directory structure is as follows.

    DTD dataset directory: ::

        dtd
        β”œβ”€β”€ images
        β”‚   β”œβ”€β”€ banded
        β”‚   β”‚   β”œβ”€β”€ banded_0002.jpg
        β”‚   β”‚   β”œβ”€β”€ banded_0004.jpg
        β”‚   β”‚   └── ...
        β”‚   └── ...
        β”œβ”€β”€ imdb
        β”‚   └── imdb.mat
        β”œβ”€β”€ labels
        β”‚   β”œβ”€β”€ labels_joint_anno.txt
        β”‚   β”œβ”€β”€ test1.txt
        β”‚   β”œβ”€β”€ test2.txt
        β”‚   └── ...
        └── ...

    Args:
        data_root (str): The root directory for Describable Texture dataset.
        split (str, optional): The dataset split, supports "train",
            "val", "trainval", and "test". Defaults to "trainval".

    Examples:
        >>> from mmpretrain.datasets import DTD
        >>> train_dataset = DTD(data_root='data/dtd', split='trainval')
        >>> train_dataset
        Dataset DTD
            Number of samples:  3760
            Number of categories:       47
            Root of dataset:    data/dtd
        >>> test_dataset = DTD(data_root='data/dtd', split='test')
        >>> test_dataset
        Dataset DTD
            Number of samples:  1880
            Number of categories:       47
            Root of dataset:    data/dtd
    """  # noqa: E501

    METAINFO = {'classes': DTD_CATEGORIES}

    def __init__(self, data_root: str, split: str = 'trainval', **kwargs):

        splits = ['train', 'val', 'trainval', 'test']
        assert split in splits, \
            f"The split must be one of {splits}, but got '{split}'"
        self.split = split

        data_prefix = 'images'
        test_mode = split == 'test'

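        # DTD ships its annotations as a MATLAB index file located at
        # ``imdb/imdb.mat`` relative to ``data_root``.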
        self.backend = get_file_backend(data_root, enable_singleton=True)
        ann_file = self.backend.join_path('imdb', 'imdb.mat')

        super(DTD, self).__init__(
            ann_file=ann_file,
            data_root=data_root,
            data_prefix=data_prefix,
            test_mode=test_mode,
            **kwargs)

    def load_data_list(self):
        """Load images and ground truth labels."""

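        # ``imdb.mat`` stores three parallel lists: image file names, 1-based
        # class labels and the split index of each image.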
        data = mat4py.loadmat(self.ann_file)['images']
        names = data['name']
        labels = data['class']
        parts = data['set']
        num = len(names)
        assert num == len(labels) == len(parts), 'Invalid annotation file.'

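        # The ``set`` field marks each image as 1 (train), 2 (val) or
        # 3 (test); the 'trainval' split keeps images from both 1 and 2.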
        if self.split == 'train':
            target_set = {1}
        elif self.split == 'val':
            target_set = {2}
        elif self.split == 'test':
            target_set = {3}
        else:
            target_set = {1, 2}

        data_list = []
        for i in range(num):
            if parts[i] in target_set:
                img_name = names[i]
                img_path = self.backend.join_path(self.img_prefix, img_name)
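                # Labels in imdb.mat are 1-based; convert to 0-based.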
                gt_label = labels[i] - 1
                info = dict(img_path=img_path, gt_label=gt_label)
                data_list.append(info)

        return data_list

    def extra_repr(self) -> List[str]:
        """The extra repr information of the dataset."""
        body = [
            f'Root of dataset: \t{self.data_root}',
        ]
        return body