Spaces: Runtime error

Upload 298 files

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- mmseg/__init__.py +74 -0
- mmseg/__pycache__/__init__.cpython-310.pyc +0 -0
- mmseg/__pycache__/version.cpython-310.pyc +0 -0
- mmseg/apis/__init__.py +7 -0
- mmseg/apis/inference.py +214 -0
- mmseg/apis/mmseg_inferencer.py +361 -0
- mmseg/datasets/__init__.py +55 -0
- mmseg/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/ade.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/basesegdataset.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/chase_db1.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/cityscapes.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/coco_stuff.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/dark_zurich.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/dataset_wrappers.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/decathlon.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/drive.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/hrf.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/isaid.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/isprs.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/lip.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/loveda.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/mapillary.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/night_driving.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/pascal_context.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/potsdam.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/refuge.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/stare.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/synapse.cpython-310.pyc +0 -0
- mmseg/datasets/__pycache__/voc.cpython-310.pyc +0 -0
- mmseg/datasets/ade.py +94 -0
- mmseg/datasets/basesegdataset.py +269 -0
- mmseg/datasets/chase_db1.py +30 -0
- mmseg/datasets/cityscapes.py +30 -0
- mmseg/datasets/coco_stuff.py +99 -0
- mmseg/datasets/dark_zurich.py +15 -0
- mmseg/datasets/dataset_wrappers.py +136 -0
- mmseg/datasets/decathlon.py +96 -0
- mmseg/datasets/drive.py +30 -0
- mmseg/datasets/hrf.py +30 -0
- mmseg/datasets/isaid.py +39 -0
- mmseg/datasets/isprs.py +29 -0
- mmseg/datasets/lip.py +47 -0
- mmseg/datasets/loveda.py +29 -0
- mmseg/datasets/mapillary.py +176 -0
- mmseg/datasets/night_driving.py +15 -0
- mmseg/datasets/pascal_context.py +115 -0
- mmseg/datasets/potsdam.py +29 -0
- mmseg/datasets/refuge.py +28 -0
- mmseg/datasets/stare.py +29 -0
mmseg/__init__.py
ADDED
@@ -0,0 +1,74 @@
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import mmcv
import mmengine
from packaging.version import parse

from .version import __version__, version_info

MMCV_MIN = '2.0.0rc4'
MMCV_MAX = '2.1.0'
MMENGINE_MIN = '0.5.0'
MMENGINE_MAX = '1.0.0'


def digit_version(version_str: str, length: int = 4):
    """Convert a version string into a tuple of integers.

    This method is usually used for comparing two versions. For pre-release
    versions: alpha < beta < rc.

    Args:
        version_str (str): The version string.
        length (int): The maximum number of version levels. Default: 4.

    Returns:
        tuple[int]: The version info in digits (integers).
    """
    version = parse(version_str)
    assert version.release, f'failed to parse version {version_str}'
    release = list(version.release)
    release = release[:length]
    if len(release) < length:
        release = release + [0] * (length - len(release))
    if version.is_prerelease:
        mapping = {'a': -3, 'b': -2, 'rc': -1}
        val = -4
        # version.pre can be None
        if version.pre:
            if version.pre[0] not in mapping:
                warnings.warn(f'unknown prerelease version {version.pre[0]}, '
                              'version checking may go wrong')
            else:
                val = mapping[version.pre[0]]
            release.extend([val, version.pre[-1]])
        else:
            release.extend([val, 0])

    elif version.is_postrelease:
        release.extend([1, version.post])
    else:
        release.extend([0, 0])
    return tuple(release)


mmcv_min_version = digit_version(MMCV_MIN)
mmcv_max_version = digit_version(MMCV_MAX)
mmcv_version = digit_version(mmcv.__version__)


assert (mmcv_min_version <= mmcv_version < mmcv_max_version), \
    f'MMCV=={mmcv.__version__} is used but incompatible. ' \
    f'Please install mmcv>=2.0.0rc4.'

mmengine_min_version = digit_version(MMENGINE_MIN)
mmengine_max_version = digit_version(MMENGINE_MAX)
mmengine_version = digit_version(mmengine.__version__)

assert (mmengine_min_version <= mmengine_version < mmengine_max_version), \
    f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
    f'Please install mmengine>={mmengine_min_version}, '\
    f'<{mmengine_max_version}.'

__all__ = ['__version__', 'version_info', 'digit_version']
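
As a quick illustration of the ordering digit_version produces (a minimal sketch; the tuples follow directly from the function above, and the import relies on digit_version being re-exported via __all__):

from mmseg import digit_version

# Pre-releases get a negative suffix (a=-3, b=-2, rc=-1) while final releases
# get (0, 0), so plain tuple comparison gives the ordering the version gates
# above rely on.
assert digit_version('2.0.0rc4') == (2, 0, 0, 0, -1, 4)
assert digit_version('2.1.0') == (2, 1, 0, 0, 0, 0)
assert digit_version('2.0.0rc4') < digit_version('2.0.0') < digit_version('2.1.0')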
mmseg/__pycache__/__init__.cpython-310.pyc  ADDED, binary file (1.98 kB)
mmseg/__pycache__/version.cpython-310.pyc  ADDED, binary file (525 Bytes)

mmseg/apis/__init__.py
ADDED
@@ -0,0 +1,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .inference import inference_model, init_model, show_result_pyplot
from .mmseg_inferencer import MMSegInferencer

__all__ = [
    'init_model', 'inference_model', 'show_result_pyplot', 'MMSegInferencer'
]
mmseg/apis/inference.py
ADDED
@@ -0,0 +1,214 @@
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from collections import defaultdict
from pathlib import Path
from typing import Optional, Sequence, Union

import mmcv
import numpy as np
import torch
from mmengine import Config
from mmengine.dataset import Compose
from mmengine.registry import init_default_scope
from mmengine.runner import load_checkpoint
from mmengine.utils import mkdir_or_exist

from mmseg.models import BaseSegmentor
from mmseg.registry import MODELS
from mmseg.structures import SegDataSample
from mmseg.utils import SampleList, dataset_aliases, get_classes, get_palette
from mmseg.visualization import SegLocalVisualizer


def init_model(config: Union[str, Path, Config],
               checkpoint: Optional[str] = None,
               device: str = 'cuda:0',
               cfg_options: Optional[dict] = None):
    """Initialize a segmentor from config file.

    Args:
        config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path,
            :obj:`Path`, or the config object.
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights.
        device (str, optional): CPU/CUDA device option. Default 'cuda:0'.
            Use 'cpu' for loading the model on CPU.
        cfg_options (dict, optional): Options to override some settings in
            the used config.
    Returns:
        nn.Module: The constructed segmentor.
    """
    if isinstance(config, (str, Path)):
        config = Config.fromfile(config)
    elif not isinstance(config, Config):
        raise TypeError('config must be a filename or Config object, '
                        'but got {}'.format(type(config)))
    if cfg_options is not None:
        config.merge_from_dict(cfg_options)
    elif 'init_cfg' in config.model.backbone:
        config.model.backbone.init_cfg = None
    config.model.pretrained = None
    config.model.train_cfg = None
    init_default_scope(config.get('default_scope', 'mmseg'))

    model = MODELS.build(config.model)
    if checkpoint is not None:
        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
        dataset_meta = checkpoint['meta'].get('dataset_meta', None)
        # save the dataset_meta in the model for convenience
        if 'dataset_meta' in checkpoint.get('meta', {}):
            # mmseg 1.x
            model.dataset_meta = dataset_meta
        elif 'CLASSES' in checkpoint.get('meta', {}):
            # < mmseg 1.x
            classes = checkpoint['meta']['CLASSES']
            palette = checkpoint['meta']['PALETTE']
            model.dataset_meta = {'classes': classes, 'palette': palette}
        else:
            warnings.simplefilter('once')
            warnings.warn(
                'dataset_meta or class names are not saved in the '
                'checkpoint\'s meta data, classes and palette will be '
                'set according to num_classes.')
            num_classes = model.decode_head.num_classes
            dataset_name = None
            for name in dataset_aliases.keys():
                if len(get_classes(name)) == num_classes:
                    dataset_name = name
                    break
            if dataset_name is None:
                warnings.warn(
                    'No suitable dataset found, use Cityscapes by default')
                dataset_name = 'cityscapes'
            model.dataset_meta = {
                'classes': get_classes(dataset_name),
                'palette': get_palette(dataset_name)
            }
    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
    return model


ImageType = Union[str, np.ndarray, Sequence[str], Sequence[np.ndarray]]


def _preprare_data(imgs: ImageType, model: BaseSegmentor):

    cfg = model.cfg
    for t in cfg.test_pipeline:
        if t.get('type') == 'LoadAnnotations':
            cfg.test_pipeline.remove(t)

    is_batch = True
    if not isinstance(imgs, (list, tuple)):
        imgs = [imgs]
        is_batch = False

    if isinstance(imgs[0], np.ndarray):
        cfg.test_pipeline[0]['type'] = 'LoadImageFromNDArray'

    # TODO: Consider using the singleton pattern to avoid building
    # a pipeline for each inference
    pipeline = Compose(cfg.test_pipeline)

    data = defaultdict(list)
    for img in imgs:
        if isinstance(img, np.ndarray):
            data_ = dict(img=img)
        else:
            data_ = dict(img_path=img)
        data_ = pipeline(data_)
        data['inputs'].append(data_['inputs'])
        data['data_samples'].append(data_['data_samples'])

    return data, is_batch


def inference_model(model: BaseSegmentor,
                    img: ImageType) -> Union[SegDataSample, SampleList]:
    """Inference image(s) with the segmentor.

    Args:
        model (nn.Module): The loaded segmentor.
        imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
            images.

    Returns:
        :obj:`SegDataSample` or list[:obj:`SegDataSample`]:
        If imgs is a list or tuple, a list of results of the same length is
        returned, otherwise the segmentation result is returned directly.
    """
    # prepare data
    data, is_batch = _preprare_data(img, model)

    # forward the model
    with torch.no_grad():
        results = model.test_step(data)

    return results if is_batch else results[0]


def show_result_pyplot(model: BaseSegmentor,
                       img: Union[str, np.ndarray],
                       result: SegDataSample,
                       opacity: float = 0.5,
                       title: str = '',
                       draw_gt: bool = True,
                       draw_pred: bool = True,
                       wait_time: float = 0,
                       show: bool = True,
                       save_dir=None,
                       out_file=None):
    """Visualize the segmentation results on the image.

    Args:
        model (nn.Module): The loaded segmentor.
        img (str or np.ndarray): Image filename or loaded image.
        result (SegDataSample): The prediction SegDataSample result.
        opacity (float): Opacity of painted segmentation map.
            Default 0.5. Must be in (0, 1] range.
        title (str): The title of the pyplot figure.
            Default is ''.
        draw_gt (bool): Whether to draw GT SegDataSample. Defaults to True.
        draw_pred (bool): Whether to draw Prediction SegDataSample.
            Defaults to True.
        wait_time (float): The interval of show (s). 0 is the special value
            that means "forever". Defaults to 0.
        show (bool): Whether to display the drawn image.
            Defaults to True.
        save_dir (str, optional): Save file dir for all storage backends.
            If it is None, the backend storage will not save any data.
        out_file (str, optional): Path to output file. Defaults to None.

    Returns:
        np.ndarray: the drawn image, with RGB channel order.
    """
    if hasattr(model, 'module'):
        model = model.module
    if isinstance(img, str):
        image = mmcv.imread(img)
    else:
        image = img
    if save_dir is not None:
        mkdir_or_exist(save_dir)
    # init visualizer
    visualizer = SegLocalVisualizer(
        vis_backends=[dict(type='LocalVisBackend')],
        save_dir=save_dir,
        alpha=opacity)
    visualizer.dataset_meta = dict(
        classes=model.dataset_meta['classes'],
        palette=model.dataset_meta['palette'])
    visualizer.add_datasample(
        name=title,
        image=image,
        data_sample=result,
        draw_gt=draw_gt,
        draw_pred=draw_pred,
        wait_time=wait_time,
        out_file=out_file,
        show=show)
    vis_img = visualizer.get_image()

    return vis_img
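
A minimal usage sketch for the three functions above; the config path follows the pattern quoted in the MMSegInferencer docstring below, while the checkpoint and image paths are placeholders rather than files shipped with this Space:

from mmseg.apis import inference_model, init_model, show_result_pyplot

config_file = 'configs/fcn/fcn_r50-d8_4xb2-40k_cityscapes-512x1024.py'
checkpoint_file = 'fcn_r50-d8_cityscapes.pth'  # placeholder checkpoint path

# Build the segmentor and load weights (pass device='cpu' when no GPU is available).
model = init_model(config_file, checkpoint_file, device='cuda:0')

# A single (non-list) input returns one SegDataSample rather than a list.
result = inference_model(model, 'demo.png')

# Draw the prediction over the image and get the rendered RGB array back.
vis = show_result_pyplot(model, 'demo.png', result, opacity=0.5,
                         show=False, out_file='demo_pred.png')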
mmseg/apis/mmseg_inferencer.py
ADDED
@@ -0,0 +1,361 @@
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import warnings
from typing import List, Optional, Sequence, Union

import mmcv
import mmengine
import numpy as np
import torch
import torch.nn as nn
from mmcv.transforms import Compose
from mmengine.infer.infer import BaseInferencer, ModelType
from mmengine.model import revert_sync_batchnorm
from mmengine.registry import init_default_scope
from mmengine.runner.checkpoint import _load_checkpoint_to_model
from PIL import Image

from mmseg.structures import SegDataSample
from mmseg.utils import ConfigType, SampleList, get_classes, get_palette
from mmseg.visualization import SegLocalVisualizer

InputType = Union[str, np.ndarray]
InputsType = Union[InputType, Sequence[InputType]]
PredType = Union[SegDataSample, SampleList]


class MMSegInferencer(BaseInferencer):
    """Semantic segmentation inferencer, provides inference and visualization
    interfaces. Note: MMEngine >= 0.5.0 is required.

    Args:
        model (str, optional): Path to the config file or the model name
            defined in metafile. Take the `mmseg metafile <https://github.com/open-mmlab/mmsegmentation/blob/main/configs/fcn/metafile.yaml>`_
            as an example: the `model` could be
            "fcn_r50-d8_4xb2-40k_cityscapes-512x1024", and the model weights
            will be downloaded automatically. If a config file is used, like
            "configs/fcn/fcn_r50-d8_4xb2-40k_cityscapes-512x1024.py", the
            `weights` should be defined.
        weights (str, optional): Path to the checkpoint. If it is not specified
            and model is a model name of metafile, the weights will be loaded
            from metafile. Defaults to None.
        classes (list, optional): Input classes for result rendering. As the
            prediction of a segmentation model is a segment map with label
            indices, `classes` is a list whose items correspond to the
            label indices. If classes is not defined, the visualizer will take
            `cityscapes` classes by default. Defaults to None.
        palette (list, optional): Input palette for result rendering, which is
            a list of colors corresponding to the classes. If palette is
            not defined, the visualizer will take the `cityscapes` palette by
            default. Defaults to None.
        dataset_name (str, optional): `Dataset name or alias <https://github.com/open-mmlab/mmsegmentation/blob/main/mmseg/utils/class_names.py#L302-L317>`_.
            The visualizer will use the meta information of the dataset, i.e.
            classes and palette, but `classes` and `palette` have higher
            priority. Defaults to None.
        device (str, optional): Device to run inference. If None, the available
            device will be automatically used. Defaults to None.
        scope (str, optional): The scope of the model. Defaults to 'mmseg'.
    """ # noqa

    preprocess_kwargs: set = set()
    forward_kwargs: set = {'mode', 'out_dir'}
    visualize_kwargs: set = {'show', 'wait_time', 'img_out_dir', 'opacity'}
    postprocess_kwargs: set = {'pred_out_dir', 'return_datasample'}

    def __init__(self,
                 model: Union[ModelType, str],
                 weights: Optional[str] = None,
                 classes: Optional[Union[str, List]] = None,
                 palette: Optional[Union[str, List]] = None,
                 dataset_name: Optional[str] = None,
                 device: Optional[str] = None,
                 scope: Optional[str] = 'mmseg') -> None:
        # A global counter tracking the number of images processed, for
        # naming of the output images
        self.num_visualized_imgs = 0
        self.num_pred_imgs = 0
        init_default_scope(scope if scope else 'mmseg')
        super().__init__(
            model=model, weights=weights, device=device, scope=scope)

        if device == 'cpu' or not torch.cuda.is_available():
            self.model = revert_sync_batchnorm(self.model)

        assert isinstance(self.visualizer, SegLocalVisualizer)
        self.visualizer.set_dataset_meta(palette, classes, dataset_name)

    def _load_weights_to_model(self, model: nn.Module,
                               checkpoint: Optional[dict],
                               cfg: Optional[ConfigType]) -> None:
        """Loading model weights and meta information from cfg and checkpoint.

        Subclasses could override this method to load extra meta information
        from ``checkpoint`` and ``cfg`` to model.

        Args:
            model (nn.Module): Model to load weights and meta information.
            checkpoint (dict, optional): The loaded checkpoint.
            cfg (Config or ConfigDict, optional): The loaded config.
        """

        if checkpoint is not None:
            _load_checkpoint_to_model(model, checkpoint)
            checkpoint_meta = checkpoint.get('meta', {})
            # save the dataset_meta in the model for convenience
            if 'dataset_meta' in checkpoint_meta:
                # mmsegmentation 1.x
                model.dataset_meta = {
                    'classes': checkpoint_meta['dataset_meta'].get('classes'),
                    'palette': checkpoint_meta['dataset_meta'].get('palette')
                }
            elif 'CLASSES' in checkpoint_meta:
                # mmsegmentation 0.x
                classes = checkpoint_meta['CLASSES']
                palette = checkpoint_meta.get('PALETTE', None)
                model.dataset_meta = {'classes': classes, 'palette': palette}
            else:
                warnings.warn(
                    'dataset_meta or class names are not saved in the '
                    'checkpoint\'s meta data, use classes of Cityscapes by '
                    'default.')
                model.dataset_meta = {
                    'classes': get_classes('cityscapes'),
                    'palette': get_palette('cityscapes')
                }
        else:
            warnings.warn('Checkpoint is not loaded, and the inference '
                          'result is calculated by the randomly initialized '
                          'model!')
            warnings.warn(
                'weights is None, use cityscapes classes by default.')
            model.dataset_meta = {
                'classes': get_classes('cityscapes'),
                'palette': get_palette('cityscapes')
            }

    def __call__(self,
                 inputs: InputsType,
                 return_datasamples: bool = False,
                 batch_size: int = 1,
                 show: bool = False,
                 wait_time: int = 0,
                 out_dir: str = '',
                 img_out_dir: str = 'vis',
                 pred_out_dir: str = 'pred',
                 **kwargs) -> dict:
        """Call the inferencer.

        Args:
            inputs (Union[list, str, np.ndarray]): Inputs for the inferencer.
            return_datasamples (bool): Whether to return results as
                :obj:`SegDataSample`. Defaults to False.
            batch_size (int): Batch size. Defaults to 1.
            show (bool): Whether to display the rendered color segmentation
                mask in a popup window. Defaults to False.
            wait_time (float): The interval of show (s). Defaults to 0.
            out_dir (str): Output directory of inference results. Defaults
                to ''.
            img_out_dir (str): Subdirectory of `out_dir`, used to save the
                rendered color segmentation mask, so `out_dir` must be defined
                if you would like to save the predicted mask. Defaults to 'vis'.
            pred_out_dir (str): Subdirectory of `out_dir`, used to save the
                predicted mask file, so `out_dir` must be defined if you would
                like to save the predicted mask. Defaults to 'pred'.

            **kwargs: Other keyword arguments passed to :meth:`preprocess`,
                :meth:`forward`, :meth:`visualize` and :meth:`postprocess`.
                Each key in kwargs should be in the corresponding set of
                ``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs``
                and ``postprocess_kwargs``.

        Returns:
            dict: Inference and visualization results.
        """

        if out_dir != '':
            pred_out_dir = osp.join(out_dir, pred_out_dir)
            img_out_dir = osp.join(out_dir, img_out_dir)
        else:
            pred_out_dir = ''
            img_out_dir = ''

        return super().__call__(
            inputs=inputs,
            return_datasamples=return_datasamples,
            batch_size=batch_size,
            show=show,
            wait_time=wait_time,
            img_out_dir=img_out_dir,
            pred_out_dir=pred_out_dir,
            **kwargs)

    def visualize(self,
                  inputs: list,
                  preds: List[dict],
                  show: bool = False,
                  wait_time: int = 0,
                  img_out_dir: str = '',
                  opacity: float = 0.8) -> List[np.ndarray]:
        """Visualize predictions.

        Args:
            inputs (list): Inputs preprocessed by :meth:`_inputs_to_list`.
            preds (Any): Predictions of the model.
            show (bool): Whether to display the image in a popup window.
                Defaults to False.
            wait_time (float): The interval of show (s). Defaults to 0.
            img_out_dir (str): Output directory of the rendered prediction,
                i.e. the color segmentation mask. Defaults to ''.
            opacity (int, float): The transparency of the segmentation mask.
                Defaults to 0.8.

        Returns:
            List[np.ndarray]: Visualization results.
        """
        if self.visualizer is None or (not show and img_out_dir == ''):
            return None

        if getattr(self, 'visualizer') is None:
            raise ValueError('Visualization needs the "visualizer" term '
                             'defined in the config, but got None')
        self.visualizer.set_dataset_meta(**self.model.dataset_meta)
        self.visualizer.alpha = opacity

        results = []

        for single_input, pred in zip(inputs, preds):
            if isinstance(single_input, str):
                img_bytes = mmengine.fileio.get(single_input)
                img = mmcv.imfrombytes(img_bytes)
                img = img[:, :, ::-1]
                img_name = osp.basename(single_input)
            elif isinstance(single_input, np.ndarray):
                img = single_input.copy()
                img_num = str(self.num_visualized_imgs).zfill(8) + '_vis'
                img_name = f'{img_num}.jpg'
            else:
                raise ValueError('Unsupported input type: '
                                 f'{type(single_input)}')

            out_file = osp.join(img_out_dir, img_name) if img_out_dir != ''\
                else None

            self.visualizer.add_datasample(
                img_name,
                img,
                pred,
                show=show,
                wait_time=wait_time,
                draw_gt=False,
                draw_pred=True,
                out_file=out_file)
            results.append(self.visualizer.get_image())
            self.num_visualized_imgs += 1

        return results

    def postprocess(self,
                    preds: PredType,
                    visualization: List[np.ndarray],
                    return_datasample: bool = False,
                    pred_out_dir: str = '') -> dict:
        """Process the predictions and visualization results from ``forward``
        and ``visualize``.

        This method should be responsible for the following tasks:

        1. Pack the predictions and visualization results and return them.
        2. Save the predictions, if needed.

        Args:
            preds (List[Dict]): Predictions of the model.
            visualization (List[np.ndarray]): The list of rendered color
                segmentation masks.
            return_datasample (bool): Whether to return results as datasamples.
                Defaults to False.
            pred_out_dir: Directory to save the inference results w/o
                visualization. If left as empty, no file will be saved.
                Defaults to ''.

        Returns:
            dict: Inference and visualization results with keys ``predictions``
            and ``visualization``

            - ``visualization (Any)``: Returned by :meth:`visualize`
            - ``predictions`` (List[np.ndarray], np.ndarray): Returned by
              :meth:`forward` and processed in :meth:`postprocess`.
              If ``return_datasample=False``, it will be the segmentation mask
              with label indices.
        """
        if return_datasample:
            if len(preds) == 1:
                return preds[0]
            else:
                return preds

        results_dict = {}

        results_dict['predictions'] = []
        results_dict['visualization'] = []

        for i, pred in enumerate(preds):
            pred_data = pred.pred_sem_seg.numpy().data[0]
            results_dict['predictions'].append(pred_data)
            if visualization is not None:
                vis = visualization[i]
                results_dict['visualization'].append(vis)
            if pred_out_dir != '':
                mmengine.mkdir_or_exist(pred_out_dir)
                img_name = str(self.num_pred_imgs).zfill(8) + '_pred.png'
                img_path = osp.join(pred_out_dir, img_name)
                output = Image.fromarray(pred_data.astype(np.uint8))
                output.save(img_path)
                self.num_pred_imgs += 1

        if len(results_dict['predictions']) == 1:
            results_dict['predictions'] = results_dict['predictions'][0]
            if visualization is not None:
                results_dict['visualization'] = \
                    results_dict['visualization'][0]
        return results_dict

    def _init_pipeline(self, cfg: ConfigType) -> Compose:
        """Initialize the test pipeline.

        Return a pipeline to handle various input data, such as ``str``,
        ``np.ndarray``. It is an abstract method in BaseInferencer, and should
        be implemented in subclasses.

        The returned pipeline will be used to process a single data sample.
        It will be used in :meth:`preprocess` like this:

        .. code-block:: python
            def preprocess(self, inputs, batch_size, **kwargs):
                ...
                dataset = map(self.pipeline, dataset)
                ...
        """
        pipeline_cfg = cfg.test_dataloader.dataset.pipeline
        # Loading annotations is also not applicable
        idx = self._get_transform_idx(pipeline_cfg, 'LoadAnnotations')
        if idx != -1:
            del pipeline_cfg[idx]
        load_img_idx = self._get_transform_idx(pipeline_cfg,
                                               'LoadImageFromFile')

        if load_img_idx == -1:
            raise ValueError(
                'LoadImageFromFile is not found in the test pipeline')
        pipeline_cfg[load_img_idx]['type'] = 'InferencerLoader'
        return Compose(pipeline_cfg)

    def _get_transform_idx(self, pipeline_cfg: ConfigType, name: str) -> int:
        """Returns the index of the transform in a pipeline.

        If the transform is not found, returns -1.
        """
        for i, transform in enumerate(pipeline_cfg):
            if transform['type'] == name:
                return i
        return -1
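
A minimal sketch of driving the inferencer end to end; the model name follows the metafile convention quoted in the class docstring above, and 'demo.png' is a placeholder input:

from mmseg.apis import MMSegInferencer

# Resolve the model by its metafile name; the matching weights are then
# fetched automatically (a local config plus weights= also works).
inferencer = MMSegInferencer(model='fcn_r50-d8_4xb2-40k_cityscapes-512x1024')

# out_dir enables saving: rendered masks go to out/vis and raw label maps to
# out/pred, mirroring the __call__ defaults above.
results = inferencer('demo.png', out_dir='out', show=False)

# For a single input, 'predictions' is one H x W array of label indices.
print(results['predictions'].shape)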
mmseg/datasets/__init__.py
ADDED
@@ -0,0 +1,55 @@
# Copyright (c) OpenMMLab. All rights reserved.
# yapf: disable
from .ade import ADE20KDataset
from .basesegdataset import BaseSegDataset
from .chase_db1 import ChaseDB1Dataset
from .cityscapes import CityscapesDataset
from .coco_stuff import COCOStuffDataset
from .dark_zurich import DarkZurichDataset
from .dataset_wrappers import MultiImageMixDataset
from .decathlon import DecathlonDataset
from .drive import DRIVEDataset
from .hrf import HRFDataset
from .isaid import iSAIDDataset
from .isprs import ISPRSDataset
from .lip import LIPDataset
from .loveda import LoveDADataset
from .mapillary import MapillaryDataset_v1, MapillaryDataset_v2
from .night_driving import NightDrivingDataset
from .pascal_context import PascalContextDataset, PascalContextDataset59
from .potsdam import PotsdamDataset
from .refuge import REFUGEDataset
from .stare import STAREDataset
from .synapse import SynapseDataset
# yapf: disable
from .transforms import (CLAHE, AdjustGamma, BioMedical3DPad,
                         BioMedical3DRandomCrop, BioMedical3DRandomFlip,
                         BioMedicalGaussianBlur, BioMedicalGaussianNoise,
                         BioMedicalRandomGamma, GenerateEdge, LoadAnnotations,
                         LoadBiomedicalAnnotation, LoadBiomedicalData,
                         LoadBiomedicalImageFromFile, LoadImageFromNDArray,
                         PackSegInputs, PhotoMetricDistortion, RandomCrop,
                         RandomCutOut, RandomMosaic, RandomRotate,
                         RandomRotFlip, Rerange, ResizeShortestEdge,
                         ResizeToMultiple, RGB2Gray, SegRescale)
from .voc import PascalVOCDataset

# yapf: enable
__all__ = [
    'BaseSegDataset', 'BioMedical3DRandomCrop', 'BioMedical3DRandomFlip',
    'CityscapesDataset', 'PascalVOCDataset', 'ADE20KDataset',
    'PascalContextDataset', 'PascalContextDataset59', 'ChaseDB1Dataset',
    'DRIVEDataset', 'HRFDataset', 'STAREDataset', 'DarkZurichDataset',
    'NightDrivingDataset', 'COCOStuffDataset', 'LoveDADataset',
    'MultiImageMixDataset', 'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset',
    'LoadAnnotations', 'RandomCrop', 'SegRescale', 'PhotoMetricDistortion',
    'RandomRotate', 'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray',
    'RandomCutOut', 'RandomMosaic', 'PackSegInputs', 'ResizeToMultiple',
    'LoadImageFromNDArray', 'LoadBiomedicalImageFromFile',
    'LoadBiomedicalAnnotation', 'LoadBiomedicalData', 'GenerateEdge',
    'DecathlonDataset', 'LIPDataset', 'ResizeShortestEdge',
    'BioMedicalGaussianNoise', 'BioMedicalGaussianBlur',
    'BioMedicalRandomGamma', 'BioMedical3DPad', 'RandomRotFlip',
    'SynapseDataset', 'REFUGEDataset', 'MapillaryDataset_v1',
    'MapillaryDataset_v2'
]
mmseg/datasets/__pycache__/__init__.cpython-310.pyc  ADDED, binary file (2.25 kB)
mmseg/datasets/__pycache__/ade.cpython-310.pyc  ADDED, binary file (1.15 kB)
mmseg/datasets/__pycache__/basesegdataset.cpython-310.pyc  ADDED, binary file (9.23 kB)
mmseg/datasets/__pycache__/chase_db1.cpython-310.pyc  ADDED, binary file (1.29 kB)
mmseg/datasets/__pycache__/cityscapes.cpython-310.pyc  ADDED, binary file (1.65 kB)
mmseg/datasets/__pycache__/coco_stuff.cpython-310.pyc  ADDED, binary file (7.68 kB)
mmseg/datasets/__pycache__/dark_zurich.cpython-310.pyc  ADDED, binary file (833 Bytes)
mmseg/datasets/__pycache__/dataset_wrappers.cpython-310.pyc  ADDED, binary file (4.51 kB)
mmseg/datasets/__pycache__/decathlon.cpython-310.pyc  ADDED, binary file (2.65 kB)
mmseg/datasets/__pycache__/drive.cpython-310.pyc  ADDED, binary file (1.27 kB)
mmseg/datasets/__pycache__/hrf.cpython-310.pyc  ADDED, binary file (1.24 kB)
mmseg/datasets/__pycache__/isaid.cpython-310.pyc  ADDED, binary file (1.91 kB)
mmseg/datasets/__pycache__/isprs.cpython-310.pyc  ADDED, binary file (1.27 kB)
mmseg/datasets/__pycache__/lip.cpython-310.pyc  ADDED, binary file (1.6 kB)
mmseg/datasets/__pycache__/loveda.cpython-310.pyc  ADDED, binary file (1.3 kB)
mmseg/datasets/__pycache__/mapillary.cpython-310.pyc  ADDED, binary file (8.38 kB)
mmseg/datasets/__pycache__/night_driving.cpython-310.pyc  ADDED, binary file (850 Bytes)
mmseg/datasets/__pycache__/pascal_context.cpython-310.pyc  ADDED, binary file (5.31 kB)
mmseg/datasets/__pycache__/potsdam.cpython-310.pyc  ADDED, binary file (1.3 kB)
mmseg/datasets/__pycache__/refuge.cpython-310.pyc  ADDED, binary file (1.31 kB)
mmseg/datasets/__pycache__/stare.cpython-310.pyc  ADDED, binary file (1.26 kB)
mmseg/datasets/__pycache__/synapse.cpython-310.pyc  ADDED, binary file (1.5 kB)
mmseg/datasets/__pycache__/voc.cpython-310.pyc  ADDED, binary file (1.79 kB)

mmseg/datasets/ade.py
ADDED
@@ -0,0 +1,94 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class ADE20KDataset(BaseSegDataset):
    """ADE20K dataset.

    In segmentation map annotation for ADE20K, 0 stands for background, which
    is not included in 150 categories. ``reduce_zero_label`` is fixed to True.
    The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to
    '.png'.
    """
    # METAINFO = dict(
    #     classes=('wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road',
    #              'bed ', 'windowpane', 'grass', 'cabinet', 'sidewalk',
    #              'person', 'earth', 'door', 'table', 'mountain', 'plant',
    #              'curtain', 'chair', 'car', 'water', 'painting', 'sofa',
    #              'shelf', 'house', 'sea', 'mirror', 'rug', 'field', 'armchair',
    #              'seat', 'fence', 'desk', 'rock', 'wardrobe', 'lamp',
    #              'bathtub', 'railing', 'cushion', 'base', 'box', 'column',
    #              'signboard', 'chest of drawers', 'counter', 'sand', 'sink',
    #              'skyscraper', 'fireplace', 'refrigerator', 'grandstand',
    #              'path', 'stairs', 'runway', 'case', 'pool table', 'pillow',
    #              'screen door', 'stairway', 'river', 'bridge', 'bookcase',
    #              'blind', 'coffee table', 'toilet', 'flower', 'book', 'hill',
    #              'bench', 'countertop', 'stove', 'palm', 'kitchen island',
    #              'computer', 'swivel chair', 'boat', 'bar', 'arcade machine',
    #              'hovel', 'bus', 'towel', 'light', 'truck', 'tower',
    #              'chandelier', 'awning', 'streetlight', 'booth',
    #              'television receiver', 'airplane', 'dirt track', 'apparel',
    #              'pole', 'land', 'bannister', 'escalator', 'ottoman', 'bottle',
    #              'buffet', 'poster', 'stage', 'van', 'ship', 'fountain',
    #              'conveyer belt', 'canopy', 'washer', 'plaything',
    #              'swimming pool', 'stool', 'barrel', 'basket', 'waterfall',
    #              'tent', 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food',
    #              'step', 'tank', 'trade name', 'microwave', 'pot', 'animal',
    #              'bicycle', 'lake', 'dishwasher', 'screen', 'blanket',
    #              'sculpture', 'hood', 'sconce', 'vase', 'traffic light',
    #              'tray', 'ashcan', 'fan', 'pier', 'crt screen', 'plate',
    #              'monitor', 'bulletin board', 'shower', 'radiator', 'glass',
    #              'clock', 'flag'),
    #     palette=[[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
    #              [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
    #              [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
    #              [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
    #              [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
    #              [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
    #              [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
    #              [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
    #              [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
    #              [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
    #              [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
    #              [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
    #              [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
    #              [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
    #              [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
    #              [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
    #              [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
    #              [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
    #              [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
    #              [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
    #              [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
    #              [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
    #              [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
    #              [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
    #              [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
    #              [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
    #              [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
    #              [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
    #              [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
    #              [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
    #              [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
    #              [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
    #              [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
    #              [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
    #              [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
    #              [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
    #              [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
    #              [102, 255, 0], [92, 0, 255]])

    METAINFO = dict(classes=('building',), palette=[(0, 0, 255)])

    def __init__(self,
                 img_suffix='.jpg',
                 seg_map_suffix='.png',
                 reduce_zero_label=True,
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix,
            seg_map_suffix=seg_map_suffix,
            reduce_zero_label=reduce_zero_label,
            **kwargs)
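
Since METAINFO is narrowed here to a single 'building' class, a dataset built on this class only needs binary annotations; a hypothetical config sketch (directory names are placeholders, and other dataloader keys such as the sampler are omitted):

train_dataloader = dict(
    batch_size=2,
    dataset=dict(
        type='ADE20KDataset',        # registered above via @DATASETS.register_module()
        data_root='data/buildings',  # placeholder data root
        data_prefix=dict(
            img_path='img_dir/train', seg_map_path='ann_dir/train'),
        # reduce_zero_label=True is inherited from the class defaults, so pixel
        # value 0 becomes ignore (255) and value 1 maps to class 0 ('building').
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations'),
            dict(type='PackSegInputs'),
        ]))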
mmseg/datasets/basesegdataset.py
ADDED
@@ -0,0 +1,269 @@
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
from typing import Callable, Dict, List, Optional, Sequence, Union

import mmengine
import mmengine.fileio as fileio
import numpy as np
from mmengine.dataset import BaseDataset, Compose

from mmseg.registry import DATASETS


@DATASETS.register_module()
class BaseSegDataset(BaseDataset):
    """Custom dataset for semantic segmentation. An example of file structure
    is as follows.

    .. code-block:: none

        ├── data
        │   ├── my_dataset
        │   │   ├── img_dir
        │   │   │   ├── train
        │   │   │   │   ├── xxx{img_suffix}
        │   │   │   │   ├── yyy{img_suffix}
        │   │   │   │   ├── zzz{img_suffix}
        │   │   │   ├── val
        │   │   ├── ann_dir
        │   │   │   ├── train
        │   │   │   │   ├── xxx{seg_map_suffix}
        │   │   │   │   ├── yyy{seg_map_suffix}
        │   │   │   │   ├── zzz{seg_map_suffix}
        │   │   │   ├── val

    The img/gt_semantic_seg pair of BaseSegDataset should be the same except
    for the suffix. A valid img/gt_semantic_seg filename pair should be like
    ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included
    in the suffix). If split is given, then ``xxx`` is specified in a txt file.
    Otherwise, all files in ``img_dir/`` and ``ann_dir`` will be loaded.
    Please refer to ``docs/en/tutorials/new_dataset.md`` for more details.


    Args:
        ann_file (str): Annotation file path. Defaults to ''.
        metainfo (dict, optional): Meta information for dataset, such as
            the classes to load. Defaults to None.
        data_root (str, optional): The root directory for ``data_prefix`` and
            ``ann_file``. Defaults to None.
        data_prefix (dict, optional): Prefix for training data. Defaults to
            dict(img_path=None, seg_map_path=None).
        img_suffix (str): Suffix of images. Default: '.jpg'
        seg_map_suffix (str): Suffix of segmentation maps. Default: '.png'
        filter_cfg (dict, optional): Config for filtering data. Defaults to None.
        indices (int or Sequence[int], optional): Support using only the first
            few entries of the annotation file to facilitate training/testing
            on a smaller dataset. Defaults to None, which means using all
            ``data_infos``.
        serialize_data (bool, optional): Whether to hold memory using
            serialized objects; when enabled, data loader workers can use
            shared RAM from the master process instead of making a copy.
            Defaults to True.
        pipeline (list, optional): Processing pipeline. Defaults to [].
        test_mode (bool, optional): ``test_mode=True`` means in test phase.
            Defaults to False.
        lazy_init (bool, optional): Whether to load annotations during
            instantiation. In some cases, such as visualization, only the meta
            information of the dataset is needed, so it is not necessary to
            load the annotation file. ``Basedataset`` can skip loading
            annotations to save time by setting ``lazy_init=True``.
            Defaults to False.
        max_refetch (int, optional): If ``Basedataset.prepare_data`` gets a
            None img, the maximum number of extra cycles to get a valid
            image. Defaults to 1000.
        ignore_index (int): The label index to be ignored. Default: 255
        reduce_zero_label (bool): Whether to mark label zero as ignored.
            Defaults to False.
        backend_args (dict, Optional): Arguments to instantiate a file backend.
            See https://mmengine.readthedocs.io/en/latest/api/fileio.htm
            for details. Defaults to None.
    Notes: mmcv>=2.0.0rc4, mmengine>=0.2.0 required.
    """
    METAINFO: dict = dict()

    def __init__(self,
                 ann_file: str = '',
                 img_suffix='.jpg',
                 seg_map_suffix='.png',
                 metainfo: Optional[dict] = None,
                 data_root: Optional[str] = None,
                 data_prefix: dict = dict(img_path='', seg_map_path=''),
                 filter_cfg: Optional[dict] = None,
                 indices: Optional[Union[int, Sequence[int]]] = None,
                 serialize_data: bool = True,
                 pipeline: List[Union[dict, Callable]] = [],
                 test_mode: bool = False,
                 lazy_init: bool = False,
                 max_refetch: int = 1000,
                 ignore_index: int = 255,
                 reduce_zero_label: bool = False,
                 backend_args: Optional[dict] = None) -> None:

        self.img_suffix = img_suffix
        self.seg_map_suffix = seg_map_suffix
        self.ignore_index = ignore_index
        self.reduce_zero_label = reduce_zero_label
        self.backend_args = backend_args.copy() if backend_args else None

        self.data_root = data_root
        self.data_prefix = copy.copy(data_prefix)
        self.ann_file = ann_file
        self.filter_cfg = copy.deepcopy(filter_cfg)
        self._indices = indices
        self.serialize_data = serialize_data
        self.test_mode = test_mode
        self.max_refetch = max_refetch
        self.data_list: List[dict] = []
        self.data_bytes: np.ndarray

        # Set meta information.
        self._metainfo = self._load_metainfo(copy.deepcopy(metainfo))

        # Get label map for custom classes
        new_classes = self._metainfo.get('classes', None)
        self.label_map = self.get_label_map(new_classes)
        self._metainfo.update(
            dict(
                label_map=self.label_map,
                reduce_zero_label=self.reduce_zero_label))

        # Update palette based on label map or generate palette
        # if it is not defined
        updated_palette = self._update_palette()
        self._metainfo.update(dict(palette=updated_palette))

        # Join paths.
        if self.data_root is not None:
            self._join_prefix()

        # Build pipeline.
        # import ipdb; ipdb.set_trace()
        self.pipeline = Compose(pipeline)
        # Fully initialize the dataset.
        if not lazy_init:
            self.full_init()

        if test_mode:
            assert self._metainfo.get('classes') is not None, \
                'dataset metainfo `classes` should be specified when testing'

    @classmethod
    def get_label_map(cls,
                      new_classes: Optional[Sequence] = None
                      ) -> Union[Dict, None]:
        """Require label mapping.

        The ``label_map`` is a dictionary; its keys are the old label ids and
        its values are the new label ids, and it is used for changing pixel
        labels in load_annotations. If and only if the old classes in
        cls.METAINFO are not equal to the new classes in self._metainfo and
        neither of them is None, `label_map` is not None.

        Args:
            new_classes (list, tuple, optional): The new class names from
                metainfo. Default to None.


        Returns:
            dict, optional: The mapping from old classes in cls.METAINFO to
                new classes in self._metainfo
        """
        old_classes = cls.METAINFO.get('classes', None)
        if (new_classes is not None and old_classes is not None
                and list(new_classes) != list(old_classes)):

            label_map = {}
            if not set(new_classes).issubset(cls.METAINFO['classes']):
                raise ValueError(
                    f'new classes {new_classes} is not a '
                    f'subset of classes {old_classes} in METAINFO.')
            for i, c in enumerate(old_classes):
                if c not in new_classes:
                    label_map[i] = 255
                else:
                    label_map[i] = new_classes.index(c)
            return label_map
        else:
            return None

    def _update_palette(self) -> list:
        """Update palette after loading metainfo.

        If the length of the palette equals the number of classes, just return
        the palette. If the palette is not defined, a palette is generated
        randomly. If classes are updated by the user, the matching subset of
        the palette is returned.

        Returns:
            Sequence: Palette for the current dataset.
        """
        palette = self._metainfo.get('palette', [])
        classes = self._metainfo.get('classes', [])
        # palette does match classes
        if len(palette) == len(classes):
            return palette

        if len(palette) == 0:
            # Get random state before set seed, and restore
            # random state later.
            # It will prevent loss of randomness, as the palette
            # may be different in each iteration if not specified.
            # See: https://github.com/open-mmlab/mmdetection/issues/5844
            state = np.random.get_state()
            np.random.seed(42)
            # random palette
            new_palette = np.random.randint(
                0, 255, size=(len(classes), 3)).tolist()
            np.random.set_state(state)
        elif len(palette) >= len(classes) and self.label_map is not None:
            new_palette = []
            # return subset of palette
            for old_id, new_id in sorted(
                    self.label_map.items(), key=lambda x: x[1]):
                if new_id != 255:
                    new_palette.append(palette[old_id])
            new_palette = type(palette)(new_palette)
        else:
            raise ValueError('palette does not match classes '
                             f'as metainfo is {self._metainfo}.')
        return new_palette

    def load_data_list(self) -> List[dict]:
        """Load annotation from directory or annotation file.

        Returns:
            list[dict]: All data info of dataset.
        """
        data_list = []
        img_dir = self.data_prefix.get('img_path', None)
        ann_dir = self.data_prefix.get('seg_map_path', None)
        if osp.isfile(self.ann_file):
            lines = mmengine.list_from_file(
                self.ann_file, backend_args=self.backend_args)
            for line in lines:
                img_name = line.strip()
                data_info = dict(
                    img_path=osp.join(img_dir, img_name + self.img_suffix))
                if ann_dir is not None:
                    seg_map = img_name + self.seg_map_suffix
                    data_info['seg_map_path'] = osp.join(ann_dir, seg_map)
                data_info['label_map'] = self.label_map
                data_info['reduce_zero_label'] = self.reduce_zero_label
                data_info['seg_fields'] = []
                data_list.append(data_info)
        else:
            for img in fileio.list_dir_or_file(
                    dir_path=img_dir,
                    list_dir=False,
                    suffix=self.img_suffix,
                    recursive=True,
                    backend_args=self.backend_args):
                data_info = dict(img_path=osp.join(img_dir, img))
                if ann_dir is not None:
                    seg_map = img.replace(self.img_suffix, self.seg_map_suffix)
                    data_info['seg_map_path'] = osp.join(ann_dir, seg_map)
                data_info['label_map'] = self.label_map
                data_info['reduce_zero_label'] = self.reduce_zero_label
                data_info['seg_fields'] = []
                data_list.append(data_info)
        data_list = sorted(data_list, key=lambda x: x['img_path'])
|
| 269 |
+
return data_list
|
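To make the class-remapping logic of ``get_label_map`` above concrete, here is a minimal self-contained sketch; the five old classes and the two-class subset are made up for illustration and are not part of the upload.

# Standalone sketch of the label-map rule used above: classes kept by the
# user are renumbered to their new index, dropped classes map to 255.
OLD_CLASSES = ('road', 'sidewalk', 'building', 'sky', 'person')   # assumed
NEW_CLASSES = ('person', 'road')                                  # assumed

label_map = {}
for i, c in enumerate(OLD_CLASSES):
    label_map[i] = NEW_CLASSES.index(c) if c in NEW_CLASSES else 255

print(label_map)  # {0: 1, 1: 255, 2: 255, 3: 255, 4: 0}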
mmseg/datasets/chase_db1.py
ADDED
@@ -0,0 +1,30 @@
# Copyright (c) OpenMMLab. All rights reserved.

from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class ChaseDB1Dataset(BaseSegDataset):
    """Chase_db1 dataset.

    In segmentation map annotation for Chase_db1, 0 stands for background,
    which is included in 2 categories. ``reduce_zero_label`` is fixed to False.
    The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
    '_1stHO.png'.
    """
    METAINFO = dict(
        classes=('background', 'vessel'),
        palette=[[120, 120, 120], [6, 230, 230]])

    def __init__(self,
                 img_suffix='.png',
                 seg_map_suffix='_1stHO.png',
                 reduce_zero_label=False,
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix,
            seg_map_suffix=seg_map_suffix,
            reduce_zero_label=reduce_zero_label,
            **kwargs)
        assert self.file_client.exists(self.data_prefix['img_path'])
mmseg/datasets/cityscapes.py
ADDED
@@ -0,0 +1,30 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class CityscapesDataset(BaseSegDataset):
    """Cityscapes dataset.

    The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix``
    is fixed to '_gtFine_labelTrainIds.png' for Cityscapes dataset.
    """
    METAINFO = dict(
        classes=('road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
                 'traffic light', 'traffic sign', 'vegetation', 'terrain',
                 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train',
                 'motorcycle', 'bicycle'),
        palette=[[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
                 [190, 153, 153], [153, 153, 153], [250, 170, 30],
                 [220, 220, 0], [107, 142, 35], [152, 251, 152],
                 [70, 130, 180], [220, 20, 60], [255, 0, 0], [0, 0, 142],
                 [0, 0, 70], [0, 60, 100], [0, 80, 100], [0, 0, 230],
                 [119, 11, 32]])

    def __init__(self,
                 img_suffix='_leftImg8bit.png',
                 seg_map_suffix='_gtFine_labelTrainIds.png',
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs)
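As a usage sketch only (the data root and the empty pipeline are placeholder assumptions, not taken from this upload), the registered CityscapesDataset can be built through the registry; importing ``mmseg.datasets`` runs the ``@DATASETS.register_module()`` decorators above.

# Hypothetical build sketch for the dataset registered above.
import mmseg.datasets  # noqa: F401  # triggers the register_module decorators
from mmseg.registry import DATASETS

cfg = dict(
    type='CityscapesDataset',
    data_root='data/cityscapes',          # assumed local layout
    data_prefix=dict(
        img_path='leftImg8bit/train', seg_map_path='gtFine/train'),
    pipeline=[])                          # real configs add transforms here
dataset = DATASETS.build(cfg)
print(len(dataset), dataset.metainfo['classes'][:3])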
mmseg/datasets/coco_stuff.py
ADDED
@@ -0,0 +1,99 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class COCOStuffDataset(BaseSegDataset):
    """COCO-Stuff dataset.

    In segmentation map annotation for COCO-Stuff, Train-IDs of the 10k
    version are from 1 to 171, where 0 is the ignore index, and Train-IDs of
    COCO-Stuff 164k are from 0 to 170, where 255 is the ignore index. In both
    cases there are 171 semantic categories. ``reduce_zero_label`` is set to
    True and False for the 10k and 164k versions, respectively. The
    ``img_suffix`` is fixed to '.jpg', and ``seg_map_suffix`` is fixed to
    '.png'.
    """
    METAINFO = dict(
        classes=(
            'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
            'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
            'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
            'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
            'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
            'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
            'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
            'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
            'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
            'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
            'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
            'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
            'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
            'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner',
            'blanket', 'branch', 'bridge', 'building-other', 'bush', 'cabinet',
            'cage', 'cardboard', 'carpet', 'ceiling-other', 'ceiling-tile',
            'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain',
            'desk-stuff', 'dirt', 'door-stuff', 'fence', 'floor-marble',
            'floor-other', 'floor-stone', 'floor-tile', 'floor-wood', 'flower',
            'fog', 'food-other', 'fruit', 'furniture-other', 'grass', 'gravel',
            'ground-other', 'hill', 'house', 'leaves', 'light', 'mat', 'metal',
            'mirror-stuff', 'moss', 'mountain', 'mud', 'napkin', 'net',
            'paper', 'pavement', 'pillow', 'plant-other', 'plastic',
            'platform', 'playingfield', 'railing', 'railroad', 'river', 'road',
            'rock', 'roof', 'rug', 'salad', 'sand', 'sea', 'shelf',
            'sky-other', 'skyscraper', 'snow', 'solid-other', 'stairs',
            'stone', 'straw', 'structural-other', 'table', 'tent',
            'textile-other', 'towel', 'tree', 'vegetable', 'wall-brick',
            'wall-concrete', 'wall-other', 'wall-panel', 'wall-stone',
            'wall-tile', 'wall-wood', 'water-other', 'waterdrops',
            'window-blind', 'window-other', 'wood'),
        palette=[[0, 192, 64], [0, 192, 64], [0, 64, 96], [128, 192, 192],
                 [0, 64, 64], [0, 192, 224], [0, 192, 192], [128, 192, 64],
                 [0, 192, 96], [128, 192, 64], [128, 32, 192], [0, 0, 224],
                 [0, 0, 64], [0, 160, 192], [128, 0, 96], [128, 0, 192],
                 [0, 32, 192], [128, 128, 224], [0, 0, 192], [128, 160, 192],
                 [128, 128, 0], [128, 0, 32], [128, 32, 0], [128, 0, 128],
                 [64, 128, 32], [0, 160, 0], [0, 0, 0], [192, 128, 160],
                 [0, 32, 0], [0, 128, 128], [64, 128, 160], [128, 160, 0],
                 [0, 128, 0], [192, 128, 32], [128, 96, 128], [0, 0, 128],
                 [64, 0, 32], [0, 224, 128], [128, 0, 0], [192, 0, 160],
                 [0, 96, 128], [128, 128, 128], [64, 0, 160], [128, 224, 128],
                 [128, 128, 64], [192, 0, 32], [128, 96, 0], [128, 0, 192],
                 [0, 128, 32], [64, 224, 0], [0, 0, 64], [128, 128, 160],
                 [64, 96, 0], [0, 128, 192], [0, 128, 160], [192, 224, 0],
                 [0, 128, 64], [128, 128, 32], [192, 32, 128], [0, 64, 192],
                 [0, 0, 32], [64, 160, 128], [128, 64, 64], [128, 0, 160],
                 [64, 32, 128], [128, 192, 192], [0, 0, 160], [192, 160, 128],
                 [128, 192, 0], [128, 0, 96], [192, 32, 0], [128, 64, 128],
                 [64, 128, 96], [64, 160, 0], [0, 64, 0], [192, 128, 224],
                 [64, 32, 0], [0, 192, 128], [64, 128, 224], [192, 160, 0],
                 [0, 192, 0], [192, 128, 96], [192, 96, 128], [0, 64, 128],
                 [64, 0, 96], [64, 224, 128], [128, 64, 0], [192, 0, 224],
                 [64, 96, 128], [128, 192, 128], [64, 0, 224], [192, 224, 128],
                 [128, 192, 64], [192, 0, 96], [192, 96, 0], [128, 64, 192],
                 [0, 128, 96], [0, 224, 0], [64, 64, 64], [128, 128, 224],
                 [0, 96, 0], [64, 192, 192], [0, 128, 224], [128, 224, 0],
                 [64, 192, 64], [128, 128, 96], [128, 32, 128], [64, 0, 192],
                 [0, 64, 96], [0, 160, 128], [192, 0, 64], [128, 64, 224],
                 [0, 32, 128], [192, 128, 192], [0, 64, 224], [128, 160, 128],
                 [192, 128, 0], [128, 64, 32], [128, 32, 64], [192, 0, 128],
                 [64, 192, 32], [0, 160, 64], [64, 0, 0], [192, 192, 160],
                 [0, 32, 64], [64, 128, 128], [64, 192, 160], [128, 160, 64],
                 [64, 128, 0], [192, 192, 32], [128, 96, 192], [64, 0, 128],
                 [64, 64, 32], [0, 224, 192], [192, 0, 0], [192, 64, 160],
                 [0, 96, 192], [192, 128, 128], [64, 64, 160], [128, 224, 192],
                 [192, 128, 64], [192, 64, 32], [128, 96, 64], [192, 0, 192],
                 [0, 192, 32], [64, 224, 64], [64, 0, 64], [128, 192, 160],
                 [64, 96, 64], [64, 128, 192], [0, 192, 160], [192, 224, 64],
                 [64, 128, 64], [128, 192, 32], [192, 32, 192], [64, 64, 192],
                 [0, 64, 32], [64, 160, 192], [192, 64, 64], [128, 64, 160],
                 [64, 32, 192], [192, 192, 192], [0, 64, 160], [192, 160, 192],
                 [192, 192, 0], [128, 64, 96], [192, 32, 64], [192, 64, 128],
                 [64, 192, 96], [64, 160, 64], [64, 64, 0]])

    def __init__(self,
                 img_suffix='.jpg',
                 seg_map_suffix='_labelTrainIds.png',
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs)
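Since the docstring above distinguishes the 10k and 164k conventions, a hedged config sketch of the two variants follows; the directory names describe a typical layout and are assumptions, not paths from this upload.

# Illustrative configs only: the 10k split needs reduce_zero_label=True
# because its Train-IDs start at 1; the 164k split already uses 0-170 with
# 255 as ignore, so it keeps the default False.
cocostuff10k = dict(
    type='COCOStuffDataset',
    data_root='data/coco_stuff10k',
    reduce_zero_label=True,
    data_prefix=dict(img_path='images/train2014',
                     seg_map_path='annotations/train2014'),
    pipeline=[])
cocostuff164k = dict(
    type='COCOStuffDataset',
    data_root='data/coco_stuff164k',
    data_prefix=dict(img_path='images/train2017',
                     seg_map_path='annotations/train2017'),
    pipeline=[])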
mmseg/datasets/dark_zurich.py
ADDED
@@ -0,0 +1,15 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
from .cityscapes import CityscapesDataset


@DATASETS.register_module()
class DarkZurichDataset(CityscapesDataset):
    """DarkZurichDataset dataset."""

    def __init__(self,
                 img_suffix='_rgb_anon.png',
                 seg_map_suffix='_gt_labelTrainIds.png',
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs)
mmseg/datasets/dataset_wrappers.py
ADDED
@@ -0,0 +1,136 @@
# Copyright (c) OpenMMLab. All rights reserved.
import collections
import copy
from typing import List, Optional, Sequence, Union

from mmengine.dataset import ConcatDataset, force_full_init

from mmseg.registry import DATASETS, TRANSFORMS


@DATASETS.register_module()
class MultiImageMixDataset:
    """A wrapper of multiple images mixed dataset.

    Suitable for training on multiple images mixed data augmentation like
    mosaic and mixup.

    Args:
        dataset (ConcatDataset or dict): The dataset to be mixed.
        pipeline (Sequence[dict]): Sequence of transform object or
            config dict to be composed.
        skip_type_keys (list[str], optional): Sequence of transform type
            strings to be skipped in the pipeline. Defaults to None.
    """

    def __init__(self,
                 dataset: Union[ConcatDataset, dict],
                 pipeline: Sequence[dict],
                 skip_type_keys: Optional[List[str]] = None,
                 lazy_init: bool = False) -> None:
        assert isinstance(pipeline, collections.abc.Sequence)

        if isinstance(dataset, dict):
            self.dataset = DATASETS.build(dataset)
        elif isinstance(dataset, ConcatDataset):
            self.dataset = dataset
        else:
            raise TypeError(
                'elements in datasets sequence should be config or '
                f'`ConcatDataset` instance, but got {type(dataset)}')

        if skip_type_keys is not None:
            assert all([
                isinstance(skip_type_key, str)
                for skip_type_key in skip_type_keys
            ])
        self._skip_type_keys = skip_type_keys

        self.pipeline = []
        self.pipeline_types = []
        for transform in pipeline:
            if isinstance(transform, dict):
                self.pipeline_types.append(transform['type'])
                transform = TRANSFORMS.build(transform)
                self.pipeline.append(transform)
            else:
                raise TypeError('pipeline must be a dict')

        self._metainfo = self.dataset.metainfo
        self.num_samples = len(self.dataset)

        self._fully_initialized = False
        if not lazy_init:
            self.full_init()

    @property
    def metainfo(self) -> dict:
        """Get the meta information of the multi-image-mixed dataset.

        Returns:
            dict: The meta information of multi-image-mixed dataset.
        """
        return copy.deepcopy(self._metainfo)

    def full_init(self):
        """Loop to ``full_init`` each dataset."""
        if self._fully_initialized:
            return

        self.dataset.full_init()
        self._ori_len = len(self.dataset)
        self._fully_initialized = True

    @force_full_init
    def get_data_info(self, idx: int) -> dict:
        """Get annotation by index.

        Args:
            idx (int): Global index of ``ConcatDataset``.

        Returns:
            dict: The idx-th annotation of the datasets.
        """
        return self.dataset.get_data_info(idx)

    @force_full_init
    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx):
        results = copy.deepcopy(self.dataset[idx])
        for (transform, transform_type) in zip(self.pipeline,
                                               self.pipeline_types):
            if self._skip_type_keys is not None and \
                    transform_type in self._skip_type_keys:
                continue

            if hasattr(transform, 'get_indices'):
                indices = transform.get_indices(self.dataset)
                if not isinstance(indices, collections.abc.Sequence):
                    indices = [indices]
                mix_results = [
                    copy.deepcopy(self.dataset[index]) for index in indices
                ]
                results['mix_results'] = mix_results

            results = transform(results)

            if 'mix_results' in results:
                results.pop('mix_results')

        return results

    def update_skip_type_keys(self, skip_type_keys):
        """Update skip_type_keys.

        It is called by an external hook.

        Args:
            skip_type_keys (list[str], optional): Sequence of transform type
                strings to be skipped in the pipeline.
        """
        assert all([
            isinstance(skip_type_key, str) for skip_type_key in skip_type_keys
        ])
        self._skip_type_keys = skip_type_keys
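A hedged config sketch of how this wrapper is typically assembled: the wrapped dataset must be a ``ConcatDataset`` (or a dict that builds one), and the mix transform name ``RandomMosaic`` and its arguments are assumptions about what is registered in this upload's transforms.

# Hypothetical training-dataset config using the wrapper above.
train_dataset = dict(
    type='MultiImageMixDataset',
    dataset=dict(
        type='ConcatDataset',
        datasets=[
            dict(
                type='CityscapesDataset',
                data_root='data/cityscapes',          # assumed path
                data_prefix=dict(
                    img_path='leftImg8bit/train',
                    seg_map_path='gtFine/train'),
                pipeline=[
                    dict(type='LoadImageFromFile'),
                    dict(type='LoadAnnotations'),
                ]),
        ]),
    pipeline=[
        # mosaic-style transform; name and arguments are assumptions
        dict(type='RandomMosaic', prob=1.0, img_scale=(512, 1024)),
        dict(type='PackSegInputs'),
    ])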
mmseg/datasets/decathlon.py
ADDED
@@ -0,0 +1,96 @@
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
from typing import List

from mmengine.fileio import load

from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class DecathlonDataset(BaseSegDataset):
    """Dataset for the Decathlon dataset.

    The dataset.json format is shown as follows

    .. code-block:: none

        {
            "name": "BRATS",
            "tensorImageSize": "4D",
            "modality":
            {
                "0": "FLAIR",
                "1": "T1w",
                "2": "t1gd",
                "3": "T2w"
            },
            "labels": {
                "0": "background",
                "1": "edema",
                "2": "non-enhancing tumor",
                "3": "enhancing tumour"
            },
            "numTraining": 484,
            "numTest": 266,
            "training":
            [
                {
                    "image": "./imagesTr/BRATS_306.nii.gz"
                    "label": "./labelsTr/BRATS_306.nii.gz"
                    ...
                }
            ]
            "test":
            [
                "./imagesTs/BRATS_557.nii.gz"
                ...
            ]
        }
    """

    def load_data_list(self) -> List[dict]:
        """Load annotation from directory or annotation file.

        Returns:
            list[dict]: All data info of dataset.
        """
        # `self.ann_file` denotes the absolute annotation file path if
        # `self.root=None` or relative path if `self.root=/path/to/data/`.
        annotations = load(self.ann_file)
        if not isinstance(annotations, dict):
            raise TypeError(f'The annotations loaded from annotation file '
                            f'should be a dict, but got {type(annotations)}!')
        raw_data_list = annotations[
            'training'] if not self.test_mode else annotations['test']
        data_list = []
        for raw_data_info in raw_data_list:
            # `[2:]` removes the leading './' in the file path, which would
            # break loading from cloud storage.
            if isinstance(raw_data_info, dict):
                data_info = dict(
                    img_path=osp.join(self.data_root,
                                      raw_data_info['image'][2:]))
                data_info['seg_map_path'] = osp.join(
                    self.data_root, raw_data_info['label'][2:])
            else:
                # test entries are plain path strings; strip the leading './'
                # before joining with `data_root`.
                data_info = dict(
                    img_path=osp.join(self.data_root, raw_data_info[2:]))
            data_info['label_map'] = self.label_map
            data_info['reduce_zero_label'] = self.reduce_zero_label
            data_info['seg_fields'] = []
            data_list.append(data_info)
        annotations.pop('training')
        annotations.pop('test')

        metainfo = copy.deepcopy(annotations)
        metainfo['classes'] = [*metainfo['labels'].values()]
        # Meta information loaded from the annotation file will not override
        # the existing meta information from `BaseDataset.METAINFO` or the
        # `metainfo` argument of the constructor.
        for k, v in metainfo.items():
            self._metainfo.setdefault(k, v)

        return data_list
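To illustrate the path handling in ``load_data_list`` above, here is a standalone sketch with a made-up dataset.json training record; the data root is an assumption.

# Standalone illustration of how one 'training' record becomes a data_info.
import os.path as osp

data_root = 'data/Task01_BrainTumour'               # assumed location
raw = {'image': './imagesTr/BRATS_306.nii.gz',
       'label': './labelsTr/BRATS_306.nii.gz'}

data_info = dict(
    img_path=osp.join(data_root, raw['image'][2:]),  # strip leading './'
    seg_map_path=osp.join(data_root, raw['label'][2:]),
    label_map=None,
    reduce_zero_label=False,
    seg_fields=[])
print(data_info['img_path'])  # data/Task01_BrainTumour/imagesTr/BRATS_306.nii.gz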
mmseg/datasets/drive.py
ADDED
@@ -0,0 +1,30 @@
# Copyright (c) OpenMMLab. All rights reserved.

from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class DRIVEDataset(BaseSegDataset):
    """DRIVE dataset.

    In segmentation map annotation for DRIVE, 0 stands for background, which
    is included in 2 categories. ``reduce_zero_label`` is fixed to False. The
    ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
    '_manual1.png'.
    """
    METAINFO = dict(
        classes=('background', 'vessel'),
        palette=[[120, 120, 120], [6, 230, 230]])

    def __init__(self,
                 img_suffix='.png',
                 seg_map_suffix='_manual1.png',
                 reduce_zero_label=False,
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix,
            seg_map_suffix=seg_map_suffix,
            reduce_zero_label=reduce_zero_label,
            **kwargs)
        assert self.file_client.exists(self.data_prefix['img_path'])
mmseg/datasets/hrf.py
ADDED
@@ -0,0 +1,30 @@
# Copyright (c) OpenMMLab. All rights reserved.

from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class HRFDataset(BaseSegDataset):
    """HRF dataset.

    In segmentation map annotation for HRF, 0 stands for background, which is
    included in 2 categories. ``reduce_zero_label`` is fixed to False. The
    ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
    '.png'.
    """
    METAINFO = dict(
        classes=('background', 'vessel'),
        palette=[[120, 120, 120], [6, 230, 230]])

    def __init__(self,
                 img_suffix='.png',
                 seg_map_suffix='.png',
                 reduce_zero_label=False,
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix,
            seg_map_suffix=seg_map_suffix,
            reduce_zero_label=reduce_zero_label,
            **kwargs)
        assert self.file_client.exists(self.data_prefix['img_path'])
mmseg/datasets/isaid.py
ADDED
@@ -0,0 +1,39 @@
# Copyright (c) OpenMMLab. All rights reserved.
import mmengine.fileio as fileio

from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class iSAIDDataset(BaseSegDataset):
    """iSAID: A Large-scale Dataset for Instance Segmentation in Aerial Images.

    The segmentation map annotation for iSAID covers 16 categories.
    ``reduce_zero_label`` is fixed to False. The ``img_suffix`` is fixed to
    '.png' and ``seg_map_suffix`` is fixed to '_instance_color_RGB.png'.
    """

    METAINFO = dict(
        classes=('background', 'ship', 'store_tank', 'baseball_diamond',
                 'tennis_court', 'basketball_court', 'Ground_Track_Field',
                 'Bridge', 'Large_Vehicle', 'Small_Vehicle', 'Helicopter',
                 'Swimming_pool', 'Roundabout', 'Soccer_ball_field', 'plane',
                 'Harbor'),
        palette=[[0, 0, 0], [0, 0, 63], [0, 63, 63], [0, 63, 0], [0, 63, 127],
                 [0, 63, 191], [0, 63, 255], [0, 127, 63], [0, 127, 127],
                 [0, 0, 127], [0, 0, 191], [0, 0, 255], [0, 191, 127],
                 [0, 127, 191], [0, 127, 255], [0, 100, 155]])

    def __init__(self,
                 img_suffix='.png',
                 seg_map_suffix='_instance_color_RGB.png',
                 ignore_index=255,
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix,
            seg_map_suffix=seg_map_suffix,
            ignore_index=ignore_index,
            **kwargs)
        assert fileio.exists(
            self.data_prefix['img_path'], backend_args=self.backend_args)
mmseg/datasets/isprs.py
ADDED
@@ -0,0 +1,29 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class ISPRSDataset(BaseSegDataset):
    """ISPRS dataset.

    In segmentation map annotation for ISPRS, 0 is the ignore index.
    ``reduce_zero_label`` should be set to True. The ``img_suffix`` and
    ``seg_map_suffix`` are both fixed to '.png'.
    """
    METAINFO = dict(
        classes=('impervious_surface', 'building', 'low_vegetation', 'tree',
                 'car', 'clutter'),
        palette=[[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0],
                 [255, 255, 0], [255, 0, 0]])

    def __init__(self,
                 img_suffix='.png',
                 seg_map_suffix='.png',
                 reduce_zero_label=True,
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix,
            seg_map_suffix=seg_map_suffix,
            reduce_zero_label=reduce_zero_label,
            **kwargs)
mmseg/datasets/lip.py
ADDED
@@ -0,0 +1,47 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class LIPDataset(BaseSegDataset):
    """LIP dataset.

    The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to
    '.png'.
    """
    METAINFO = dict(
        classes=('Background', 'Hat', 'Hair', 'Glove', 'Sunglasses',
                 'UpperClothes', 'Dress', 'Coat', 'Socks', 'Pants',
                 'Jumpsuits', 'Scarf', 'Skirt', 'Face', 'Left-arm',
                 'Right-arm', 'Left-leg', 'Right-leg', 'Left-shoe',
                 'Right-shoe'),
        palette=(
            [0, 0, 0],
            [128, 0, 0],
            [255, 0, 0],
            [0, 85, 0],
            [170, 0, 51],
            [255, 85, 0],
            [0, 0, 85],
            [0, 119, 221],
            [85, 85, 0],
            [0, 85, 85],
            [85, 51, 0],
            [52, 86, 128],
            [0, 128, 0],
            [0, 0, 255],
            [51, 170, 221],
            [0, 255, 255],
            [85, 255, 170],
            [170, 255, 85],
            [255, 255, 0],
            [255, 170, 0],
        ))

    def __init__(self,
                 img_suffix='.jpg',
                 seg_map_suffix='.png',
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs)
mmseg/datasets/loveda.py
ADDED
@@ -0,0 +1,29 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class LoveDADataset(BaseSegDataset):
    """LoveDA dataset.

    In segmentation map annotation for LoveDA, 0 is the ignore index.
    ``reduce_zero_label`` should be set to True. The ``img_suffix`` and
    ``seg_map_suffix`` are both fixed to '.png'.
    """
    METAINFO = dict(
        classes=('background', 'building', 'road', 'water', 'barren', 'forest',
                 'agricultural'),
        palette=[[255, 255, 255], [255, 0, 0], [255, 255, 0], [0, 0, 255],
                 [159, 129, 183], [0, 255, 0], [255, 195, 128]])

    def __init__(self,
                 img_suffix='.png',
                 seg_map_suffix='.png',
                 reduce_zero_label=True,
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix,
            seg_map_suffix=seg_map_suffix,
            reduce_zero_label=reduce_zero_label,
            **kwargs)
mmseg/datasets/mapillary.py
ADDED
@@ -0,0 +1,176 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class MapillaryDataset_v1(BaseSegDataset):
    """Mapillary Vistas Dataset.

    Dataset paper link:
    http://ieeexplore.ieee.org/document/8237796/

    v1.2 contains 66 object classes (37 instance-specific).

    v2.0 contains 124 object classes
    (70 instance-specific, 46 stuff, 8 void or crowd).

    The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
    fixed to '.png' for Mapillary Vistas Dataset.
    """
    METAINFO = dict(
        classes=('Bird', 'Ground Animal', 'Curb', 'Fence', 'Guard Rail',
                 'Barrier', 'Wall', 'Bike Lane', 'Crosswalk - Plain',
                 'Curb Cut', 'Parking', 'Pedestrian Area', 'Rail Track',
                 'Road', 'Service Lane', 'Sidewalk', 'Bridge', 'Building',
                 'Tunnel', 'Person', 'Bicyclist', 'Motorcyclist',
                 'Other Rider', 'Lane Marking - Crosswalk',
                 'Lane Marking - General', 'Mountain', 'Sand', 'Sky', 'Snow',
                 'Terrain', 'Vegetation', 'Water', 'Banner', 'Bench',
                 'Bike Rack', 'Billboard', 'Catch Basin', 'CCTV Camera',
                 'Fire Hydrant', 'Junction Box', 'Mailbox', 'Manhole',
                 'Phone Booth', 'Pothole', 'Street Light', 'Pole',
                 'Traffic Sign Frame', 'Utility Pole', 'Traffic Light',
                 'Traffic Sign (Back)', 'Traffic Sign (Front)', 'Trash Can',
                 'Bicycle', 'Boat', 'Bus', 'Car', 'Caravan', 'Motorcycle',
                 'On Rails', 'Other Vehicle', 'Trailer', 'Truck',
                 'Wheeled Slow', 'Car Mount', 'Ego Vehicle', 'Unlabeled'),
        palette=[[165, 42, 42], [0, 192, 0], [196, 196, 196], [190, 153, 153],
                 [180, 165, 180], [90, 120, 150], [102, 102, 156],
                 [128, 64, 255], [140, 140, 200], [170, 170, 170],
                 [250, 170, 160], [96, 96, 96],
                 [230, 150, 140], [128, 64, 128], [110, 110, 110],
                 [244, 35, 232], [150, 100, 100], [70, 70, 70], [150, 120, 90],
                 [220, 20, 60], [255, 0, 0], [255, 0, 100], [255, 0, 200],
                 [200, 128, 128], [255, 255, 255], [64, 170, 64],
                 [230, 160, 50],
                 [70, 130, 180], [190, 255, 255], [152, 251, 152],
                 [107, 142, 35], [0, 170, 30], [255, 255, 128], [250, 0, 30],
                 [100, 140, 180], [220, 220, 220], [220, 128, 128],
                 [222, 40, 40], [100, 170, 30], [40, 40, 40], [33, 33, 33],
                 [100, 128, 160], [142, 0, 0], [70, 100, 150], [210, 170, 100],
                 [153, 153, 153], [128, 128, 128], [0, 0, 80], [250, 170, 30],
                 [192, 192, 192], [220, 220, 0], [140, 140, 20], [119, 11, 32],
                 [150, 0, 255], [0, 60, 100], [0, 0, 142], [0, 0, 90],
                 [0, 0, 230], [0, 80, 100], [128, 64, 64], [0, 0, 110],
                 [0, 0, 70], [0, 0, 192], [32, 32, 32], [120, 10, 10],
                 [0, 0, 0]])

    def __init__(self,
                 img_suffix='.jpg',
                 seg_map_suffix='.png',
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs)


@DATASETS.register_module()
class MapillaryDataset_v2(BaseSegDataset):
    """Mapillary Vistas Dataset.

    Dataset paper link:
    http://ieeexplore.ieee.org/document/8237796/

    v1.2 contains 66 object classes (37 instance-specific).

    v2.0 contains 124 object classes
    (70 instance-specific, 46 stuff, 8 void or crowd).

    The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
    fixed to '.png' for Mapillary Vistas Dataset.
    """
    METAINFO = dict(
        classes=(
            'Bird', 'Ground Animal', 'Ambiguous Barrier', 'Concrete Block',
            'Curb', 'Fence', 'Guard Rail', 'Barrier', 'Road Median',
            'Road Side', 'Lane Separator', 'Temporary Barrier', 'Wall',
            'Bike Lane', 'Crosswalk - Plain', 'Curb Cut', 'Driveway',
            'Parking', 'Parking Aisle', 'Pedestrian Area', 'Rail Track',
            'Road', 'Road Shoulder', 'Service Lane', 'Sidewalk',
            'Traffic Island', 'Bridge', 'Building', 'Garage', 'Tunnel',
            'Person', 'Person Group', 'Bicyclist', 'Motorcyclist',
            'Other Rider', 'Lane Marking - Dashed Line',
            'Lane Marking - Straight Line', 'Lane Marking - Zigzag Line',
            'Lane Marking - Ambiguous', 'Lane Marking - Arrow (Left)',
            'Lane Marking - Arrow (Other)', 'Lane Marking - Arrow (Right)',
            'Lane Marking - Arrow (Split Left or Straight)',
            'Lane Marking - Arrow (Split Right or Straight)',
            'Lane Marking - Arrow (Straight)', 'Lane Marking - Crosswalk',
            'Lane Marking - Give Way (Row)',
            'Lane Marking - Give Way (Single)',
            'Lane Marking - Hatched (Chevron)',
            'Lane Marking - Hatched (Diagonal)', 'Lane Marking - Other',
            'Lane Marking - Stop Line', 'Lane Marking - Symbol (Bicycle)',
            'Lane Marking - Symbol (Other)', 'Lane Marking - Text',
            'Lane Marking (only) - Dashed Line',
            'Lane Marking (only) - Crosswalk', 'Lane Marking (only) - Other',
            'Lane Marking (only) - Test', 'Mountain', 'Sand', 'Sky', 'Snow',
            'Terrain', 'Vegetation', 'Water', 'Banner', 'Bench', 'Bike Rack',
            'Catch Basin', 'CCTV Camera', 'Fire Hydrant', 'Junction Box',
            'Mailbox', 'Manhole', 'Parking Meter', 'Phone Booth', 'Pothole',
            'Signage - Advertisement', 'Signage - Ambiguous', 'Signage - Back',
            'Signage - Information', 'Signage - Other', 'Signage - Store',
            'Street Light', 'Pole', 'Pole Group', 'Traffic Sign Frame',
            'Utility Pole', 'Traffic Cone', 'Traffic Light - General (Single)',
            'Traffic Light - Pedestrians', 'Traffic Light - General (Upright)',
            'Traffic Light - General (Horizontal)', 'Traffic Light - Cyclists',
            'Traffic Light - Other', 'Traffic Sign - Ambiguous',
            'Traffic Sign (Back)', 'Traffic Sign - Direction (Back)',
            'Traffic Sign - Direction (Front)', 'Traffic Sign (Front)',
            'Traffic Sign - Parking', 'Traffic Sign - Temporary (Back)',
            'Traffic Sign - Temporary (Front)', 'Trash Can', 'Bicycle', 'Boat',
            'Bus', 'Car', 'Caravan', 'Motorcycle', 'On Rails', 'Other Vehicle',
            'Trailer', 'Truck', 'Vehicle Group', 'Wheeled Slow', 'Water Valve',
            'Car Mount', 'Dynamic', 'Ego Vehicle', 'Ground', 'Static',
            'Unlabeled'),
        palette=[[165, 42, 42], [0, 192, 0], [250, 170, 31], [250, 170, 32],
                 [196, 196, 196], [190, 153, 153], [180, 165, 180],
                 [90, 120, 150], [250, 170, 33], [250, 170, 34],
                 [128, 128, 128], [250, 170, 35], [102, 102, 156],
                 [128, 64, 255], [140, 140, 200], [170, 170, 170],
                 [250, 170, 36], [250, 170, 160], [250, 170, 37], [96, 96, 96],
                 [230, 150, 140], [128, 64, 128], [110, 110, 110],
                 [110, 110, 110], [244, 35, 232], [128, 196, 128],
                 [150, 100, 100],
                 [70, 70, 70], [150, 150, 150], [150, 120, 90], [220, 20, 60],
                 [220, 20, 60], [255, 0, 0], [255, 0, 100], [255, 0, 200],
                 [255, 255, 255], [255, 255, 255], [250, 170, 29],
                 [250, 170, 28], [250, 170, 26], [250, 170, 25],
                 [250, 170, 24],
                 [250, 170, 22], [250, 170, 21], [250, 170, 20],
                 [255, 255, 255],
                 [250, 170, 19], [250, 170, 18], [250, 170, 12],
                 [250, 170, 11],
                 [255, 255, 255], [255, 255, 255], [250, 170, 16],
                 [250, 170, 15], [250, 170, 15], [255, 255, 255],
                 [255, 255, 255], [255, 255, 255], [255, 255, 255],
                 [64, 170, 64], [230, 160, 50],
                 [70, 130, 180], [190, 255, 255], [152, 251, 152],
                 [107, 142, 35], [0, 170, 30], [255, 255, 128], [250, 0, 30],
                 [100, 140, 180], [220, 128, 128], [222, 40, 40],
                 [100, 170, 30],
                 [40, 40, 40], [33, 33, 33], [100, 128, 160], [20, 20, 255],
                 [142, 0, 0], [70, 100, 150], [250, 171, 30], [250, 172, 30],
                 [250, 173, 30], [250, 174, 30], [250, 175, 30],
                 [250, 176, 30],
                 [210, 170, 100], [153, 153, 153], [153, 153, 153],
                 [128, 128, 128], [0, 0, 80], [210, 60, 60], [250, 170, 30],
                 [250, 170, 30], [250, 170, 30], [250, 170, 30],
                 [250, 170, 30],
                 [250, 170, 30], [192, 192, 192], [192, 192, 192],
                 [192, 192, 192], [220, 220, 0], [220, 220, 0], [0, 0, 196],
                 [192, 192, 192], [220, 220, 0], [140, 140, 20], [119, 11, 32],
                 [150, 0, 255], [0, 60, 100], [0, 0, 142], [0, 0, 90],
                 [0, 0, 230], [0, 80, 100], [128, 64, 64], [0, 0, 110],
                 [0, 0, 70], [0, 0, 142], [0, 0, 192], [170, 170, 170],
                 [32, 32, 32], [111, 74, 0], [120, 10, 10], [81, 0, 81],
                 [111, 111, 0], [0, 0, 0]])

    def __init__(self,
                 img_suffix='.jpg',
                 seg_map_suffix='.png',
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs)
mmseg/datasets/night_driving.py
ADDED
@@ -0,0 +1,15 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
from .cityscapes import CityscapesDataset


@DATASETS.register_module()
class NightDrivingDataset(CityscapesDataset):
    """NightDrivingDataset dataset."""

    def __init__(self,
                 img_suffix='_leftImg8bit.png',
                 seg_map_suffix='_gtCoarse_labelTrainIds.png',
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix, seg_map_suffix=seg_map_suffix, **kwargs)
mmseg/datasets/pascal_context.py
ADDED
@@ -0,0 +1,115 @@
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp

from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class PascalContextDataset(BaseSegDataset):
    """PascalContext dataset.

    In segmentation map annotation for PascalContext, 0 stands for background,
    which is included in 60 categories. ``reduce_zero_label`` is fixed to
    False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
    fixed to '.png'.

    Args:
        ann_file (str): Annotation file path.
    """

    METAINFO = dict(
        classes=('background', 'aeroplane', 'bag', 'bed', 'bedclothes',
                 'bench', 'bicycle', 'bird', 'boat', 'book', 'bottle',
                 'building', 'bus', 'cabinet', 'car', 'cat', 'ceiling',
                 'chair', 'cloth', 'computer', 'cow', 'cup', 'curtain', 'dog',
                 'door', 'fence', 'floor', 'flower', 'food', 'grass', 'ground',
                 'horse', 'keyboard', 'light', 'motorbike', 'mountain',
                 'mouse', 'person', 'plate', 'platform', 'pottedplant', 'road',
                 'rock', 'sheep', 'shelves', 'sidewalk', 'sign', 'sky', 'snow',
                 'sofa', 'table', 'track', 'train', 'tree', 'truck',
                 'tvmonitor', 'wall', 'water', 'window', 'wood'),
        palette=[[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
                 [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
                 [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
                 [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
                 [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
                 [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
                 [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
                 [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
                 [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
                 [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
                 [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
                 [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
                 [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
                 [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
                 [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]])

    def __init__(self,
                 ann_file: str,
                 img_suffix='.jpg',
                 seg_map_suffix='.png',
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix,
            seg_map_suffix=seg_map_suffix,
            ann_file=ann_file,
            reduce_zero_label=False,
            **kwargs)
        assert self.file_client.exists(
            self.data_prefix['img_path']) and osp.isfile(self.ann_file)


@DATASETS.register_module()
class PascalContextDataset59(BaseSegDataset):
    """PascalContext dataset with 59 classes.

    In segmentation map annotation for PascalContext59, background is not
    included in the 59 categories; label 0 (background) is mapped to the
    ignore index, so ``reduce_zero_label`` is fixed to True. The
    ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is fixed to
    '.png'.

    Args:
        ann_file (str): Annotation file path.
    """
    METAINFO = dict(
        classes=('aeroplane', 'bag', 'bed', 'bedclothes', 'bench', 'bicycle',
                 'bird', 'boat', 'book', 'bottle', 'building', 'bus',
                 'cabinet', 'car', 'cat', 'ceiling', 'chair', 'cloth',
                 'computer', 'cow', 'cup', 'curtain', 'dog', 'door', 'fence',
                 'floor', 'flower', 'food', 'grass', 'ground', 'horse',
                 'keyboard', 'light', 'motorbike', 'mountain', 'mouse',
                 'person', 'plate', 'platform', 'pottedplant', 'road', 'rock',
                 'sheep', 'shelves', 'sidewalk', 'sign', 'sky', 'snow', 'sofa',
                 'table', 'track', 'train', 'tree', 'truck', 'tvmonitor',
                 'wall', 'water', 'window', 'wood'),
        palette=[[180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3],
                 [120, 120, 80], [140, 140, 140], [204, 5, 255],
                 [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
                 [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
                 [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
                 [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
                 [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
                 [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
                 [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
                 [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
                 [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
                 [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
                 [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
                 [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
                 [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]])

    def __init__(self,
                 ann_file: str,
                 img_suffix='.jpg',
                 seg_map_suffix='.png',
                 reduce_zero_label=True,
                 **kwargs):
        super().__init__(
            img_suffix=img_suffix,
            seg_map_suffix=seg_map_suffix,
            ann_file=ann_file,
            reduce_zero_label=reduce_zero_label,
            **kwargs)
        assert self.file_client.exists(
            self.data_prefix['img_path']) and osp.isfile(self.ann_file)
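Unlike most datasets in this upload, both PascalContext variants require an explicit ``ann_file`` (a split .txt listing image stems). A hedged config sketch follows; the VOC2010 layout shown is a common convention and an assumption here, not a path from the upload.

# Hypothetical config for the dataset class above.
pascal_context_train = dict(
    type='PascalContextDataset',
    data_root='data/VOCdevkit/VOC2010',                    # assumed root
    ann_file='ImageSets/SegmentationContext/train.txt',    # assumed split file
    data_prefix=dict(
        img_path='JPEGImages', seg_map_path='SegmentationClassContext'),
    pipeline=[])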
mmseg/datasets/potsdam.py
ADDED
@@ -0,0 +1,29 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class PotsdamDataset(BaseSegDataset):
    """ISPRS Potsdam dataset.

    In segmentation map annotation for Potsdam dataset, 0 is the ignore index.
    ``reduce_zero_label`` should be set to True. The ``img_suffix`` and
    ``seg_map_suffix`` are both fixed to '.png'.
    """
    METAINFO = dict(
        classes=('impervious_surface', 'building', 'low_vegetation', 'tree',
                 'car', 'clutter'),
        palette=[[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0],
                 [255, 255, 0], [255, 0, 0]])

    def __init__(self,
                 img_suffix='.png',
                 seg_map_suffix='.png',
                 reduce_zero_label=True,
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix,
            seg_map_suffix=seg_map_suffix,
            reduce_zero_label=reduce_zero_label,
            **kwargs)
mmseg/datasets/refuge.py
ADDED
@@ -0,0 +1,28 @@
# Copyright (c) OpenMMLab. All rights reserved.
import mmengine.fileio as fileio

from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class REFUGEDataset(BaseSegDataset):
    """REFUGE dataset.

    In segmentation map annotation for REFUGE, 0 stands for background, which
    is included in the 3 categories. ``reduce_zero_label`` is fixed to False.
    The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
    '.png'.
    """
    METAINFO = dict(
        classes=('background', ' Optic Cup', 'Optic Disc'),
        palette=[[120, 120, 120], [6, 230, 230], [56, 59, 120]])

    def __init__(self, **kwargs) -> None:
        super().__init__(
            img_suffix='.png',
            seg_map_suffix='.png',
            reduce_zero_label=False,
            **kwargs)
        assert fileio.exists(
            self.data_prefix['img_path'], backend_args=self.backend_args)
mmseg/datasets/stare.py
ADDED
@@ -0,0 +1,29 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmseg.registry import DATASETS
from .basesegdataset import BaseSegDataset


@DATASETS.register_module()
class STAREDataset(BaseSegDataset):
    """STARE dataset.

    In segmentation map annotation for STARE, 0 stands for background, which
    is included in 2 categories. ``reduce_zero_label`` is fixed to False. The
    ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
    '.ah.png'.
    """
    METAINFO = dict(
        classes=('background', 'vessel'),
        palette=[[120, 120, 120], [6, 230, 230]])

    def __init__(self,
                 img_suffix='.png',
                 seg_map_suffix='.ah.png',
                 reduce_zero_label=False,
                 **kwargs) -> None:
        super().__init__(
            img_suffix=img_suffix,
            seg_map_suffix=seg_map_suffix,
            reduce_zero_label=reduce_zero_label,
            **kwargs)
        assert self.file_client.exists(self.data_prefix['img_path'])