from collections import OrderedDict
from unittest import TestCase

import numpy as np

from datumaro.components.annotation import Mask
from datumaro.components.dataset import Dataset
from datumaro.components.dataset_base import DatasetItem
from datumaro.components.environment import Environment
from datumaro.components.media import Image
from datumaro.plugins.data_formats.common_semantic_segmentation import (
    CommonSemanticSegmentationImporter,
    make_categories,
)

from tests.requirements import Requirements, mark_requirement
from tests.utils.assets import get_test_asset_path
from tests.utils.test_utils import compare_datasets

# Path to the dummy dataset laid out in the format's standard structure
# (default image/mask directory layout and file prefixes).
DUMMY_DATASET_DIR = get_test_asset_path("common_semantic_segmentation_dataset", "dataset")

# Path to the dummy dataset with a non-standard layout: images and masks use
# custom filename prefixes ("image_" / "gt_"), exercised via the importer's
# image_prefix / mask_prefix options.
DUMMY_NON_STANDARD_DATASET_DIR = get_test_asset_path(
    "common_semantic_segmentation_dataset",
    "non_standard_dataset",
)


class CommonSemanticSegmentationImporterTest(TestCase):
    """Detection and import tests for the "common semantic segmentation" format."""

    # Label palette of the standard dummy dataset: (name, RGB color) pairs,
    # order matters because it defines the label indices.
    _STANDARD_PALETTE = [
        ("Void", (0, 0, 0)),
        ("Animal", (64, 128, 64)),
        ("Archway", (192, 0, 128)),
        ("Bicyclist", (0, 128, 192)),
        ("Child", (192, 128, 64)),
        ("Road", (128, 64, 128)),
    ]

    # The non-standard dummy dataset extends the palette with two more labels.
    _EXTENDED_PALETTE = _STANDARD_PALETTE + [
        ("Pedestrian", (64, 64, 0)),
        ("SignSymbol", (128, 128, 128)),
    ]

    @staticmethod
    def _item(item_id, mask_specs):
        """Build a DatasetItem with a 1x5 all-ones image and one Mask per
        (pixel_row, label_index) pair in *mask_specs*."""
        return DatasetItem(
            id=item_id,
            media=Image.from_numpy(data=np.ones((1, 5, 3))),
            annotations=[
                Mask(image=np.array([row]), label=label) for row, label in mask_specs
            ],
        )

    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_detect(self):
        # The standard layout must be recognized as exactly this one format.
        found = Environment().detect_dataset(DUMMY_DATASET_DIR)
        self.assertEqual([CommonSemanticSegmentationImporter.NAME], found)

    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_detect_non_standard_structure(self):
        # The non-standard layout must also be recognized as this format only.
        found = Environment().detect_dataset(DUMMY_NON_STANDARD_DATASET_DIR)
        self.assertEqual([CommonSemanticSegmentationImporter.NAME], found)

    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_import(self):
        expected = Dataset.from_iterable(
            [
                self._item(
                    "0001",
                    [([1, 1, 0, 1, 1], 3), ([0, 0, 1, 0, 0], 5)],
                ),
                self._item(
                    "0002",
                    [([1, 1, 1, 0, 0], 1), ([0, 0, 0, 1, 1], 4)],
                ),
            ],
            categories=make_categories(OrderedDict(self._STANDARD_PALETTE)),
        )

        actual = Dataset.import_from(DUMMY_DATASET_DIR, "common_semantic_segmentation")

        compare_datasets(self, expected, actual, require_media=True)

    @mark_requirement(Requirements.DATUM_GENERAL_REQ)
    def test_can_import_non_standard_structure(self):
        expected = Dataset.from_iterable(
            [
                self._item(
                    "0001",
                    [([1, 1, 0, 1, 1], 3), ([0, 0, 1, 0, 0], 5)],
                ),
                self._item(
                    "0002",
                    [
                        ([1, 1, 0, 0, 0], 1),
                        ([0, 0, 1, 0, 0], 5),
                        ([0, 0, 0, 1, 1], 7),
                    ],
                ),
            ],
            categories=make_categories(OrderedDict(self._EXTENDED_PALETTE)),
        )

        # Custom filename prefixes distinguish images from masks in this layout.
        actual = Dataset.import_from(
            DUMMY_NON_STANDARD_DATASET_DIR,
            "common_semantic_segmentation",
            image_prefix="image_",
            mask_prefix="gt_",
        )

        compare_datasets(self, expected, actual, require_media=True)
