# Copyright (c) OpenMMLab. All rights reserved.
from numbers import Number
from typing import Any, Dict, List, Optional, Sequence

import torch
from mmengine.model import BaseDataPreprocessor

from mmseg.registry import MODELS
from mmseg.utils import stack_batch


@MODELS.register_module()
class CustomSegDataPreProcessor(BaseDataPreprocessor):
    """Custom image pre-processor with [0, 1] scaling and [-1, 1] normalization.

    Modifications relative to the standard segmentation preprocessor:

    1. ``to_01`` enables ``x / 255`` scaling.
    2. ``custom_norm`` enables a fixed ``(x - 0.5) / 0.5`` normalization.

    Steps when both options are enabled::

        x = x / 255.0        # Scale to [0, 1]
        x = (x - 0.5) / 0.5  # Normalize to [-1, 1]

    Args:
        mean (Sequence[Number], optional): Per-channel mean for standard
            normalization. Ignored when ``custom_norm`` is True.
        std (Sequence[Number], optional): Per-channel std for standard
            normalization. Must be given together with ``mean``.
        to_01 (bool): If True, divide inputs by 255 before normalization.
        custom_norm (bool): If True, use a fixed 3-channel mean/std of 0.5
            instead of ``mean``/``std``.
        size (tuple, optional): Fixed size to pad/stack batches to.
        size_divisor (int, optional): Pad image sides to a multiple of this.
        pad_val (Number): Padding value for images.
        seg_pad_val (Number): Padding value for segmentation maps.
        bgr_to_rgb (bool): Convert inputs from BGR to RGB.
        rgb_to_bgr (bool): Convert inputs from RGB to BGR.
        batch_augments (list[dict], optional): Batch-level augmentations
            (stored but not yet instantiated; see TODO below).
        test_cfg (dict, optional): Padding configuration used at test time
            (keys ``size`` and ``size_divisor``).
    """

    def __init__(
            self,
            mean: Optional[Sequence[Number]] = None,
            std: Optional[Sequence[Number]] = None,
            to_01: bool = False,
            custom_norm: bool = False,
            size: Optional[tuple] = None,
            size_divisor: Optional[int] = None,
            pad_val: Number = 0,
            seg_pad_val: Number = 255,
            bgr_to_rgb: bool = False,
            rgb_to_bgr: bool = False,
            batch_augments: Optional[List[dict]] = None,
            test_cfg: Optional[dict] = None,
    ):
        super().__init__()
        self.size = size
        self.size_divisor = size_divisor
        self.pad_val = pad_val
        self.seg_pad_val = seg_pad_val
        self.to_01 = to_01
        self.custom_norm = custom_norm

        assert not (bgr_to_rgb and rgb_to_bgr), (
            '`bgr2rgb` and `rgb2bgr` cannot be set to True at the same time')
        self.channel_conversion = rgb_to_bgr or bgr_to_rgb

        # Standard normalization path (used when custom_norm=False).
        if mean is not None and not self.custom_norm:
            assert std is not None, 'To enable the normalization in ' \
                                    'preprocessing, please specify both ' \
                                    '`mean` and `std`.'
            self._enable_normalize = True
            # Buffers are non-persistent (last arg False): they move with
            # the module's device but are not saved in checkpoints.
            self.register_buffer('mean',
                                 torch.tensor(mean).view(-1, 1, 1), False)
            self.register_buffer('std',
                                 torch.tensor(std).view(-1, 1, 1), False)
        else:
            self._enable_normalize = False

        # Custom normalization path: fixed (x - 0.5) / 0.5 mapping.
        # NOTE: hard-coded for 3-channel inputs.
        if self.custom_norm:
            self._enable_normalize = True
            self.register_buffer(
                'mean',
                torch.tensor([0.5, 0.5, 0.5]).view(-1, 1, 1), False)
            self.register_buffer(
                'std',
                torch.tensor([0.5, 0.5, 0.5]).view(-1, 1, 1), False)

        # TODO: support batch augmentations.
        self.batch_augments = batch_augments

        # Support different padding methods in testing.
        self.test_cfg = test_cfg

    def forward(self, data: dict, training: bool = False) -> Dict[str, Any]:
        """Normalize, pad and channel-convert inputs with custom scaling.

        Args:
            data (dict): Data sampled from the dataloader. Expected keys:
                ``inputs`` (list of CHW tensors) and optionally
                ``data_samples``.
            training (bool): Whether in training mode. If True,
                ``data_samples`` is required and batch padding uses
                ``self.size`` / ``self.size_divisor``.

        Returns:
            Dict[str, Any]: ``inputs`` (stacked batch tensor) and
            ``data_samples``.
        """
        data = self.cast_data(data)  # type: ignore
        inputs = data['inputs']
        data_samples = data.get('data_samples', None)

        # BGR <-> RGB conversion (only applied to 3-channel inputs).
        if self.channel_conversion and inputs[0].size(0) == 3:
            inputs = [_input[[2, 1, 0], ...] for _input in inputs]

        # Convert to float before any arithmetic.
        inputs = [_input.float() for _input in inputs]

        # Step 1: scale to [0, 1] if enabled.
        if self.to_01:
            inputs = [_input / 255.0 for _input in inputs]

        # Step 2: apply normalization (either standard or custom).
        if self._enable_normalize:
            inputs = [(_input - self.mean) / self.std for _input in inputs]

        # Handle batch stacking and padding.
        if training:
            # Fixed: message was accidentally a tuple (trailing comma) and
            # contained a typo ("must be define").
            assert data_samples is not None, (
                'During training, `data_samples` must be defined.')
            inputs, data_samples = stack_batch(
                inputs=inputs,
                data_samples=data_samples,
                size=self.size,
                size_divisor=self.size_divisor,
                pad_val=self.pad_val,
                seg_pad_val=self.seg_pad_val)

            if self.batch_augments is not None:
                inputs, data_samples = self.batch_augments(
                    inputs, data_samples)
        else:
            img_size = inputs[0].shape[1:]
            assert all(input_.shape[1:] == img_size for input_ in inputs), \
                'The image size in a batch should be the same.'
            # Pad images when testing.
            if self.test_cfg:
                inputs, padded_samples = stack_batch(
                    inputs=inputs,
                    size=self.test_cfg.get('size', None),
                    size_divisor=self.test_cfg.get('size_divisor', None),
                    pad_val=self.pad_val,
                    seg_pad_val=self.seg_pad_val)
                # Fixed: guard against data_samples being None at test time;
                # the original zip(None, ...) raised TypeError.
                if data_samples is not None:
                    for data_sample, pad_info in zip(data_samples,
                                                     padded_samples):
                        data_sample.set_metainfo({**pad_info})
            else:
                inputs = torch.stack(inputs, dim=0)

        return dict(inputs=inputs, data_samples=data_samples)