import os

from pathlib import Path
import numpy as np
import mmcv
import mmengine.fileio as fileio

from .build_registry import TRANSFORMS


@TRANSFORMS.register('LoadImageFromFile')
def load_image(image_path,
               color_type='color',
               ignore_empty=False,
               to_float32=False,
               imdecode_backend='cv2',
               backend_args=None):
    """Read and decode an image file into an ndarray.

    This function is modified from mmcv-2.0.0:
        mmcv/TRANSFORMS/loading.py#L14

    Args:
        image_path (str): Path/URI understood by ``mmengine.fileio.get``.
        color_type (str): Decode flag forwarded to ``mmcv.imfrombytes``.
        ignore_empty (bool): If True, return ``None`` instead of raising
            when reading or decoding fails.
        to_float32 (bool): If True, cast the decoded image to float32.
        imdecode_backend (str): Decode backend for ``mmcv.imfrombytes``.
        backend_args (dict, optional): File-backend options forwarded to
            ``mmengine.fileio.get``.

    Returns:
        tuple | None: ``(img, {'ori_shape': (h, w)})`` on success, or
        ``None`` when loading failed and ``ignore_empty`` is True.
    """
    # Copy so downstream code cannot mutate the caller's dict.
    _backend_args = None
    if backend_args is not None:
        _backend_args = backend_args.copy()

    try:
        img_bytes = fileio.get(image_path, backend_args=_backend_args)
        img = mmcv.imfrombytes(img_bytes, flag=color_type,
                               backend=imdecode_backend)
    except Exception:
        if ignore_empty:
            return None
        # Bare ``raise`` re-raises the active exception with its original
        # traceback intact (idiomatic; ``raise e`` adds a redundant frame).
        raise

    ori_shape = (img.shape[0], img.shape[1])
    if to_float32:
        img = img.astype(np.float32)

    return img, {'ori_shape': ori_shape}


@TRANSFORMS.register('Resize')
def resize(img,
           input_shape=None,
           keep_ratio=False,
           interpolation='bilinear',
           clip_object_border=True,
           backend='cv2'):
    """Resize ``img`` to ``input_shape`` (h, w), optionally keeping ratio.

    Returns:
        tuple: ``(resized_img, meta)`` where meta holds
        'scale_factor' as (h_scale, w_scale) and 'resize_shape' (h, w).
    """
    src_h, src_w = img.shape[:2]
    dst_h, dst_w = input_shape

    if not keep_ratio:
        img, w_scale, h_scale = mmcv.imresize(
            img, (dst_w, dst_h), interpolation=interpolation,
            return_scale=True, backend=backend
        )
    else:
        ratio = min(dst_h / src_h, dst_w / src_w)
        img, ratio = mmcv.imrescale(
            img, ratio, interpolation=interpolation,
            return_scale=True, backend=backend)
        # Recompute per-axis scales from the actual output size: the
        # w- and h-scales implied by imrescale have a minor difference;
        # a real fix should be done in mmcv.imrescale in the future.
        out_h, out_w = img.shape[:2]
        h_scale = out_h / src_h
        w_scale = out_w / src_w

    meta = {'scale_factor': (h_scale, w_scale), 'resize_shape': img.shape[:2]}
    return img, meta


@TRANSFORMS.register('ChannelExchange')
def channel_exchange(img):
    """Reverse the order of the last-axis channels (e.g. BGR <-> RGB)."""
    reorder = [2, 1, 0]
    return img[..., reorder]


@TRANSFORMS.register('Normalize')
def normalize(img,
              mean=(123.675, 116.28, 103.53),
              std=(58.395, 57.12, 57.375)):
    """Normalize ``img`` channel-wise: ``(img - mean) / std`` as float32.

    Defaults are the widely used ImageNet per-channel statistics. Tuples
    replace the original list defaults so the function carries no mutable
    default arguments (same values, backward compatible).

    Args:
        img (np.ndarray): Image whose last axis matches ``len(mean)``.
        mean (sequence): Per-channel mean to subtract.
        std (sequence): Per-channel std to divide by.

    Returns:
        np.ndarray: Normalized image, cast to float32.
    """
    # Broadcasting promotes to float64; cast once at the end.
    img = (img - np.asarray(mean)) / np.asarray(std)
    return img.astype(np.float32)


@TRANSFORMS.register('Pad')
def pad(img,
        input_shape=None,
        pad_val=0.0):
    """Pad ``img`` to ``input_shape`` (h, w) with ``pad_val``.

    Args:
        img (np.ndarray): Image of shape (h, w[, c]).
        input_shape (tuple): Target (h, w).
        pad_val (float | int | tuple | list): Fill value; a scalar is
            broadcast to one value per array dimension.

    Returns:
        tuple: ``(padded_img, {'pad_shape': (h, w)})``.
    """
    h, w = img.shape[:2]
    # Already at the target size: skip the mmcv call, but still return the
    # same (img, meta) pair as the padding path below.  (Previously this
    # branch returned the bare image, so callers unpacking the tuple broke
    # and the 'pad_shape' meta key was silently missing.)
    if input_shape == (h, w):
        return img, {'pad_shape': img.shape[:2]}

    if isinstance(pad_val, (float, int)):
        # NOTE(review): the scalar is broadcast over img.ndim, not the
        # channel count — these coincide only for 3-channel HWC images;
        # confirm intended semantics against mmcv.impad.
        pad_val = (pad_val,) * img.ndim
    elif isinstance(pad_val, tuple):
        assert len(pad_val) == img.ndim
    elif isinstance(pad_val, list):
        assert len(pad_val) == img.ndim
        pad_val = tuple(pad_val)

    img = mmcv.impad(img, shape=input_shape, pad_val=pad_val)
    return img, {'pad_shape': img.shape[:2]}


@TRANSFORMS.register('ImageToData')
def image2data(img):
    """Convert an HWC image into a 1xCxHxW float32 batch array."""
    chw = img.transpose(2, 0, 1)
    batched = chw[np.newaxis]
    return batched.astype(np.float32)


@TRANSFORMS.register('RandomCenterCropPad')
def random_center_crop_pad(img,
                           input_shape=None,
                           mean=None,
                           std=None,
                           test_pad_add_pix=0):
    """Center-place ``img`` on an ``input_shape`` canvas filled with ``mean``.

    If the image does not fit inside the target shape minus the
    ``test_pad_add_pix`` margin, it is first shrunk with ``resize``
    (``keep_ratio=True``); it is then pasted into the center of a canvas
    whose channels are filled with the per-channel ``mean`` values.

    Args:
        img (np.ndarray): Image of shape (h, w, c).
        input_shape (tuple): Target (h, w) of the output canvas.
        mean (sequence): Per-channel fill values; must have at least
            ``c`` entries.
        std: Unused in this function; kept for pipeline-config symmetry.
        test_pad_add_pix (int): Margin (pixels) kept free around the image.

    Returns:
        tuple: ``(canvas, meta)``; ``meta`` may carry the keys produced by
        ``resize`` plus 'border' = [y_start, y_end, x_start, x_end]
        as a float32 array.
    """
    meta = {}
    h, w, c = img.shape
    target_h, target_w = input_shape
    min_margin = test_pad_add_pix
    # Shrink (aspect-ratio preserved) only when the image would not fit
    # inside the target shape with the requested margin.
    if h > target_h - min_margin or w > target_w - min_margin:
        img, resize_meta = resize(
            img, keep_ratio=True,
            input_shape=(target_h - min_margin, target_w - min_margin)
        )
        meta.update(resize_meta)
        h, w, c = img.shape

    # Coordinates that center the (possibly resized) image on the canvas.
    y_start = (target_h - h) // 2
    y_end = y_start + h
    x_start = (target_w - w) // 2
    x_end = x_start + w

    # Fill each canvas channel with its mean value, then paste the image.
    # NOTE(review): in-place ``+=`` of a float mean into an integer-dtype
    # canvas raises a numpy casting error — presumably ``img`` is float by
    # this stage of the pipeline; confirm against the transform order.
    cropped_img = np.zeros((target_h, target_w, c), dtype=img.dtype)
    for i in range(c):
        cropped_img[:, :, i] += mean[i]
    cropped_img[y_start:y_end, x_start:x_end, :] = img
    meta['border'] = np.array([y_start, y_end, x_start, x_end],
                              dtype=np.float32)

    return cropped_img, meta


@TRANSFORMS.register('GetImageShape')
def get_img_shape(img):
    """Pass ``img`` through, recording its spatial size as 'img_shape'."""
    height, width = img.shape[:2]
    return img, {'img_shape': (height, width)}


@TRANSFORMS.register('GetBatchInputShape')
def get_batch_input_shape(img):
    """Pass ``img`` through, recording its spatial size as 'batch_input_shape'."""
    spatial = (img.shape[0], img.shape[1])
    return img, {'batch_input_shape': spatial}
