#!/usr/bin/env python
# @Date    : 2023-03-01
# @Author  : Bright (brt2@qq.com)
# @Link    : https://gitee.com/brt2
# @Version : 0.1.0

import numpy as np

WITHOUT_TORCH, WITHOUT_CV2 = False, False
try:
    from torch.utils.data import DataLoader
    from torchvision.datasets import ImageFolder
except ImportError:
    WITHOUT_TORCH = True

try:
    import cv2
except ImportError:
    WITHOUT_CV2 = True

try:
    from logzero import logger
except ImportError:
    from logging import getLogger
    logger = getLogger(__name__)
finally:
    logger.setLevel(10)

#####################################################################
# util-v0.2.1
import os.path
from glob import glob

def insensitive_glob(pattern):
    """Glob *pattern* case-insensitively by bracketing every letter.

    E.g. ``"*.jpg"`` becomes ``"*.[jJ][pP][gG]"``, which matches both cases
    even on case-sensitive file systems.  Returns the list of matching paths.
    """
    def either(c):
        # '[aA]' matches either case; non-letters pass through unchanged.
        return '[%s%s]' % (c.lower(), c.upper()) if c.isalpha() else c
    # BUGFIX: this module does `from glob import glob`, so `glob` is the
    # function itself -- the original `glob.glob(...)` raised AttributeError.
    return glob(''.join(map(either, pattern)))

def glob_dir(path_dir, allow_extends=None, recursive: bool=True):
    """List the files under *path_dir* (adapted from imagededup general_utils).

    Args:
        path_dir: directory to scan.
        allow_extends: optional iterable of extensions (e.g. ``(".jpg",)``);
            when given, only files ending with one of them are returned.
        recursive: descend into subdirectories when True.

    Returns:
        List of file paths; directories and hidden files are excluded.
    """
    glob_pattern = os.path.join(path_dir, '**/*' if recursive else '*')

    def run_glob(pattern):
        # BUGFIX: the hidden-file check used to test the WHOLE path with
        # startswith('.'), so a relative path_dir such as "./images" caused
        # every result to be discarded; test the basename instead.
        return [
            p  # p.absolute()
            for p in glob(pattern, recursive=recursive)
            if not (os.path.basename(p).startswith('.') or os.path.isdir(p))
        ]

    if allow_extends:
        list_files = []
        for ext in allow_extends:
            list_files.extend(run_glob(glob_pattern + ext))
    else:
        list_files = run_glob(glob_pattern)
    return list_files

def glob_imgs(image_dir):
    """Recursively list image files (.jpg/.jpeg/.png/.webp) under *image_dir*."""
    # BUGFIX: "webp" was missing its leading dot, so the resulting glob
    # pattern "*webp" also matched names like "foowebp" that merely end in
    # those letters without an extension separator.
    return glob_dir(image_dir, allow_extends=(".jpg", ".jpeg", ".png", ".webp"), recursive=True)

#####################################################################

class BaseImageDataset:
    """Minimal dataset base class, deliberately independent of torch.utils.data.Dataset.

    Mirrors the attributes of torchvision.datasets.ImageFolder:
        * samples: List of (sample path, class_index) tuples
        * targets: The class_index value for each image in the dataset
        * classes: List of the class names sorted alphabetically
        * class_to_idx: Dict with items (class_name, class_index)
    """
    def __init__(self, transform=None, image_loader="pil"):
        # transform: optional callable applied to every loaded image.
        self.transform = transform
        self._build_loader(image_loader)
        # self.build_classes()

    def _build_loader(self, image_loader: str):
        """Select the image-loading backend: "pil", "cv2" or "cv2_gray".

        Raises KeyError when *image_loader* is unknown, or names a cv2
        backend while cv2 is not installed.
        """
        try:
            from torchvision.datasets.folder import pil_loader
        except ImportError:
            from PIL import Image

            def pil_loader(path: str) -> Image.Image:
                with open(path, "rb") as f:
                    img = Image.open(f)
                    return img.convert("RGB")

        # BUGFIX: the loader dict used to reference cv2_loader
        # unconditionally, so a missing cv2 raised NameError here even when
        # the caller asked for the "pil" loader. Register cv2 entries only
        # when cv2 imported successfully.
        loaders = {"pil": pil_loader}
        if not WITHOUT_CV2:
            def cv2_loader(uri, to_gray=False):
                mode = cv2.IMREAD_GRAYSCALE if to_gray else cv2.IMREAD_COLOR
                img = cv2.imread(uri, mode)
                return img

            loaders["cv2"] = cv2_loader
            loaders["cv2_gray"] = lambda x: cv2_loader(x, to_gray=True)

        self.loader = loaders[image_loader]

    def build_classes(self, class_names: list, class_index=None):
        """Populate classes/class_to_idx; subclasses must also set self.samples."""
        self.classes = class_names
        # Default to positional indices when no explicit mapping is given.
        class_index = class_index or range(len(self.classes))
        self.class_to_idx = dict(zip(self.classes, class_index))
        # self.idx_to_class = {i:c for c, i in self.class_to_idx.items()}
        raise NotImplementedError("需要【BaseImageDataset】子类重定义self.samples实现")

    def __len__(self) -> int:
        return len(self.samples)

    def _load_itemdata(self, path):
        """Load one image from *path* and apply the optional transform."""
        imgdata = self.loader(path)
        if self.transform is not None:
            imgdata = self.transform(imgdata)
        return imgdata

    def load_sample(self, sample):
        """Define which data a sample yields; override when necessary."""
        path, class_index = sample
        return self._load_itemdata(path), class_index

    def __getitem__(self, item):
        """item: int or **slice**.

        Returns (X, class_index) for an int, or a list of such tuples for a
        slice. X may only be a tensor, numpy array, number, dict or list.
        """
        sample = self.samples[item]
        if isinstance(item, slice):
            # indx = range(len(self))[item]
            return [self.load_sample(s) for s in sample]
        else:
            return self.load_sample(sample)

class FlatImageDirWithLabels(BaseImageDataset):
    """Flat image directory whose labels come from a separate target file.

    Each non-empty line of *fpath_target* reads
    ``<relative_image_path> <class_index>``.
    """
    def __init__(self, root: str, fpath_target, transform=None, image_loader="pil"):
        super().__init__(transform, image_loader)

        self._imgdir = root  # image paths in the target file are relative to this
        self.build_classes(fpath_target)

    def build_classes(self, fpath_target: str):
        """Parse the target file into self.samples and self.class_to_idx."""
        self.samples = []
        with open(fpath_target, "r", encoding="utf-8") as fp:
            for line in fp:
                line = line.strip()
                if not line:
                    continue  # ROBUSTNESS: tolerate blank/trailing lines
                # BUGFIX: split from the right so file names that contain
                # spaces keep their full path intact (plain split() broke them).
                fpath, index = line.rsplit(maxsplit=1)
                self.samples.append((fpath, int(index)))
        # NOTE(review): this maps file path -> index (not class name -> index),
        # diverging from the base-class attribute description.
        self.class_to_idx = dict(self.samples)

    def load_sample(self, sample):
        """Join the stored relative path onto the image root, then load."""
        fname, class_index = sample
        path = os.path.join(self._imgdir, fname)
        return self._load_itemdata(path), class_index

class FlatImageDirWithoutLabels(BaseImageDataset):
    """Flat image directory (subdirectories supported); since no class
    information exists, each file path doubles as its own label."""
    def __init__(self, root, transform=None, image_loader="pil"):
        super().__init__(transform, image_loader)
        self.build_classes(root)

    def build_classes(self, root: str):
        # Enumerate every image under root; the enumeration position serves
        # as the pseudo class index.
        samples = []
        for index, path in enumerate(glob_imgs(root)):
            samples.append((path, index))
        self.samples = samples
        self.class_to_idx = dict(samples)

#####################################################################

class NumpyDataLoader:
    """Minimal batch loader over an indexable dataset, independent of torch.

    The dataset must support ``len()`` and slice indexing that yields an
    iterable of ``(x, y)`` pairs (as BaseImageDataset does).
    """
    def __init__(self, dataset, batch_size=1):
        self.dataset = dataset
        self.batch_size = batch_size
        # Ceiling division: a smaller trailing batch still counts as one.
        self.batch_length = -(-len(dataset) // batch_size)

    def __len__(self) -> int:
        return self.batch_length

    def get_batch(self, index):
        """Stack the *index*-th batch into a pair of ndarrays (X, Y)."""
        # logger.warning("由于先执行transforms, 当前对象可能ToTensor导致检测到numpy格式报错")
        start = self.batch_size * index
        stop = start + self.batch_size

        xs, ys = [], []
        for sample, label in self.dataset[start:stop]:
            # print(">>>", sample.size)
            xs.append(sample)
            ys.append(label)

        return np.stack(xs, axis=0), np.stack(ys, axis=0)

    def __getitem__(self, item):
        """Fetch batch number *item*; past-the-end raises StopIteration."""
        try:
            return self.get_batch(item)
        except ValueError:
            # An empty batch fails np.stack with ValueError; translate it to
            # StopIteration (IndexError would also end a naive for-loop).
            raise StopIteration

    def __iter__(self):
        # Reset the cursor so the loader can be iterated repeatedly.
        self.batch_index = -1
        return self

    def __next__(self):
        self.batch_index += 1
        if self.batch_index >= self.batch_length:
            raise StopIteration()
        return self.get_batch(self.batch_index)

# inputs, labels = next(iter(numpy_loader))

# Fallback: when torch is unavailable, expose NumpyDataLoader under the
# DataLoader name so downstream code can use either backend transparently.
if WITHOUT_TORCH:
    DataLoader = NumpyDataLoader


if __name__ == "__main__":
    from torchvision.transforms import transforms

    transform = transforms.Compose([
        transforms.Resize((256, 256)),
        # transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]),
    ])

    # BUGFIX: "ImageDir" was never defined in this file; the label-free flat
    # directory dataset is what this demo exercises.
    ds = FlatImageDirWithoutLabels("headimage2", transform=transform)
    dl = DataLoader(ds, batch_size=4)

    for i, (arr_ims, fnames) in enumerate(dl):
        print("...", i, arr_ims.shape, fnames)

    # Iterate a second time to show the loader is restartable.
    for i, (arr_ims, fnames) in enumerate(dl):
        print(">>>", i, arr_ims.shape, fnames)

    #####################################################################

    ds = FlatImageDirWithoutLabels(r"D:\Home\workspace\ipynb\annoy/trademark")
    # dl = torchvision.dataset.DataLoader(ds)  # rejects PIL images that skipped transforms.ToTensor()
    # BUGFIX: "SimpleDataLoader" was never defined; NumpyDataLoader is meant
    # here (no Tensor support, but PIL images are converted to ndarray
    # automatically when stacked).
    dl = NumpyDataLoader(ds)
    # batch_size=4 would fail here: without a uniform resize, images of
    # different sizes cannot be stacked into one batch matrix, so only
    # batch_size==1 works -- at which point it degenerates to iterating the
    # dataset directly:
    for pimg, label in ds:
        print(">>>", pimg.size, label)

    for batch_imarr, labels in dl:  # process batch by batch
        print(">>>", batch_imarr.shape)  # e.g. (1, 250, 231, 3); per-batch image shape varies
