# ! ./model

import os

import cv2
import torch
import torch.utils.data
import torch.utils.data.dataloader as DataLoader
import torch.utils.data.dataset as Dataset
import torchvision.transforms as transforms
import torchvision
from torchvision.transforms.transforms import ToTensor

from . import utils
import numpy as np
from PIL import Image


class ImageFactory():
    """Builds datasets and dataloaders for image classification from txt files.

    A class-list txt (one class name per line) defines the label vocabulary;
    an images-and-labels txt (``<relative/img/path> <label>`` per line)
    defines the samples — see ``IterableImageDataset`` for that format.
    """

    # Shared transform: ndarray (H, W, C) image -> CHW float tensor in [0, 1].
    _to_tensor_transform = torchvision.transforms.Compose(
        [transforms.ToTensor()])

    def __init__(self, cls_txt_path) -> None:
        """Read the class-list txt and build the class<->index mappings.

        Args:
            cls_txt_path: path to a utf-8 txt file, one class name per line.
        """
        self.all_cls_list = []
        self.parse_labels(cls_txt_path)

    @classmethod
    def image_to_tensor(cls, img_array):
        """Convert an image array (H, W, C) to a torch tensor."""
        return cls._to_tensor_transform(img_array)

    def _default_label_processor(self, line: str):
        """Split one ``<img_path> <label>`` line into space-separated fields."""
        return line.split('\n')[0].split(' ')

    def parse_labels(self, cls_txt_path):
        """Populate ``cls_to_idx_dict`` / ``idx_to_class_dict`` from the txt.

        Classes are de-duplicated and sorted with ``utils.natural_key`` so the
        index assignment is deterministic across runs.
        """
        with open(cls_txt_path, 'r', encoding='utf-8') as f:
            for line in f:
                cls_str = line.rstrip('\n')
                # BUGFIX: skip blank lines so they do not register an
                # empty-string class in the vocabulary.
                if cls_str:
                    self.all_cls_list.append(cls_str)
        sorted_labels = sorted(set(self.all_cls_list), key=utils.natural_key)
        self.cls_to_idx_dict = {c: idx for idx, c in enumerate(sorted_labels)}
        self.idx_to_class_dict = {
            idx: c for c, idx in self.cls_to_idx_dict.items()
        }

    def get_iterable_datasets(
        self,
        imgs_and_labels_txt_path,
        labels_txt_line_processor=None,
        train_split_ratio=0.7,
        val_split_ratio=0.2,
        test_split_ratio=0.1,
        img_reader_fn=cv2.imread,
    ):
        """Load the full dataset and randomly split it into train/val/test.

        Args:
            imgs_and_labels_txt_path: txt file of ``<img_path> <label>`` lines.
            labels_txt_line_processor: optional callable mapping one raw line
                to its fields; defaults to ``self._default_label_processor``.
            train_split_ratio / val_split_ratio / test_split_ratio: fractions
                of the dataset per split; they should sum to 1.0.
            img_reader_fn: image reader (currently unused by
                ``IterableImageDataset``; kept for interface compatibility).

        Returns:
            (train_dataset, val_dataset, test_dataset, idx_to_class_dict)
        """
        # BUGFIX: the caller-supplied processor used to be ignored and the
        # *unbound* class function was passed instead.
        if labels_txt_line_processor is None:
            labels_txt_line_processor = self._default_label_processor
        full_dataset = IterableImageDataset(
            imgs_labels_txt_filepath=imgs_and_labels_txt_path,
            label_txt_line_processor=labels_txt_line_processor,
            cls_to_idx_dict=self.cls_to_idx_dict)

        train_dataset_len = int(len(full_dataset) * train_split_ratio)
        val_dataset_len = int(len(full_dataset) * val_split_ratio)
        # The remainder goes to the test split, so the three lengths always
        # sum to len(full_dataset) exactly, as random_split requires.
        test_dataset_len = (len(full_dataset) - train_dataset_len -
                            val_dataset_len)

        train_dataset, val_dataset, test_dataset = torch.utils.data.random_split(
            dataset=full_dataset,
            lengths=[train_dataset_len, val_dataset_len, test_dataset_len])

        return train_dataset, val_dataset, test_dataset, self.idx_to_class_dict

    def get_iterable_image_dataloaders(self,
                                       imgs_and_labels_txt_path,
                                       batch_size,
                                       labels_txt_line_processor=None,
                                       train_split_ratio=1.0,
                                       val_split_ratio=None,
                                       test_split_ratio=None,
                                       img_reader_fn=cv2.imread):
        """Build train/val/test dataloaders; ``None`` ratios are inferred.

        Ratio resolution:
          * both val and test ``None``: no val/test split (both 0.0);
          * only test ``None``: test gets the remainder after train and val;
          * only val ``None``: val gets the remainder after train and test.

        Returns:
            (train_dataloader, val_dataloader, test_dataloader,
            idx_to_class_dict); the val/test loaders are ``None`` when their
            split is empty.
        """
        # BUGFIX: the previous resolution clobbered an explicitly provided
        # val ratio (when only test was None) and zeroed a provided test
        # ratio (when only val was None); it also used ``== None``.
        if val_split_ratio is None and test_split_ratio is None:
            val_split_ratio = test_split_ratio = 0.0
        elif val_split_ratio is None:
            val_split_ratio = max(
                0.0, 1.0 - train_split_ratio - test_split_ratio)
        elif test_split_ratio is None:
            test_split_ratio = max(
                0.0, 1.0 - train_split_ratio - val_split_ratio)

        (train_dataset, val_dataset, test_dataset,
         idx_to_cls_dict) = self.get_iterable_datasets(
             imgs_and_labels_txt_path, labels_txt_line_processor,
             train_split_ratio, val_split_ratio, test_split_ratio,
             img_reader_fn)
        # https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html#testing-forward-method-optional
        train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                       batch_size=batch_size,
                                                       shuffle=True)
        # NOTE(review): val/test loaders shuffle too, matching the original
        # behavior; evaluation loaders are conventionally shuffle=False.
        val_dataloader = torch.utils.data.DataLoader(
            val_dataset, batch_size=batch_size,
            shuffle=True) if len(val_dataset) != 0 else None
        test_dataloader = torch.utils.data.DataLoader(
            test_dataset, batch_size=batch_size,
            shuffle=True) if len(test_dataset) != 0 else None
        return (train_dataloader, val_dataloader, test_dataloader,
                idx_to_cls_dict)


class IterableImageDataset(torch.utils.data.Dataset):
    """Map-style dataset for images kept in one folder with a separate label txt.

    Each line of the label txt is expected to look like::

        <image path, relative to the txt file> <label>
    """
    # Default input transform: ndarray HWC image -> CHW float tensor in [0, 1].
    _img_tensor_transformer = transforms.Compose([transforms.ToTensor()])
    # Default target transform: coerce a (possibly padded) label string to int.
    _str_to_num_transformer = lambda x: int(str(x).strip())

    def __init__(
        self,
        imgs_labels_txt_filepath,
        label_txt_line_processor,
        cls_to_idx_dict,
        input_transform=_img_tensor_transformer,
        target_transform=_str_to_num_transformer,
        is_splited=False,
        enable_transform=True,
    ) -> None:
        """Parse the label txt and resolve every image path to an absolute one.

        Args:
            imgs_labels_txt_filepath: txt file of ``<img_path> <label>`` lines;
                image paths are taken relative to this file's location.
            label_txt_line_processor: line-parsing callable (stored but not
                currently used; parsing is done inline below).
            cls_to_idx_dict: mapping from class-name string to integer index.
            input_transform: applied to the decoded image in ``__getitem__``.
            target_transform: applied to the integer label in ``__getitem__``.
            is_splited: flag recorded as-is (not used in this block).
            enable_transform: flag recorded as-is (not used in this block).
        """
        super().__init__()
        # NOTE reader can be a function pointer
        self.img_labels = None
        self.img_list = []
        self.label_list = []
        with open(imgs_labels_txt_filepath, 'r', encoding='utf-8') as f:
            for line in f:
                fields = line.split('\n')[0].split(' ')
                # BUGFIX: a blank or malformed line (fewer than two fields)
                # used to raise IndexError on fields[1]; skip it instead.
                if len(fields) < 2:
                    continue
                # Resolve the image path relative to the label txt location.
                img_abs_path = utils.abs_rel_path_concat(
                    imgs_labels_txt_filepath, fields[0])
                self.img_list.append(img_abs_path)
                self.label_list.append(fields[1])

        # Map label strings to their integer class indices once, up front.
        self.cls_to_idx_dict = cls_to_idx_dict
        self.label_list = [
            self.cls_to_idx_dict[label] for label in self.label_list
        ]
        self.label_txt_line_processor = label_txt_line_processor
        self.input_transform = input_transform
        self.target_transform = target_transform
        self.is_splited = is_splited
        self.enable_transform = enable_transform

    def __len__(self):
        """Number of (image, label) samples parsed from the label txt."""
        return len(self.label_list)

    def __getitem__(self, index):
        """Return ``(image, label)`` for ``index``, transforms applied.

        Raises:
            FileNotFoundError: if the image on disk cannot be decoded.
        """
        img_path = self.img_list[index]
        image = cv2.imread(img_path, cv2.IMREAD_COLOR)
        # BUGFIX: cv2.imread returns None for missing/unreadable files, which
        # used to crash in cvtColor with a cryptic assertion; fail clearly.
        if image is None:
            raise FileNotFoundError(f"Cannot read image file: {img_path}")
        # OpenCV decodes BGR; convert to the RGB channel order models expect.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        label = self.label_list[index]
        if self.input_transform:
            image = self.input_transform(image)
        if self.target_transform:
            label = self.target_transform(label)
        return image, label


if __name__ == "__main__":
    # BUGFIX: the original called get_iterable_datasets on the class (no
    # instance) with two path positionals that don't match the signature, and
    # unpacked the 4-tuple result into 3 names. Build a factory first.
    # TODO(review): confirm the class-list txt path — assumed to live next to
    # the label file.
    factory = ImageFactory("../data/expert/label.txt")
    (train_iterable_dataset, val_iterable_dataset, test_iterable_dataset,
     idx_to_class_dict) = factory.get_iterable_datasets(
         "../data/expert/label.txt")

    for i, example in enumerate(train_iterable_dataset):
        print(i)
        print(list(example))
