import os
import pathlib
from functools import partial
from typing import Iterable, Callable, Tuple, Union, List

import numpy as np
import torch
import torchvision
import tqdm
from sklearn.model_selection import train_test_split, KFold
from torch.utils.data import DataLoader
from torchvision.datasets.folder import IMG_EXTENSIONS
from torchvision.datasets.folder import default_loader

from .utils import show_batch
from .auto_augment import auto_augment_policy, AutoAugment

__all__ = ["dataloader"]

def _build_default_transform(train: bool):
    """Assemble the default preprocessing pipeline for one phase.

    Training prepends AutoAugment; both phases share the 224x224 resize
    and tensor conversion.
    """
    steps = [AutoAugment(auto_augment_policy())] if train else []
    steps.append(torchvision.transforms.Resize((224, 224)))
    steps.append(torchvision.transforms.ToTensor())
    return torchvision.transforms.Compose(steps)


# Default torchvision transforms keyed by phase ("train" / "val").
default_auto_augment = {
    "train": _build_default_transform(train=True),
    "val": _build_default_transform(train=False),
}


def default_valid_img_file(file: pathlib.Path) -> bool:
    """Decide from the file name alone whether *file* is an image file.

    Hidden files (name starting with ".") are rejected; otherwise the
    extension is checked, case-insensitively, against torchvision's known
    image extensions.
    """
    path = pathlib.Path(file)
    if path.name.startswith("."):
        return False
    return path.suffix.lower() in IMG_EXTENSIONS


def default_valid_npy_file(file: pathlib.Path) -> bool:
    """Return True if *file* looks like a non-hidden NumPy ``.npy`` file."""
    path = pathlib.Path(file)
    if path.name.startswith("."):
        return False
    return path.suffix.lower() == ".npy"


def default_npz_loader(file):
    """Load a saved NumPy array from *file* as a torch tensor.

    The tensor shares memory with the loaded array (``torch.from_numpy``).
    """
    array = np.load(file)
    return torch.from_numpy(array)


def default_label(file: pathlib.Path) -> str:
    """Map a sample file to its label name.

    The label is the name of the directory that directly contains *file*
    (the standard class-per-folder layout).
    """
    parent_dir = file.parent
    return parent_dir.name


def find_img(root, is_valid_file=default_valid_img_file, get_label=default_label,
             class_to_idx=None) \
        -> Tuple[Iterable[Tuple[pathlib.Path, int]], dict]:
    """Recursively collect ``(file, label_index)`` pairs under *root*.

    Files accepted by *is_valid_file* are labelled via *get_label*; label
    names are interned into *class_to_idx* (created when not supplied) in
    the order they are first seen during a depth-first walk.

    Raises FileNotFoundError when a visited path does not exist.
    Returns a one-shot iterator of (path, index) pairs plus the mapping.
    """
    class_to_idx = {} if class_to_idx is None else class_to_idx
    images, labels = [], []
    # Explicit stack instead of recursion; children are pushed in reverse so
    # pop() visits them in the same depth-first iterdir() order as before.
    stack = [pathlib.Path(root)]
    while stack:
        node = stack.pop()
        if not node.exists():
            raise FileNotFoundError(f"{node} not exists")
        if node.is_file():
            if not is_valid_file(node):
                continue
            images.append(node)
            label = get_label(node)
            if label not in class_to_idx:
                class_to_idx[label] = len(class_to_idx)
            labels.append(class_to_idx[label])
        else:
            stack.extend(reversed(list(node.iterdir())))
    return zip(images, labels), class_to_idx


class SimpleDataset(torch.utils.data.Dataset):
    """Dataset over pre-collected ``(path, original_label_idx)`` pairs.

    Label indices are normalised: the names in *class_to_idx* are sorted
    alphabetically and ``idx_table`` translates an original index into its
    sorted rank, so ``__getitem__`` always returns the sorted-order index.

    NOTE(review): ``idx_table`` assumes iterating *class_to_idx* yields keys
    in ascending order of their original indices (position ``i`` of the
    built list must correspond to original index ``i``).  That holds for
    mappings filled by ``find_img`` (values assigned as ``len(dict)`` on
    insertion) — confirm before passing any other mapping.
    """

    def __init__(self, samples, class_to_idx, loader=default_loader, transform=None, target_transform=None):
        super().__init__()
        self.samples = samples  # sequence of (path, original_label_idx)
        self.loader = loader  # callable: path -> sample (e.g. PIL image)
        self.transform = transform  # applied to the loaded sample
        self.target_transform = target_transform  # applied to the raw label index
        # label name -> alphabetical rank
        self.class_to_idx = {k: i for i, k in enumerate(sorted(class_to_idx))}
        # original index -> alphabetical rank (see class NOTE)
        self.idx_table = torch.tensor(
            [self.class_to_idx[k] for k in class_to_idx])

    def __len__(self) -> int:
        return len(self.samples)

    @staticmethod
    def from_torchvision_dataset(dataset, transform=None, target_transform=None) -> 'SimpleDataset':
        """Wrap a torchvision folder dataset, reusing its samples and loader.

        The torchvision dataset's own transforms are ignored; the ones
        given here are applied instead.
        """
        return SimpleDataset(dataset.samples, dataset.class_to_idx, dataset.loader, transform, target_transform)

    def __getitem__(self, item):
        image, label = self.samples[item]
        image = self.loader(image)
        if self.transform is not None:
            image = self.transform(image)
        if self.target_transform is not None:
            label = self.target_transform(label)
        # Translate the (possibly transformed) original index to sorted order.
        # NOTE(review): a non-integer label after target_transform would break
        # this tensor indexing — confirm callers keep labels integral.
        return image, self.idx_table[label]


class DataLoaders:
    """A (train, val) pair of loaders with convenience accessors.

    Index 0 is the training loader, index 1 the validation loader.
    """

    def __init__(self, *dls):
        assert len(dls) == 2
        self.dls = dls

    def __getitem__(self, item):
        return self.dls[item]

    @property
    def class_to_idx(self):
        """Label-name -> index mapping of the training dataset."""
        train_loader = self.dls[0]
        return train_loader.dataset.class_to_idx

    @property
    def num_classes(self):
        """Number of classes, read from the training dataset's mapping."""
        return len(self.class_to_idx)

    @property
    def num_samples(self):
        """Dataset sizes as ``{"train": ..., "val": ...}``."""
        train_loader, val_loader = self.dls
        return {"train": len(train_loader.dataset),
                "val": len(val_loader.dataset)}

    def show_train_batch(self, rows=4, cols=4, figsize=(10, 6)):
        """Plot one batch from the training loader."""
        show_batch(self[0], rows, cols, figsize)

    def show_val_batch(self, rows=4, cols=4, figsize=(10, 6)):
        """Plot one batch from the validation loader."""
        show_batch(self[1], rows, cols, figsize)


class DataLoaderUtil:
    """Static factory methods that build train/val ``DataLoader`` pairs.

    Exported at module level under the alias ``dataloader``.
    """

    # Placeholders filled in at the bottom of the module: preprocess_data
    # becomes the feature-caching function, load_preprocessed_data a kwargs
    # dict for reading the cached ".npy" features back.
    preprocess_data = None
    load_preprocessed_data = None

    @staticmethod
    def from_root(root: Union[str, Iterable[str]], val_rate: float = 0.2,
                  batch_size: int = 32,
                  shuffle: bool = True,
                  val_shuffle: bool = True,
                  transform: Union[Callable,
                                   None] = default_auto_augment["train"],
                  target_transform: Union[Callable, None] = None,
                  val_transform: Union[Callable,
                                       None] = default_auto_augment["val"],
                  val_target_transform: Union[Callable, None] = None,
                  collate_fn: Union[Callable, None] = None,
                  drop_last: bool = False,
                  loader: Callable = default_loader,
                  is_valid_file: Callable = default_valid_img_file,
                  default_label: Callable = default_label,
                  **kwargs
                  ) -> DataLoaders:
        """Recursively scan *root* (one directory or several) and split train/val.

        Labels come from *default_label* (by default the parent directory
        name).  *val_rate* is the validation fraction; 0 keeps every sample
        in the train loader and leaves the val dataset empty.  Extra
        **kwargs go to both ``DataLoader`` constructors.

        NOTE: the *default_label* parameter shadows the module-level
        function of the same name, which is also its default value.
        """
        assert 0 <= val_rate < 1
        class_to_idx = {}
        if isinstance(root, str):
            samples, class_to_idx = find_img(root, is_valid_file=is_valid_file, get_label=default_label,
                                             class_to_idx=class_to_idx)
            samples = list(samples)
        else:
            # Several roots: accumulate samples while sharing one label mapping.
            samples = []
            for r in root:
                s, class_to_idx = find_img(r, is_valid_file=is_valid_file, get_label=default_label,
                                           class_to_idx=class_to_idx)
                samples.extend(s)
        if val_rate == 0:
            train_sample, val_sample = samples, []
        else:
            train_sample, val_sample = train_test_split(
                samples, test_size=val_rate, shuffle=shuffle)
        train_dataset = SimpleDataset(
            train_sample, class_to_idx, loader, transform, target_transform)
        val_dataset = SimpleDataset(
            val_sample, class_to_idx, loader, val_transform, val_target_transform)
        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, collate_fn=collate_fn,
                                  drop_last=drop_last, **kwargs)
        val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=val_shuffle, collate_fn=collate_fn,
                                drop_last=drop_last, **kwargs)
        return DataLoaders(train_loader, val_loader)

    @staticmethod
    def from_samples(train_sample: Iterable[Tuple[str, int]],
                     val_sample: Iterable[Tuple[str, int]],
                     class_to_idx: Union[dict, None] = None,
                     batch_size: int = 32,
                     shuffle: bool = True,
                     val_shuffle: bool = True,
                     transform: Union[Callable,
                                      None] = default_auto_augment["train"],
                     target_transform: Union[Callable, None] = None,
                     val_transform: Union[Callable,
                                          None] = default_auto_augment["val"],
                     val_target_transform: Union[Callable, None] = None,
                     collate_fn: Union[Callable, None] = None,
                     drop_last: bool = False,
                     loader: Callable = default_loader,
                     is_valid_file: Callable = default_valid_img_file,
                     **kwargs
                     ) -> DataLoaders:
        """Build loaders from already-collected (path, label_idx) pairs.

        Every path must pass *is_valid_file*.  When *class_to_idx* is not
        given, label indices in *train_sample* are assumed to cover
        0..N-1 and class names default to the stringified indices.
        """
        assert all([is_valid_file(pair[0]) for pair in train_sample])
        assert all([is_valid_file(pair[0]) for pair in val_sample])
        num_classes = len(set([pair[1] for pair in train_sample]))
        if class_to_idx is None:
            class_to_idx = {str(i): i for i in range(num_classes)}
        train_dataset = SimpleDataset(
            train_sample, class_to_idx, loader, transform, target_transform)
        val_dataset = SimpleDataset(
            val_sample, class_to_idx, loader, val_transform, val_target_transform)
        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, collate_fn=collate_fn,
                                  drop_last=drop_last, **kwargs)
        val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=val_shuffle, collate_fn=collate_fn,
                                drop_last=drop_last, **kwargs)
        return DataLoaders(train_loader, val_loader)

    @staticmethod
    def from_root_with_kfold(root: Union[str, Iterable[str]], k: int = 5,
                             batch_size: int = 32,
                             shuffle: bool = True,
                             val_shuffle: bool = True,
                             transform: Union[Callable,
                                              None] = default_auto_augment["train"],
                             target_transform: Union[Callable, None] = None,
                             val_transform: Union[Callable,
                                                  None] = default_auto_augment["val"],
                             val_target_transform: Union[Callable,
                                                         None] = None,
                             collate_fn: Union[Callable, None] = None,
                             drop_last: bool = False,
                             loader: Callable = default_loader,
                             is_valid_file: Callable = default_valid_img_file,
                             default_label: Callable = default_label,
                             **kwargs
                             ) -> List[DataLoaders]:
        """Scan *root* like ``from_root`` and return *k* cross-validation splits.

        Each element of the returned list is a ``DataLoaders`` pair built
        from one KFold train/val partition of the same sample list.
        """
        class_to_idx = {}
        if isinstance(root, str):
            samples, class_to_idx = find_img(root, is_valid_file=is_valid_file, get_label=default_label,
                                             class_to_idx=class_to_idx)
            samples = list(samples)
        else:
            # Several roots: accumulate samples while sharing one label mapping.
            samples = []
            for r in root:
                s, class_to_idx = find_img(r, is_valid_file=is_valid_file, get_label=default_label,
                                           class_to_idx=class_to_idx)
                samples.extend(s)
        kfold = KFold(n_splits=k, shuffle=shuffle)
        dlss = []
        for train_idx, val_idx in kfold.split(samples):
            train_sample = [samples[i] for i in train_idx]
            val_sample = [samples[i] for i in val_idx]
            train_dataset = SimpleDataset(
                train_sample, class_to_idx, loader, transform, target_transform)
            val_dataset = SimpleDataset(
                val_sample, class_to_idx, loader, val_transform, val_target_transform)
            train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, collate_fn=collate_fn,
                                      drop_last=drop_last, **kwargs)
            val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=val_shuffle, collate_fn=collate_fn,
                                    drop_last=drop_last, **kwargs)
            dlss.append(DataLoaders(train_loader, val_loader))
        return dlss

    @staticmethod
    def from_single_folder(root,
                           batch_size: int = 32,
                           shuffle: bool = True,
                           transform: Union[Callable,
                                            None] = default_auto_augment["train"],
                           target_transform: Union[Callable, None] = None,
                           collate_fn: Union[Callable, None] = None,
                           drop_last: bool = False,
                           loader: Callable = default_loader,
                           is_valid_file: Callable = default_valid_img_file,
                           **kwargs
                           ) -> DataLoader:
        """Build a single ``DataLoader`` over one class-per-subfolder directory."""
        load_from_folder = partial(
            torchvision.datasets.ImageFolder, loader=loader, is_valid_file=is_valid_file)
        # The transforms passed to ImageFolder here are effectively unused:
        # SimpleDataset applies its own copies below.
        dataset = load_from_folder(
            root, transform=transform, target_transform=target_transform)
        dataset = SimpleDataset.from_torchvision_dataset(
            dataset, transform, target_transform)
        # NOTE: rebinds the local name `loader` from the file-loader callable
        # to the resulting DataLoader.
        loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, collate_fn=collate_fn, drop_last=drop_last,
                            **kwargs)
        return loader

    @staticmethod
    def from_folder(train_dir, val_dir,
                    batch_size: int = 32,
                    shuffle: bool = True,
                    val_shuffle: bool = False,
                    transform: Union[Callable,
                                     None] = default_auto_augment["train"],
                    target_transform: Union[Callable, None] = None,
                    val_transform: Union[Callable,
                                         None] = default_auto_augment["val"],
                    val_target_transform: Union[Callable, None] = None,
                    collate_fn: Union[Callable, None] = None,
                    drop_last: bool = False,
                    loader: Callable = default_loader,
                    is_valid_file: Callable = default_valid_img_file,
                    **kwargs
                    ) -> DataLoaders:
        """Build a ``DataLoaders`` pair from separate train/val directories."""
        train_loader = DataLoaderUtil.from_single_folder(train_dir, batch_size, shuffle, transform, target_transform,
                                                         collate_fn, drop_last, loader, is_valid_file, **kwargs)
        val_loader = DataLoaderUtil.from_single_folder(val_dir, batch_size, val_shuffle, val_transform,
                                                       val_target_transform, collate_fn, drop_last, loader,
                                                       is_valid_file, **kwargs)
        return DataLoaders(train_loader, val_loader)


def preprocess_data(root, target_root, extractor, force=False):
    """Run *extractor* over every image under *root* and cache the features.

    Each image is resized to 224x224, batched, pushed through *extractor*
    (eval mode, fp16, on GPU when available) and its feature saved as
    ``<target_root>/<relative path>.npy``, mirroring the source tree.
    A no-op when *target_root* already exists, unless *force* is set.
    """
    if not force and os.path.exists(target_root):
        return
    tf = torchvision.transforms.Compose([
        torchvision.transforms.Resize((224, 224)),
        torchvision.transforms.ToTensor(),
    ])

    def loader(file):
        # Carry the source path alongside the tensor so the feature can be
        # written to the mirrored location later.
        img = default_loader(file)
        return file, tf(img)

    def collate_fn(batch):
        # Split the (file, image) pairs back out and stack into a batch.
        files, images, labels = [], [], []
        for file_and_image, label in batch:
            file, image = file_and_image
            files.append(file)
            images.append(image)
            labels.append(label)
        images = torch.stack(images)
        labels = torch.tensor(labels)
        return files, images, labels

    # from_root returns a DataLoaders pair; unpacking works through its
    # __getitem__, yielding (train_loader, val_loader).  val_rate=0 puts
    # every sample into the train loader.
    dls, _ = DataLoaderUtil.from_root(
        root, 0, 32, False, False, None, None, None, None, collate_fn, False, loader)
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16
    extractor.eval()
    extractor.to(device, dtype)

    tq = tqdm.tqdm(dls)
    for files, images, labels in tq:
        with torch.no_grad():
            images = images.to(device, dtype)
            features = extractor(images)
            features = features.cpu()
            for file, feature in zip(files, features):
                relative_path = pathlib.Path(file).relative_to(root)
                target_file = pathlib.Path(
                    target_root) / relative_path.parent / f"{relative_path.stem}"
                # exist_ok avoids the check-then-create race of the previous
                # os.path.exists()/os.makedirs() pair and is a single call.
                target_file.parent.mkdir(parents=True, exist_ok=True)
                # np.save appends ".npy" to the suffix-less target path.
                np.save(target_file, feature.numpy())
    print("preprocess done!")


# Public alias: the module exports DataLoaderUtil under the name "dataloader".
dataloader = DataLoaderUtil

# Attach the preprocessing helper to the exported class (fills the
# placeholder declared in the class body).
dataloader.preprocess_data = preprocess_data
# Keyword arguments for reading features produced by preprocess_data back
# (pass as **dataloader.load_preprocessed_data to from_root and friends).
# NOTE(review): the loader is named "npz" but the validator below only
# accepts ".npy" files — verify the naming is intentional.
dataloader.load_preprocessed_data = {
    "transform": None,
    "val_transform": None,
    "loader": default_npz_loader,
    "is_valid_file": default_valid_npy_file,
}
