import json
import os
from collections import defaultdict
from functools import partial
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
import time
from lib.transforms.transforms import TRANSFORMS
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Union
from abc import abstractmethod
from lib.utils import timer_decorator


# All datasets defined should inherit this class
# and override the '_get_category2path' method (see its docstring for the expected return format)
class BaseDataset(Dataset):
    """Base class for few-shot learning datasets.

    Subclasses must override ``_get_category2path`` (see its docstring).
    Responsibilities handled here:

      * split the categories into TRAIN / VALIDATE / TEST subsets and
        persist the split as ``data_split/<dataset>.json``;
      * read every image of the requested split into memory with a thread
        pool and cache the result as
        ``data_cache/<dataset>/<type>_CACHE.pth``;
      * apply the split-specific transforms (at cache time for
        VALIDATE/TEST, per ``__getitem__`` for TRAIN so that random
        augmentations are re-sampled on every access).
    """

    def __init__(self, cfg, type):
        # `type` must be capitalized: 'TRAIN', 'VALIDATE' or 'TEST'.  The
        # parameter name shadows the builtin but is kept so existing
        # keyword callers keep working.
        self.type = type
        self.dataset_name = cfg.DATASET.NAME
        self.default_split_path = 'data_split'
        self.store_path = os.path.join(self.default_split_path, f'{self.dataset_name}.json')

        self.train_class_num = cfg.DATASET.TRAIN_CLASS_NUM
        self.valid_class_num = cfg.DATASET.VALID_CLASS_NUM
        self.test_class_num = cfg.DATASET.TEST_CLASS_NUM

        self.n_way = cfg[type].N_WAY
        self.channel = cfg.MODEL.INPUT_CHANNEL

        self.data_path = cfg.DATASET.DATAPATH
        self.category2path = self._get_category2path()
        self.category2id = self._get_category2id()
        self.category_split = self._get_split_categories()

        if not os.path.exists(self.store_path):
            # The first dataset built in a run generates the split; later
            # ones (e.g. the validate dataset) load it back below, so every
            # split within a run stays consistent.
            self.split_json = self._generate_split_json()
            print(f'{self.store_path} has been stored')
        else:
            # fix: use a context manager so the JSON file handle is closed
            # (the original `json.load(open(...))` leaked it)
            with open(self.store_path, 'r') as file:
                self.split_json = json.load(file)
            print(f'{self.store_path} has been loaded over')
        self.transforms = self._get_transforms(cfg)

        self.default_data_cache = f'data_cache/{self.dataset_name}'
        self.data_cache = f'{self.default_data_cache}/{type}_CACHE.pth'
        if os.path.exists(self.data_cache):
            cache = timer_decorator(torch.load)(self.data_cache, map_location='cpu')
            self.images = cache['images']
            self.labels = cache['labels']
            print(f'{self.data_cache} has been loaded over!!')
        else:
            # exist_ok avoids the check-then-create race of the original
            os.makedirs(self.default_data_cache, exist_ok=True)
            self.images, self.labels = self._create_data_cache()
            print(f'{self.data_cache} has been created')

    @timer_decorator
    def _create_data_cache(self):
        """Read every image of this split, persist the cache file and
        return ``(images, labels)``."""
        image_paths, labels = self._get_images_labels()
        images = self._multi_read_image(image_paths)
        torch.save({'labels': labels, 'images': images}, self.data_cache)
        return images, labels

    def _get_images_labels(self):
        """Flatten this split's ``{category: [path, ...]}`` mapping.

        Returns:
            tuple: ``(list[(index, path)], Tensor)`` — enumerated image
            paths (the index lets ``_multi_read_image`` restore submission
            order after the thread pool completes out of order) and the
            matching category-id labels.
        """
        img_paths = []
        labels = []
        subset = self.split_json[self.type]
        for label, paths in subset.items():
            for path in paths:
                img_paths.append(path)
                labels.append(self.category2id[label])
        return list(enumerate(img_paths)), torch.tensor(labels)

    @abstractmethod
    def _get_category2path(self) -> dict:
        """Map every category name to the list of its image paths.

        Returns:
            dict: e.g. ``{'dog': [path1, path2, ...], 'cat': [path1, ...]}``.

        NOTE(review): without an ABCMeta metaclass ``@abstractmethod`` does
        not block instantiation; it only documents that subclasses must
        override this hook.
        """
        return {}

    def _get_split_categories(self) -> dict:
        """Randomly partition the category names into the three splits.

        TEST takes whatever remains after TRAIN and VALIDATE, so
        ``test_class_num`` is not consulted here.
        NOTE(review): this reshuffles on every construction; once the split
        JSON exists on disk it — not this dict — is the source of truth.
        """
        categories = list(self.category2path)
        np.random.shuffle(categories)
        train_end = self.train_class_num
        valid_end = train_end + self.valid_class_num
        return {
            'TRAIN': categories[:train_end],
            'VALIDATE': categories[train_end:valid_end],
            'TEST': categories[valid_end:],
        }

    def _get_category2id(self) -> dict:
        """Assign each category a stable integer id (dict insertion order)."""
        return {key: i for i, key in enumerate(self.category2path)}

    def _generate_split_json(self):
        """Build the split -> {category: paths} mapping and store it as JSON
        at ``self.store_path``; returns the mapping."""
        json_file = {
            'TRAIN': {c: self.category2path[c] for c in self.category_split['TRAIN']},
            'VALIDATE': {c: self.category2path[c] for c in self.category_split['VALIDATE']},
            'TEST': {c: self.category2path[c] for c in self.category_split['TEST']},
        }

        # exist_ok avoids the check-then-create race of the original
        os.makedirs(self.default_split_path, exist_ok=True)

        with open(self.store_path, 'w') as file:
            json.dump(json_file, file)

        return json_file

    def _get_transforms(self, cfg):
        """Compose this split's transform pipeline, or return None when no
        operators are configured for it."""
        operators = cfg[self.type].TRANSFORM_OPERATORS
        if not operators:
            return None
        return transforms.Compose([TRANSFORMS[op](cfg) for op in operators])

    def _read_image(self, id_path):
        """Load one image from disk.

        Args:
            id_path: ``(index, path)`` pair from ``_get_images_labels``.

        Returns:
            tuple: ``(index, image)`` so callers can restore ordering.
        """
        idx, path = id_path
        if self.channel == 1:
            image = Image.open(path)
            # fix: Image.open is lazy — force the decode now so the file
            # handle is released instead of accumulating while the whole
            # split is cached (the RGB branch is forced by .convert already)
            image.load()
        else:
            image = Image.open(path).convert('RGB')

        # VALIDATE/TEST images are transformed once here; TRAIN images stay
        # in PIL form so random augmentations can differ per __getitem__.
        if self.type in ['VALIDATE', 'TEST']:
            image = self.transforms(image)

        return (idx, image)

    def _multi_read_image(self, id_paths):
        """Read all images concurrently, preserving the submission order."""
        with ThreadPoolExecutor() as executor:
            futures = [executor.submit(self._read_image, id_path) for id_path in id_paths]
            # Results arrive in completion order; sort by the attached
            # index to restore the original order.
            results = [future.result() for future in as_completed(futures)]
        results = [image for _, image in sorted(results, key=lambda pair: pair[0])]

        # VALIDATE/TEST entries are already tensors (see _read_image), so
        # they can be stacked into a single batch tensor.
        if self.type in ['VALIDATE', 'TEST']:
            results = torch.stack(results)

        return results

    def __len__(self):
        # Each iteration yields one episode; how many episodes a sampler
        # may draw is bounded by the number of cached labels.
        return len(self.labels)

    def _transform_entry(self, entry):
        """Apply the TRAIN-time transforms to one cached entry, which is
        either a single PIL image or a list of them."""
        if isinstance(entry, list):
            return torch.stack([self.transforms(img) for img in entry])
        return self.transforms(entry)

    def __getitem__(self, index: Union[list, int]):
        """Return ``(image(s), label(s))`` for an int index or a list of
        indices.

        VALIDATE/TEST images were already transformed at cache time and are
        returned as-is; TRAIN images are transformed here so random
        augmentations are re-sampled on every access.
        """
        if self.type in ['VALIDATE', 'TEST']:
            return self.images[index], self.labels[index]

        if isinstance(index, list):
            image = torch.stack([self._transform_entry(self.images[i]) for i in index])
        else:
            image = self._transform_entry(self.images[index])

        return image, self.labels[index]
