# encoding: UTF-8
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## NUS School of Computing
## Email: yaoyao.liu@nus.edu.sg
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

import argparse
import os
import random
import pickle

import numpy as np
from PIL import Image

# argument parser
"""
本代码默认数据集文件夹下面有train,val,test三个子文件夹，各文件夹存放着分类文件夹
task episodes数据同样保存在数据集文件夹下，形式: 
                                        train_5_way_5_shot_100_episodeTestSampleNum_20000_episodes.pkl
                                        val_5_way_5_shot_100_episodeTestSampleNum_600_episodes.pkl
                                        test_5_way_5_shot_100_episodeTestSampleNum_600_episodes.pkl

数据集目录结构如下：
train
    n13133613
        n1313361300001275.jpg
        n1313361300001276.jpg
        n1313361300001279.jpg
        ......
    n21345221
    ......
val
    n23133623
    n11645231
    ......
test
    n21733819
    n32846219
    ......
train_5_way_5_shot_15_episodeTestSampleNum_20000_episodes.pkl
val_5_way_5_shot_15_episodeTestSampleNum_600_episodes.pkl
test_5_way_5_shot_15_episodeTestSampleNum_600_episodes.pkl
"""


class MiniImageNetDataLoader(object):
    """Creates and loads episode (.pkl) files for few-shot miniImageNet.

    ``dataset_dir`` must contain ``train``, ``val`` and ``test`` sub-folders,
    each with one folder of images per class.  For every split an episode
    file such as::

        train_5_way_5_shot_15_episodeTestSampleNum_20000_episodes.pkl
        val_5_way_5_shot_15_episodeTestSampleNum_600_episodes.pkl
        test_5_way_5_shot_15_episodeTestSampleNum_600_episodes.pkl

    is loaded from ``dataset_dir``, and is created there first when it does
    not exist yet.
    """

    def __init__(self, way_num, shot_num, episode_test_sample_num, train_episodes_num,
                 val_episodes_num, test_episodes_num, dataset_dir):
        """
        Args:
            way_num: number of classes sampled per episode.
            shot_num: number of support ("train") images per class per episode.
            episode_test_sample_num: number of query ("test") images per class
                per episode.
            train_episodes_num: number of episodes generated for the train split.
            val_episodes_num: number of episodes generated for the val split.
            test_episodes_num: number of episodes generated for the test split.
            dataset_dir: root directory holding the split sub-folders and the
                episode .pkl files.

        Raises:
            FileNotFoundError: if any of the train/val/test directories is missing.
        """
        self.titles = ['train', 'val', 'test']
        self.dataset_dir = dataset_dir

        self.dataset_sub_folders = [os.path.join(self.dataset_dir, title) for title in self.titles]
        self._check_dataset_sub_folders()  # fail fast if a split folder is missing

        self.way_num = way_num
        self.shot_num = shot_num
        self.episode_test_sample_num = episode_test_sample_num
        # images drawn per class within a single episode: support + query
        self.num_samples_per_way = shot_num + episode_test_sample_num
        self.episodes_num = [train_episodes_num, val_episodes_num, test_episodes_num]

        self.pkl_names, self.pkl_paths = self._check_pkl_files_exist_or_renew()

        self.train_dataset, self.val_dataset, self.test_dataset = self._load_dataset()

    def _check_dataset_sub_folders(self):
        """Raise FileNotFoundError unless train/val/test directories all exist."""
        for _folder in self.dataset_sub_folders:
            if not os.path.isdir(_folder):
                # name the offending directory in the exception instead of
                # printing it separately
                raise FileNotFoundError(
                    "Error: directories of train, val and test should all exist, "
                    "but {} was not found !!!".format(_folder))

    def _check_pkl_files_exist_or_renew(self):
        """Return (pkl_names, pkl_paths) for all splits, creating missing pkl files.

        File names follow the pattern
        ``<split>_<way>_way_<shot>_shot_<query>_episodeTestSampleNum_<episodes>_episodes.pkl``.
        """
        pkl_names = []
        pkl_paths = []
        for idx, (title, title_episodes_num) in enumerate(zip(self.titles, self.episodes_num)):
            pkl_name = "{}_{}_way_{}_shot_{}_episodeTestSampleNum_{}_episodes.pkl". \
                format(title, self.way_num, self.shot_num, self.episode_test_sample_num, title_episodes_num)
            pkl_names.append(pkl_name)

            pkl_path = os.path.join(self.dataset_dir, pkl_name)
            pkl_paths.append(pkl_path)

            if not os.path.exists(pkl_path):
                print("Warning: {} cannot be found, recreate the file ...".format(pkl_path))
                self._create_pkl(title, self.dataset_sub_folders[idx], pkl_path, title_episodes_num)
        return pkl_names, pkl_paths

    def _create_pkl(self, title, source_path, pkl_path, title_episodes_num):
        """Sample ``title_episodes_num`` random episodes from ``source_path``
        and pickle them to ``pkl_path``.

        Image paths are stored relative to ``dataset_dir``, e.g.
        ``train/n123321/135125253.jpg``; labels are integers in [0, way_num).
        """
        # class name -> all image paths of that class (relative to dataset_dir)
        class_to_images = dict()
        # sorted() makes the class list deterministic regardless of the
        # filesystem's os.listdir() order
        for class_name in sorted(os.listdir(source_path)):
            source_sub_folder_path = os.path.join(source_path, class_name)
            if not os.path.isdir(source_sub_folder_path):
                continue  # ignore stray files next to the class folders
            # recording_path is the dataset-relative prefix, e.g. train/n123321
            recording_path = os.path.join(title, class_name)
            class_to_images[class_name] = [
                os.path.join(recording_path, img_name)
                for img_name in os.listdir(source_sub_folder_path)
            ]

        all_classes = list(class_to_images.keys())
        all_episode_records = {'train_images': [], 'train_labels': [], 'test_images': [], 'test_labels': []}
        for _ in range(title_episodes_num):
            selected_classes = random.sample(all_classes, self.way_num)
            random.shuffle(selected_classes)

            per_episode_train_images = []
            per_episode_train_labels = []
            per_episode_test_images = []
            per_episode_test_labels = []
            for label, img_class in enumerate(selected_classes):
                # draw support + query images for this class in one pass so
                # the two sets never overlap
                image_paths = random.sample(class_to_images[img_class], self.num_samples_per_way)

                per_episode_train_images.extend(image_paths[: self.shot_num])
                per_episode_train_labels.extend([label] * self.shot_num)

                per_episode_test_images.extend(image_paths[self.shot_num:])
                per_episode_test_labels.extend([label] * self.episode_test_sample_num)

            all_episode_records['train_images'].append(per_episode_train_images)
            all_episode_records['train_labels'].append(per_episode_train_labels)
            all_episode_records['test_images'].append(per_episode_test_images)
            all_episode_records['test_labels'].append(per_episode_test_labels)

        # open the file only after all episodes were sampled successfully, so
        # a sampling error cannot leave a truncated pkl behind; the context
        # manager guarantees the handle is closed
        with open(pkl_path, "wb") as pkl_file:
            pickle.dump(all_episode_records, pkl_file)

    def _load_dataset(self):
        """Unpickle and return the [train, val, test] episode record dicts.

        Each dict has the shape
        {'train_images': [], 'train_labels': [], 'test_images': [], 'test_labels': []}.
        """
        tri_datasets = []
        for pkl_path in self.pkl_paths:
            with open(pkl_path, 'rb') as f:
                tri_datasets.append(pickle.load(f))
        return tri_datasets


def get_batch(_dataset, episode_index):
    """Pick one episode out of a split's record dict.

    Returns a 4-tuple: (train_images, train_labels, test_images, test_labels)
    for the episode at *episode_index*.
    """
    keys = ('train_images', 'train_labels', 'test_images', 'test_labels')
    return tuple(_dataset[key][episode_index] for key in keys)


class MiniImageNetDataSet(object):
    """User-facing few-shot dataset: episode indices in, numpy batches out.

    Wraps :class:`MiniImageNetDataLoader` (which builds/loads the episode
    .pkl files) and converts an episode's image paths and integer labels into
    image arrays scaled to [0, 1] and one-hot label arrays.
    """

    def __init__(self, way_num, shot_num, episode_test_sample_num,
                 train_episodes_num, val_episodes_num, test_episodes_num, dataset_dir):
        """
        Args:
            way_num: number of classes per episode (also the one-hot length).
            shot_num: number of support images per class per episode.
            episode_test_sample_num: number of query images per class per episode.
            train_episodes_num: number of train episodes.
            val_episodes_num: number of val episodes.
            test_episodes_num: number of test episodes.
            dataset_dir: dataset root; '~' is expanded.
        """
        # expand '~' so paths like '~/processed_images' work
        dataset_dir = os.path.expanduser(dataset_dir)

        data_loader = MiniImageNetDataLoader(way_num, shot_num, episode_test_sample_num, train_episodes_num,
                                             val_episodes_num, test_episodes_num, dataset_dir)

        self.train_dataset = data_loader.train_dataset
        self.val_dataset = data_loader.val_dataset
        self.test_dataset = data_loader.test_dataset

        self.dataset_dir = dataset_dir
        self.way_num = way_num
        self.shot_num = shot_num

    def get_train_batch(self, episode_index):
        """Return the processed train-split episode at *episode_index*."""
        return self.process_batch(*get_batch(self.train_dataset, episode_index))

    def get_val_batch(self, episode_index):
        """Return the processed val-split episode at *episode_index*."""
        return self.process_batch(*get_batch(self.val_dataset, episode_index))

    def get_test_batch(self, episode_index):
        """Return the processed test-split episode at *episode_index*."""
        return self.process_batch(*get_batch(self.test_dataset, episode_index))

    def process_batch(self, episode_train_images, episode_train_labels, episode_test_images, episode_test_labels):
        """Turn one episode's paths/labels into numpy arrays.

        Args:
            episode_train_images / episode_test_images: image paths relative
                to ``dataset_dir``.
            episode_train_labels / episode_test_labels: int labels in
                [0, way_num).

        Returns:
            (train_images, train_labels, test_images, test_labels) where
            images are float arrays scaled to [0, 1] and labels are one-hot
            arrays of length ``way_num``.
        """
        def read_image_array(_image_path):
            # pkl files store paths relative to dataset_dir
            _image_path = os.path.join(self.dataset_dir, _image_path)
            return np.array(Image.open(_image_path).convert("RGB"))

        def read_label_array(_label):
            # one-hot encode the integer class label
            one_hot = np.zeros(self.way_num)
            one_hot[_label] = 1
            return one_hot

        _train_images = [read_image_array(image_path) for image_path in episode_train_images]
        _train_labels = [read_label_array(label) for label in episode_train_labels]
        _test_images = [read_image_array(image_path) for image_path in episode_test_images]
        _test_labels = [read_label_array(label) for label in episode_test_labels]

        # scale pixel values from [0, 255] to [0, 1]
        return (np.array(_train_images) / 255.0, np.array(_train_labels),
                np.array(_test_images) / 255.0, np.array(_test_labels))


if __name__ == '__main__':
    # Command-line smoke test: build (or load) the episode files and walk
    # through every training episode once.
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--dataset_dir', type=str)
    for int_flag in ('--way_num', '--shot_num', '--episode_test_sample_num',
                     '--train_episodes_num', '--val_episodes_num', '--test_episodes_num'):
        parser.add_argument(int_flag, type=int)

    args = parser.parse_args()

    minidataset = MiniImageNetDataSet(args.way_num, args.shot_num, args.episode_test_sample_num,
                                      args.train_episodes_num, args.val_episodes_num,
                                      args.test_episodes_num, args.dataset_dir)

    for episode_idx in range(args.train_episodes_num):
        train_images, train_labels, test_images, test_labels = minidataset.get_train_batch(episode_idx)
        print(episode_idx)

#########################  Demo  ###########################

"""
from mini_imagenet_dataloader import MiniImageNetDataSet

minidataset = MiniImageNetDataSet(5,5,15,20000,600,800,'~/processed_images/')

train_images, train_labels, test_images, test_labels = minidataset.get_train_batch(100)

print(train_images, train_labels, test_images, test_labels)
print(train_images.shape)
print(train_labels.shape)
print(test_images.shape)
print(test_labels.shape)

for i in range(20000):
    train_images, train_labels, test_images, test_labels = minidataset.get_train_batch(i)
    print(i)
"""

"""
python ~/testX/mini-imagenet-tools/mini_imagenet_dataloader.py --dataset_dir ~/processed_images --way_num 5 --shot_num 5 --episode_test_sample_num 15 --train_episodes_num 20000 --val_episodes_num 600 --test_episodes_num 800
"""