import os
import pickle

import numpy as np
import PIL.Image
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.c_transforms as VT
from mindspore.dataset.vision import Inter

class CUB200:
    """CUB-200-2011 birds dataset, cached as pickle files for fast reload.

    On first construction the raw images under ``CUB_200_2011/images`` are
    read, converted to RGB numpy arrays and dumped to
    ``processed/train.pkl`` / ``processed/test.pkl``; later constructions
    load straight from the cache.

    Args:
        root (str): dataset root directory (``~`` is expanded).
        is_train (bool): load the training split if True, else the test split.
    """

    # Official CUB-200-2011 split sizes, used as a cache sanity check.
    _TRAIN_SIZE = 5994
    _TEST_SIZE = 5794

    def __init__(self, root, is_train=True):
        self._root = os.path.expanduser(root)  # Replace ~ by the complete dir
        self.is_train = is_train

        train_pkl = os.path.join(self._root, 'processed/train.pkl')
        test_pkl = os.path.join(self._root, 'processed/test.pkl')
        if os.path.isfile(train_pkl) and os.path.isfile(test_pkl):
            print('datasets already cached.')
        else:
            self.__build_cache()

        # Now load the pickled split for this instance.
        if self.is_train:
            path, expected = train_pkl, self._TRAIN_SIZE
        else:
            path, expected = test_pkl, self._TEST_SIZE
        # Context manager so the file handle is never leaked.
        with open(path, 'rb') as f:
            self.datas, self.labels = pickle.load(f)
        # Explicit raise instead of assert: asserts vanish under `python -O`.
        if len(self.datas) != expected or len(self.labels) != expected:
            raise RuntimeError(
                'corrupted cache %s: expected %d samples, got %d data / %d labels'
                % (path, expected, len(self.datas), len(self.labels)))

    def __getitem__(self, index):
        """
        Args:
            index, int: Index.
        Returns:
            image, PIL.Image: Image of the given index.
            target, int: 0-based class label of the given index.
        """
        image, target = self.datas[index], self.labels[index]

        # Doing this so that it is consistent with all other datasets.
        image = PIL.Image.fromarray(image)

        return image, target

    def __len__(self):
        """Length of the dataset.
        Returns:
            length, int: Length of the dataset.
        """
        return len(self.labels)

    def __build_cache(self):
        """Prepare the data for train/test split and save onto disk."""
        image_path = os.path.join(self._root, 'CUB_200_2011/images/')
        # Format of images.txt: <image_id> <image_name>
        id2name = np.genfromtxt(os.path.join(self._root, 'CUB_200_2011/images.txt'), dtype=str)
        # Format of train_test_split.txt: <image_id> <is_training_image>
        id2train = np.genfromtxt(os.path.join(self._root, 'CUB_200_2011/train_test_split.txt'), dtype=int)

        train_data = []
        train_labels = []
        test_data = []
        test_labels = []
        for id_ in range(id2name.shape[0]):
            # Folder prefix '001'..'200' in the image name -> 0-based label.
            label = int(id2name[id_, 1][:3]) - 1
            # Context manager closes the file even if conversion raises.
            with PIL.Image.open(os.path.join(image_path, id2name[id_, 1])) as image:
                # Force 3-channel RGB: covers grayscale ('L'), palette ('P'),
                # alpha ('RGBA'/'LA') variants, not only mode 'L'.
                if image.mode != 'RGB':
                    image = image.convert('RGB')
                image_np = np.array(image)

            if id2train[id_, 1] == 1:
                train_data.append(image_np)
                train_labels.append(label)
            else:
                test_data.append(image_np)
                test_labels.append(label)

        os.makedirs(os.path.join(self._root, 'processed'), exist_ok=True)
        with open(os.path.join(self._root, 'processed/train.pkl'), 'wb') as f:
            pickle.dump((train_data, train_labels), f)
        with open(os.path.join(self._root, 'processed/test.pkl'), 'wb') as f:
            pickle.dump((test_data, test_labels), f)


def create_dataset(root, batch_size, is_train=True):
    """Build a batched CUB-200 pipeline on top of the pickle-backed CUB200 source.

    Args:
        root (str): dataset root, passed through to CUB200.
        batch_size (int): number of samples per batch.
        is_train (bool): training pipeline (random flip + crop) if True,
            evaluation pipeline (center crop) otherwise.

    Returns:
        Batched MindSpore dataset with columns ["image", "label"]; images are
        float32 CHW, ImageNet-normalized.
    """
    ds.config.set_seed(1)

    dataset_generator = CUB200(root, is_train=is_train)

    # NOTE(review): CUB200.__getitem__ yields a PIL.Image; GeneratorDataset
    # must convert it to a numpy array — confirm with the installed MindSpore.
    dataset = ds.GeneratorDataset(dataset_generator, ["image", "label"], shuffle=True)

    # BUGFIX: the ImageNet statistics below are defined for pixels in [0, 1],
    # so the image must be rescaled by 1/255 BEFORE Normalize.  The previous
    # code normalized raw uint8 values and divided by 255 afterwards, which
    # produced values near 1.0 instead of zero-mean data.
    common_tail = [
        vision.Rescale(1.0 / 255.0, 0.0),
        vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        vision.HWC2CHW(),
    ]
    if is_train:
        transform_img = [
            vision.Resize([448, 448], Inter.LINEAR),
            vision.RandomHorizontalFlip(),
            # NOTE(review): RandomCrop(448) on a 448x448 image is a no-op;
            # the Resize target was probably meant to be larger — confirm.
            vision.RandomCrop(448),
        ] + common_tail
    else:
        transform_img = [
            vision.Resize([448, 448], Inter.LINEAR),
            vision.CenterCrop(448),
        ] + common_tail

    dataset = dataset.map(input_columns="image", num_parallel_workers=2,
                          operations=transform_img, output_columns="image")
    # Normalize already emits float32; keep the cast as a cheap guarantee.
    dataset = dataset.map(input_columns="image", num_parallel_workers=2,
                          operations=lambda x: x.astype("float32"))
    dataset = dataset.batch(batch_size)

    return dataset


def create_dataset_train(train_path, batch_size):
    """Create the training dataset from an ImageFolder directory tree.

    Args:
        train_path (str): root of the class-per-folder training images.
        batch_size (int): number of samples per batch.

    Returns:
        Shuffled, batched MindSpore dataset; images are float32 CHW,
        ImageNet-normalized.
    """
    train_data_set = ds.ImageFolderDataset(train_path, shuffle=True)
    # define map operations.
    # BUGFIX: ImageNet mean/std apply to pixels in [0, 1], so rescale by
    # 1/255 BEFORE Normalize.  The previous code normalized raw uint8 values
    # and divided by 255 afterwards, destroying the standardization.
    transform_img = [
        vision.Decode(),
        vision.Resize([448, 448], Inter.LINEAR),
        vision.RandomHorizontalFlip(),
        # NOTE(review): RandomCrop(448) after Resize([448,448]) is a no-op —
        # confirm the intended augmentation size.
        vision.RandomCrop(448),
        vision.Rescale(1.0 / 255.0, 0.0),
        vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        vision.HWC2CHW()
    ]
    train_data_set = train_data_set.map(input_columns="image", num_parallel_workers=8,
                                        operations=transform_img, output_columns="image")
    # Normalize already emits float32; keep the cast as a cheap guarantee.
    train_data_set = train_data_set.map(input_columns="image", num_parallel_workers=8,
                                        operations=lambda x: x.astype("float32"))
    train_data_set = train_data_set.batch(batch_size)
    return train_data_set


def create_dataset_test(test_path, batch_size):
    """Create the evaluation dataset from an ImageFolder directory tree.

    Args:
        test_path (str): root of the class-per-folder test images.
        batch_size (int): number of samples per batch.

    Returns:
        Unshuffled, batched MindSpore dataset; images are float32 CHW,
        ImageNet-normalized.
    """
    test_data_set = ds.ImageFolderDataset(test_path, shuffle=False)
    # define map operations.
    # BUGFIX: ImageNet mean/std apply to pixels in [0, 1], so rescale by
    # 1/255 BEFORE Normalize.  The previous code normalized raw uint8 values
    # and divided by 255 afterwards, destroying the standardization.
    transform_img = [
        vision.Decode(),
        vision.Resize([448, 448], Inter.LINEAR),
        vision.CenterCrop(448),
        vision.Rescale(1.0 / 255.0, 0.0),
        vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        vision.HWC2CHW()
    ]
    test_data_set = test_data_set.map(input_columns="image", num_parallel_workers=8,
                                      operations=transform_img, output_columns="image")
    # Normalize already emits float32; keep the cast as a cheap guarantee.
    test_data_set = test_data_set.map(input_columns="image", num_parallel_workers=8,
                                      operations=lambda x: x.astype("float32"))
    test_data_set = test_data_set.batch(batch_size)
    return test_data_set