import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import mindspore.dataset as ds
import mindspore.common.dtype as mstype
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.transforms.c_transforms as transform

class AnimalDatset:
    """Three-class (cat/dog/panda) image dataset loaded fully into memory.

    Reads images from ``<dataset_dir>/animals_<usage>/<category>`` and
    exposes them through ``__getitem__``/``__len__`` so instances can be fed
    directly to ``mindspore.dataset.GeneratorDataset``.

    NOTE(review): the class name keeps the original spelling ("Datset")
    because external callers reference it by that name.
    """

    def __init__(self, dataset_dir, usage):
        """
        dataset_dir: root path of the animals_dataset directory
        usage      : "train" or "test"; selects which sub-tree is read and
                     whether augmentation is applied
        """
        self.dataset_dir = dataset_dir
        self.usage = usage

        # Log the category folders found under the dataset root.
        categorys = os.listdir(dataset_dir)
        print("在animals_dataset下找到如下目录:", categorys)

        # Load the raw images (lists of RGB numpy arrays) per category.
        self.cat = self.read_images(self.usage, "cat")
        self.dog = self.read_images(self.usage, "dog")
        self.panda = self.read_images(self.usage, "panda")

        # The sample count is small, so augment the training split only.
        if usage == "train":
            self.cats = self.aug_images(self.cat, "cat")
            self.dogs = self.aug_images(self.dog, "dog")
            self.pandas = self.aug_images(self.panda, "panda")
        else:
            self.cats = self.cat
            self.dogs = self.dog
            self.pandas = self.panda

        # Integer class ids for the 3-way task: cat=0, dog=1, panda=2.
        # (A one-hot encoding such as [1, 0, 0] would also work here.)
        self.cat_labels = [0] * len(self.cats)
        self.dog_labels = [1] * len(self.dogs)
        self.panda_labels = [2] * len(self.pandas)

        # Concatenate all categories into flat, index-aligned data/label lists.
        self.data = self.cats + self.dogs + self.pandas
        self.label = self.cat_labels + self.dog_labels + self.panda_labels
        print("总图片样本数:", len(self.data))
        print("总图片标签数:", len(self.label))


    def read_images(self, usage, category):
        """Read every image of one category as an RGB numpy array.

        usage    : "train" or "test"
        category : one of "cat", "dog", "panda"
        Returns a list of HxWx3 uint8 arrays in sorted-filename order.
        """
        target_dir = os.path.join(self.dataset_dir, "animals_" + usage, category)
        # Sort for a deterministic sample order across runs/platforms.
        filenames = sorted(os.listdir(target_dir))
        images = []
        for file in filenames:
            filepath = os.path.join(target_dir, file)
            # cv2 loads BGR; convert to RGB so plotting and training agree.
            img_bgr = cv2.imread(filepath)
            img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
            images.append(img_rgb)
        print("在" + target_dir + "下找到图片数量:", len(images))
        return images


    def aug_images(self, images, category):
        """Return the images plus four augmented copies of the whole list.

        Output order is [original, vertical-flip, horizontal-flip,
        random-rotate, random-contrast], i.e. 5x the input length.

        images   : list of RGB numpy arrays for one category
        category : category name, used for logging only
        """
        print("正在对" + category + "类别进行数据增强:")
        aug_images = []
        aug_images.extend(images)
        print("原始图片数量:", len(images))

        # Vertical flip.
        vertical = [cv2.flip(img, 0) for img in images]
        aug_images.extend(vertical)
        print("得到垂直增强后的图片数量:", len(vertical))

        # Horizontal flip.
        horizontal = [cv2.flip(img, 1) for img in images]
        aug_images.extend(horizontal)
        # BUG FIX: originally printed len(vertical) here.
        print("得到水平增强后的图片数量:", len(horizontal))

        # Random rotation in [-45, 45] degrees around the image center.
        rotate = []
        for img in images:
            rows, cols = img.shape[:2]
            center_coordinate = (int(cols / 2), int(rows / 2))
            angle = np.random.uniform(-45, 45)
            rotate_mat = cv2.getRotationMatrix2D(center_coordinate, angle, 1)
            rotate.append(cv2.warpAffine(img, rotate_mat, (cols, rows)))
        aug_images.extend(rotate)
        print("得到旋转增强后的图片数量:", len(rotate))

        # Random brightness/contrast: pixel' = pixel * slope + bias, clipped
        # back to [0, 255] and cast to uint8.
        color = []
        for img in images:
            slope = np.random.uniform(0.2, 1)
            bias = np.random.uniform(0, 50)
            adjusted = np.clip(img * slope + bias, 0, 255).astype(np.uint8)
            color.append(adjusted)
        aug_images.extend(color)
        # BUG FIX: originally printed len(rotate) here.
        print("得到对比度增强后的图片数量:", len(color))

        print("得到增强后的图片数量:", len(aug_images))
        return aug_images


    def get_normalize(self):
        """Compute per-channel mean and std over all samples.

        Returns (mean, std) as length-3 numpy arrays (RGB order).
        NOTE(review): std is the standard deviation of the *per-image channel
        means*, not of all pixels — a coarser statistic than a true pixel std.
        """
        print("求所有样本normalize, 总图片样本数:", len(self.data))
        mean = np.array([0.0, 0.0, 0.0])
        for index, image in enumerate(self.data):
            if index % 500 == 0:
                print("已处理mean数量:", str(index) + " / " + str(len(self.data)))
            # Average over all leading axes (H, W), keeping the channel axis.
            mean += np.mean(image, axis=tuple(range(image.ndim - 1)))
        mean = mean / len(self.data)
        print("样本mean:", mean)

        std = np.array([0.0, 0.0, 0.0])
        for index, image in enumerate(self.data):
            if index % 500 == 0:
                print("已处理std数量:", str(index) + " / " + str(len(self.data)))
            local_mean = np.mean(image, axis=tuple(range(image.ndim - 1)))
            std += pow(local_mean - mean, 2)
        std = np.sqrt(std / len(self.data))
        print("样本std:", std)
        return mean, std


    def visualize(self, category, display_number=4):
        """Plot the first ``display_number`` images of one category in a row.

        category       : one of "cat", "dog", "panda"
        display_number : number of images to show
        """
        mapping = {"cat": self.cats, "dog": self.dogs, "panda": self.pandas}
        images_for_display = mapping[category][:display_number]
        for index, image in enumerate(images_for_display):
            plt.subplot(1, display_number, index + 1)
            plt.imshow(image)
            plt.title(category + str(index + 1))
        plt.show()


    def visualize_aug(self, category):
        """Plot a 5x5 grid sampling each augmentation section of a category.

        category : one of "cat", "dog", "panda"
        NOTE(review): the offsets 800/1600/2400/3200 assume 800 original
        images per category (aug_images yields 5 equal sections) — confirm
        against the actual dataset size.
        """
        mapping = {"cat": self.cats, "dog": self.dogs, "panda": self.pandas}
        print("查看数据增强结果:")
        images_ori = mapping[category][0:5]
        images_ver = mapping[category][800:805]
        images_hor = mapping[category][1600:1605]
        images_rot = mapping[category][2400:2405]
        images_col = mapping[category][3200:3205]
        images_for_display = images_ori + images_ver + images_hor + images_rot + images_col
        for index, image in enumerate(images_for_display):
            plt.subplot(5, 5, index + 1)
            plt.imshow(image)
            plt.title(category + str(index + 1))
        plt.show()


    def __getitem__(self, index):
        # (image, label) pair for GeneratorDataset's random access protocol.
        return self.data[index], self.label[index]


    def __len__(self):
        return len(self.data)


# Test function 1: exercise the AnimalDatset class on its own.
def TestDataset():
    dataset = AnimalDatset("./animals_dataset", "train")
    print("cats的数量: ", len(dataset.cats))
    # Show a few dog images, then the augmentation overview grid.
    dataset.visualize("dog", 5)
    dataset.visualize_aug("dog")


# Test function 1-1: compute dataset-wide normalization statistics.
def TestDatasetNormalize():
    dataset = AnimalDatset("./animals_dataset", "train")
    stats = dataset.get_normalize()
    print("图像均值: ", stats[0], "图片方差: ", stats[1])


# Test function 2: wrap AnimalDatset in a MindSpore GeneratorDataset and
# iterate a handful of samples from both the train and test splits.
def TestMindSporeDataset():
    def build_and_iterate(usage, banner):
        # With shuffle=False, samples come back in directory order, which
        # makes the printed shapes/labels easy to eyeball.
        source = AnimalDatset("./animals_dataset", usage)
        dataset = ds.GeneratorDataset(source,
                                      column_names=["image", "label"],
                                      num_parallel_workers=4,
                                      shuffle=False)
        print(banner)
        iterator = dataset.create_dict_iterator(output_numpy=True, num_epochs=1)
        for index, data in enumerate(iterator):
            print(data["image"].shape, data["label"])
            if index > 5:
                break

    build_and_iterate("train", "Train Data iter: ")
    build_and_iterate("test", "Test Data iter: ")


def CreateAnimalDataset(dataset_path, usage):
    """Build a batched MindSpore dataset pipeline for training or testing.

    dataset_path : root path of the animals_dataset directory
    usage        : "train" (random crop + shuffle, batch 64, drop remainder)
                   or "test" (center crop, batch 20, keep remainder)
    Returns a ds.GeneratorDataset with columns "image" (CHW, normalized)
    and "label" (int32).
    Raises KeyError for any other usage value.
    """
    animal_dataset = AnimalDatset(dataset_path, usage)
    # Wrap the in-memory dataset; the 2 data columns are "image" and "label".
    dataset = ds.GeneratorDataset(animal_dataset,
                                  column_names=["image", "label"],
                                  num_parallel_workers=4,
                                  shuffle=True)

    # Per-channel mean/std shared by both pipelines (values produced by
    # AnimalDatset.get_normalize() on the training split).
    mean = [109.51911076, 104.72587998, 94.43492111]
    std = [31.54789881, 29.16523819, 29.6411846]

    if usage == "train":
        image_ops = [vision.Resize((255, 255)),
                     vision.RandomCrop((224, 224)),
                     vision.Normalize(mean, std),
                     vision.HWC2CHW()]
        batch_size, drop_remainder = 64, True
    elif usage == "test":
        # Resize(255) scales the shorter edge to 255 keeping aspect ratio —
        # intentionally NOT the train pipeline's distorting Resize((255, 255)).
        image_ops = [vision.Resize(255),
                     vision.CenterCrop((224, 224)),
                     vision.Normalize(mean, std),
                     vision.HWC2CHW()]
        batch_size, drop_remainder = 20, False
    else:
        raise KeyError("invalid usage")

    dataset = dataset.map(image_ops,
                          input_columns=["image"],
                          num_parallel_workers=4)
    dataset = dataset.map(transform.TypeCast(mstype.int32), input_columns=["label"])
    dataset = dataset.batch(batch_size, drop_remainder, num_parallel_workers=2)

    return dataset


if __name__ == "__main__":
    # Toggle exactly one of the test entry points below.
    #TestDataset()
    #TestMindSporeDataset()
    TestDatasetNormalize()
