from skimage import io, transform
import glob
import os
import numpy as np
from src.utils.sort import Sort


class ReadImg:
    """Load labelled histology-image data sets from a directory tree.

    Labels are derived from substrings of folder (or file) paths:
    'cancer'/'normal' for the class task, 'g0'..'g3' for the HER2 grade
    task and '5X'/'10X'/'15X'/'20X' for the magnification task.

    Args:
        pic_input_path: root of the train/test set.  Paths are built by plain
            string concatenation, so this must end with a path separator
            (matching the original call convention).
        weight: target image width after resizing.
            NOTE(review): 'weight' looks like a misspelling of 'width';
            kept unchanged for API compatibility.
        height: target image height after resizing.
    """

    def __init__(self, pic_input_path, weight, height):
        self.pic_input_path = pic_input_path
        self.weight = weight
        self.height = height

    # ------------------------------------------------------------------
    # internal helpers
    # ------------------------------------------------------------------

    @staticmethod
    def _subdirs(path):
        """Full paths of the immediate sub-directories of *path*.

        Built by string concatenation, so *path* should end with a
        path separator.
        """
        return [path + x for x in os.listdir(path) if os.path.isdir(path + x)]

    def _leaf_dirs(self):
        """Second-level directories under the data set root.

        E.g. <root>/cancer/10X, <root>/normal/20X, ...
        """
        leaves = []
        for folder in self._subdirs(self.pic_input_path):
            leaves.extend(self._subdirs(folder + "/"))
        return leaves

    def _load(self, im):
        """Read one image file and resize it to (weight, height)."""
        return transform.resize(io.imread(im), (self.weight, self.height))

    @staticmethod
    def _take(folder, copies):
        """First *copies* fraction (0 < copies <= 1) of the .jpg files in
        *folder*, in glob order."""
        image_set = glob.glob(folder + '/*.jpg')
        return image_set[:int(len(image_set) * copies)]

    @staticmethod
    def _grade(path):
        """HER2 grade encoded as a 'g0'..'g3' substring of *path*, or None.

        Tags are tested in ascending order, mirroring the original if/elif
        chains; this is a plain substring match, so a tag may also match
        inside a longer token (original behaviour, preserved).
        """
        for g in range(4):
            if path.find('g%d' % g) != -1:
                return g
        return None

    @staticmethod
    def _one_hot(index, classes):
        """Float32 one-hot row vector of length *classes*."""
        vec = np.zeros(classes, dtype=np.float32)
        vec[index] = 1.0
        return vec

    def _batched(self, paths, batch_size, degree_of, magnification_of):
        """Load *paths* in batches of at most *batch_size* images and yield
        (images, degree labels, magnification labels) float32 arrays.

        *degree_of(path)* may return None, in which case no degree label is
        appended for that image (original behaviour, preserved).  The final
        partial batch is yielded as well.
        """
        imgs, degrees, mags = [], [], []
        count = 0
        pending = len(paths)  # images not yet yielded; flushes the remainder
        for im in paths:
            count += 1
            imgs.append(self._load(im))
            d = degree_of(im)
            if d is not None:
                degrees.append(d)
            mags.append(magnification_of(im))
            if count == batch_size or count == pending:
                yield (np.asarray(imgs, np.float32),
                       np.asarray(degrees, np.float32),
                       np.asarray(mags, np.float32))
                imgs, degrees, mags = [], [], []
                pending -= batch_size
                count = 0

    # ------------------------------------------------------------------
    # public readers
    # ------------------------------------------------------------------

    def read_basic_dataSet(self):
        """Read the basic data set (root contains only cancer/normal folders).

        Returns:
            (images float32, class labels int32): label 1 when the folder
            path contains 'cancer', else 0.
        """
        imgs = []
        labels_for_class = []
        for folder in self._subdirs(self.pic_input_path):
            for im in glob.glob(folder + '/*.jpg'):
                imgs.append(self._load(im))
                labels_for_class.append(1 if folder.find('cancer') != -1 else 0)
        return np.asarray(imgs, np.float32), np.asarray(labels_for_class, np.int32)

    def read_dataSet_in_multi_task(self):
        """Multi-task read: cancer/normal class plus cancer degree.

        The root contains class folders which in turn contain degree/
        magnification folders, so two directory levels are scanned.

        Returns:
            (images float32, class labels int32, degree labels int32) where
            cancer_degree1/2/3 map to 3/2/1 and normal maps to 0 (mapping
            chosen by the original author).
        """
        imgs = []
        labels_for_class = []
        labels_for_degree = []
        for folder in self._leaf_dirs():
            for im in glob.glob(folder + '/*.jpg'):
                imgs.append(self._load(im))
                if folder.find('cancer_degree1') != -1:
                    labels_for_degree.append(3)
                elif folder.find('cancer_degree2') != -1:
                    labels_for_degree.append(2)
                elif folder.find('cancer_degree3') != -1:
                    labels_for_degree.append(1)
                elif folder.find('normal') != -1:
                    labels_for_degree.append(0)
                # NOTE(review): a folder matching none of the tags above would
                # leave labels_for_degree shorter than imgs; assumed not to
                # occur in the data layout - confirm.
                labels_for_class.append(1 if folder.find('cancer') != -1 else 0)
        return (np.asarray(imgs, np.float32),
                np.asarray(labels_for_class, np.int32),
                np.asarray(labels_for_degree, np.int32))

    def read_dataSet_in_multi_task2(self, copies=1):
        """Multi-task read under HER2: grade and magnification labels.

        Args:
            copies: fraction (0 < copies <= 1) of each folder's images to use.

        Returns:
            (images float32, grade labels int32 with 'g0'..'g3' -> 0..3,
            magnification labels int32 with 0 for '10X', 1 otherwise).
        """
        imgs = []
        labels_for_magnification = []
        labels_for_degree = []
        for folder in self._leaf_dirs():
            grade = self._grade(folder)  # folder-invariant: hoisted out of the loop
            magnification = 0 if folder.find('10X') != -1 else 1
            for im in self._take(folder, copies):
                imgs.append(self._load(im))
                if grade is not None:
                    labels_for_degree.append(grade)
                labels_for_magnification.append(magnification)
        return (np.asarray(imgs, np.float32),
                np.asarray(labels_for_degree, np.int32),
                np.asarray(labels_for_magnification, np.int32))

    def read_dataSet_in_single_task(self):
        """Single-task read: magnification labels from one directory level.

        Returns:
            (images float32, magnification labels int32) with
            20X/15X/10X/5X -> 3/2/1/0.
        """
        imgs = []
        labels_for_magnification = []
        for folder in self._subdirs(self.pic_input_path):
            for im in glob.glob(folder + '/*.jpg'):
                imgs.append(self._load(im))
                # '15X' must be tested before '5X' because it contains '5X'.
                if folder.find('20X') != -1:
                    labels_for_magnification.append(3)
                elif folder.find('15X') != -1:
                    labels_for_magnification.append(2)
                elif folder.find('10X') != -1:
                    labels_for_magnification.append(1)
                elif folder.find('5X') != -1:
                    labels_for_magnification.append(0)
        return np.asarray(imgs, np.float32), np.asarray(labels_for_magnification, np.int32)

    def read_dataSet_in_single_task2(self):
        """Single-task read: HER2 grade labels for ALL images.

        Identical to read_some_dataSet_in_single_task2 with copies=1,
        so it simply delegates.
        """
        return self.read_some_dataSet_in_single_task2(copies=1)

    def read_some_dataSet_in_single_task2(self, copies=1):
        """Single-task read: HER2 grade labels for a fraction of the images.

        Args:
            copies: fraction (0 < copies <= 1) of each folder's images to use.

        Returns:
            (images float32, grade labels int32 with 'g0'..'g3' -> 0..3).
        """
        imgs = []
        labels_for_HER2 = []
        for folder in self._subdirs(self.pic_input_path):
            grade = self._grade(folder)
            for im in self._take(folder, copies):
                imgs.append(self._load(im))
                if grade is not None:
                    labels_for_HER2.append(grade)
        return np.asarray(imgs, np.float32), np.asarray(labels_for_HER2, np.int32)

    def read_some_dataSet_for_softmax_in_single_task2(self, copies=1):
        """Like read_some_dataSet_in_single_task2, but grade labels are
        one-hot float32 vectors (probability-distribution targets for a
        softmax layer).
        """
        imgs = []
        labels_for_HER2 = []
        for folder in self._subdirs(self.pic_input_path):
            grade = self._grade(folder)
            for im in self._take(folder, copies):
                imgs.append(self._load(im))
                if grade is not None:
                    labels_for_HER2.append(self._one_hot(grade, 4))
        return np.asarray(imgs, np.float32), np.asarray(labels_for_HER2, np.float32)

    def read_some_dataSet_for_softmax_in_multi_task2(self, copies=1):
        """Multi-task softmax read under HER2: one-hot grade and one-hot
        magnification labels, scanning two directory levels.

        Returns:
            (images float32, grade one-hots float32,
            magnification one-hots float32 with [1,0] for '10X', [0,1]
            otherwise).
        """
        imgs = []
        labels_for_magnification = []
        labels_for_degree = []
        for folder in self._leaf_dirs():
            grade = self._grade(folder)
            mag_index = 0 if folder.find('10X') != -1 else 1
            for im in self._take(folder, copies):
                imgs.append(self._load(im))
                if grade is not None:
                    labels_for_degree.append(self._one_hot(grade, 4))
                labels_for_magnification.append(self._one_hot(mag_index, 2))
        return (np.asarray(imgs, np.float32),
                np.asarray(labels_for_degree, np.float32),
                np.asarray(labels_for_magnification, np.float32))

    def read_single_folder_for_softmax_in_multi_task2(self, copies=1):
        """Read ONE specific folder (pic_input_path itself), deriving both
        one-hot labels from that path.  Mainly used for model testing.
        """
        imgs = []
        labels_for_magnification = []
        labels_for_degree = []
        grade = self._grade(self.pic_input_path)
        mag_index = 0 if self.pic_input_path.find('10X') != -1 else 1
        for im in self._take(self.pic_input_path, copies):
            imgs.append(self._load(im))
            if grade is not None:
                labels_for_degree.append(self._one_hot(grade, 4))
            labels_for_magnification.append(self._one_hot(mag_index, 2))
        return (np.asarray(imgs, np.float32),
                np.asarray(labels_for_degree, np.float32),
                np.asarray(labels_for_magnification, np.float32))

    def read_some_dataSet_for_softmax_in_multi_task3(self, read_img_batch_size):
        """Generator: yield batches of (images, grade one-hots, magnification
        one-hots) across the whole two-level tree, loading at most
        *read_img_batch_size* images from disk per batch to bound memory use.

        Label tags are read from the FILE path here (not the folder path),
        matching the original implementation.  A final smaller batch is
        yielded for any remainder.
        """
        image_set = []
        for folder in self._leaf_dirs():
            image_set.extend(glob.glob(folder + '/*.jpg'))

        def degree_of(path):
            g = self._grade(path)
            return None if g is None else self._one_hot(g, 4)

        def magnification_of(path):
            return self._one_hot(0 if path.find('10X') != -1 else 1, 2)

        for batch in self._batched(image_set, read_img_batch_size,
                                   degree_of, magnification_of):
            yield batch

    def read_single_folder_for_softmax_in_multi_task3(self, read_img_batch_size, copies=1):
        """Generator: yield batches from ONE folder in numeric filename order,
        mainly for heat-map txt generation.

        glob does not sort numerically ('img10' before 'img2'), so the file
        list is re-ordered with Sort first.  The labels are fixed
        placeholders: this use case only needs the model's predictions,
        never the accuracy.
        """
        image_set = Sort(glob.glob(self.pic_input_path + '/*.jpg')).sort_strings_with_emb_numbers()
        extract_image = image_set[:int(len(image_set) * copies)]

        def degree_of(path):
            return self._one_hot(0, 4)  # placeholder label

        def magnification_of(path):
            return self._one_hot(0, 2)  # placeholder label

        for batch in self._batched(extract_image, read_img_batch_size,
                                   degree_of, magnification_of):
            yield batch


class Extract:
    """Draw fixed-size mini-batches from an in-memory data set.

    Any trailing samples that do not fill a whole batch (i.e. the last
    len(inputs) % batch_size items) are dropped, as is conventional for
    epoch truncation.
    """

    def minibatches(self, inputs=None, targets1=None, batch_size=None, shuffle=False):
        """Yield (inputs, targets1) batches of exactly *batch_size* samples.

        Args:
            inputs: indexable array of images (train or test set).
            targets1: indexable array of labels, same length as *inputs*.
            batch_size: number of samples per yielded batch.
            shuffle: when True, samples are drawn in a random order.
        """
        assert len(inputs) == len(targets1)
        total = len(inputs)
        # A random permutation of 0..total-1 when shuffling, else None.
        order = np.random.permutation(total) if shuffle else None
        for start in range(0, total - batch_size + 1, batch_size):
            stop = start + batch_size
            pick = order[start:stop] if shuffle else slice(start, stop)
            yield inputs[pick], targets1[pick]

    def multi_task_minibatches(self, inputs=None, targets1=None, targets2=None,
                               batch_size=None, shuffle=False):
        """Yield (inputs, targets1, targets2) batches for multi-task training.

        Args:
            inputs: indexable array of images (train or test set).
            targets1: labels for the first task, same length as *inputs*.
            targets2: labels for the second task, same length as *inputs*.
            batch_size: number of samples per yielded batch.
            shuffle: when True, samples are drawn in a random order.
        """
        assert len(inputs) == len(targets1) == len(targets2)
        total = len(inputs)
        order = np.random.permutation(total) if shuffle else None
        for start in range(0, total - batch_size + 1, batch_size):
            stop = start + batch_size
            pick = order[start:stop] if shuffle else slice(start, stop)
            yield inputs[pick], targets1[pick], targets2[pick]