from skimage import io, transform
import glob
import os
import numpy as np
import random
from src.utils.sort import Sort
from src.utils.shuffle import Shuffle


class ReadImg:
    """Batch image loader for a two-level image dataset.

    Expected layout: ``root/<class>/<magnification>/<name>.jpg``.  Each
    filename is assumed to embed a differentiation-degree tag
    (``g0``..``g3``) and a magnification tag (``5X``/``10X``/``20X``);
    one-hot labels are derived from those tags.

    width:  width every image is resized to after loading
    height: height every image is resized to after loading
    """

    # Tag order defines both the match priority (first match wins, exactly
    # like the original if/elif chains) and the one-hot index of each class.
    _DEGREE_TAGS = ('g0', 'g1', 'g2', 'g3')
    _MAGNIFICATION_TAGS = ('5X', '10X', '20X')

    def __init__(self, width, height):
        self.width = width
        self.height = height

    @staticmethod
    def _one_hot(tags, path):
        """Return a float32 one-hot vector for the first tag of *tags*
        found in *path*, or ``None`` when no tag matches."""
        for idx, tag in enumerate(tags):
            if path.find(tag) != -1:
                label = np.zeros(len(tags), dtype=np.float32)
                label[idx] = 1.0
                return label
        return None

    def _collect_image_paths(self, pic_input_path):
        """Walk two directory levels below *pic_input_path* (class folder,
        then magnification folder) and return every ``*.jpg`` path found."""
        # First level: sub-directories of the dataset root.
        # (The original `and not os.path.isfile(...)` was redundant.)
        sub_dirs = [pic_input_path + x for x in os.listdir(pic_input_path)
                    if os.path.isdir(pic_input_path + x)]
        # Second level: magnification folders inside each class folder.
        leaf_dirs = []
        for folder in sub_dirs:
            folder += "/"
            for x in os.listdir(folder):
                if os.path.isdir(folder + x):
                    leaf_dirs.append(folder + x)
        image_set = []
        for leaf in leaf_dirs:
            image_set.extend(glob.glob(leaf + '/*.jpg'))
        return image_set

    def _load(self, path):
        """Read one image from *path* and resize it to (width, height)."""
        img = io.imread(path)
        return transform.resize(img, (self.width, self.height))

    def _iter_batches(self, image_set, batch_size):
        """Yield ``(imgs, degree_labels, magnification_labels)`` lists in
        chunks of *batch_size*; the final chunk may be smaller so that a
        dataset whose size is not a multiple of batch_size loses no data.

        NOTE(review): a file whose name carries no recognizable tag still
        contributes an image but no label, so ``imgs`` and the label lists
        can drift out of sync — behavior preserved from the original code;
        assumes every filename is well-formed.  TODO confirm with dataset.
        """
        count = 0
        remaining = len(image_set)
        imgs, degrees, magnifications = [], [], []
        for im in image_set:
            count += 1
            imgs.append(self._load(im))
            degree = self._one_hot(self._DEGREE_TAGS, im)
            if degree is not None:
                degrees.append(degree)
            magnification = self._one_hot(self._MAGNIFICATION_TAGS, im)
            if magnification is not None:
                magnifications.append(magnification)
            # Flush on a full batch, or on the final (possibly partial)
            # batch.  The original `i != batch_size and i == order` guard
            # was redundant and is simplified here.
            if count == batch_size or count == remaining:
                yield imgs, degrees, magnifications
                imgs, degrees, magnifications = [], [], []
                remaining -= batch_size
                count = 0

    def read_data(self, pic_input_path, batch_size, kind, shuffle=False):
        """Single-task batch generator (classic networks).

        pic_input_path: dataset root directory (trailing separator expected)
        batch_size:     number of images per yielded batch
        kind:           1 -> yield degree labels, 2 -> magnification labels
                        (any other value yields nothing, as before)
        shuffle:        shuffle the file list before batching
        """
        image_set = self._collect_image_paths(pic_input_path)
        if shuffle:
            random.shuffle(image_set)
        for imgs, degrees, magnifications in self._iter_batches(image_set, batch_size):
            if kind == 1:
                yield np.asarray(imgs, np.float32), np.asarray(degrees, np.float32)
            elif kind == 2:
                yield np.asarray(imgs, np.float32), np.asarray(magnifications, np.float32)

    def read_data2(self, pic_input_path, batch_size, shuffle=False):
        """Multi-task batch generator: yields images plus both label sets.

        Yields ``(imgs, degree_labels, magnification_labels)`` as float32
        arrays, batched like :meth:`read_data`.
        """
        image_set = self._collect_image_paths(pic_input_path)
        if shuffle:
            random.shuffle(image_set)
        for imgs, degrees, magnifications in self._iter_batches(image_set, batch_size):
            yield (np.asarray(imgs, np.float32),
                   np.asarray(degrees, np.float32),
                   np.asarray(magnifications, np.float32))

    def read_data3(self, pic_input_path, batch_size, shuffle=False):
        """Unlabeled batch generator over a flat folder of ``*.jpg`` files
        (inference on the multi-task network, single task).

        Files are sorted by their embedded numbers first, because
        ``glob.glob`` does not return paths in numeric order.
        """
        image_set = glob.glob(pic_input_path + '/*.jpg')
        image_set = Sort(image_set).sort_strings_with_emb_numbers()
        if shuffle:
            random.shuffle(image_set)
        count = 0
        remaining = len(image_set)
        imgs = []
        for im in image_set:
            count += 1
            imgs.append(self._load(im))
            # Same full-or-final-partial-batch flush as _iter_batches.
            if count == batch_size or count == remaining:
                yield np.asarray(imgs, np.float32)
                imgs = []
                remaining -= batch_size
                count = 0