import glob
import os
import random
import threading

from concurrent.futures import ThreadPoolExecutor, wait
from queue import Queue

import cv2
import loguru
import numpy as np
import torch


class DeeplesionDuDoNetInferMultiLoader(object):
    """Multi-threaded inference loader for DuDoNet-style metal-artifact reduction.

    The input of the current version is only a single GatedConv prediction.

    Worker threads from a small ``ThreadPoolExecutor`` read aligned image
    triples from disk -- ground truth (``Xgt``), metal-affected (``Xma``) and
    linearly-interpolated (``Xli``) -- forward-project each to the sinogram
    domain through ``op_module_fp``, and push per-sample dicts onto an internal
    queue.  Iterating the loader pops ``batch_size`` samples at a time and
    stacks each field into a batch tensor.
    """

    # __train_root_path = r"F:\metal_artifact_data\easy_dataset\train"
    __train_root_path__ = r"E:\data_transform_buffer\metal_artifact\easy_dataset\test"

    def __init__(self, config, op_module_fp):
        """
        Args:
            config: experiment configuration; must expose ``batchSize``,
                ``inferBatchSize``, ``name``, ``len_ratio`` and
                ``S_normalize_coefficient``.
            op_module_fp: forward-projection operator mapping an image tensor
                to its sinogram.
        """
        self.op_module_fp = op_module_fp
        self.config = config
        self.multiple_ratio = 40

        self.logger = loguru.logger
        self.current_read_sample_index = 0
        self.current_true_sample_index = 0
        # NOTE(review): ``self.config == "train"`` compares the config OBJECT
        # to a string, which is always False, so ``inferBatchSize`` is always
        # used.  The intent was probably a mode/phase attribute of the config
        # -- confirm against the config class before changing.
        self.batch_size = self.config.batchSize if self.config == "train" else self.config.inferBatchSize
        self.add_threshold = self.batch_size * self.multiple_ratio  # samples reserved per refill
        self.max_threshold = self.batch_size * 80

        self.logger.info("Start to read path list")

        self.XGateConv_pred_path_list = list()  # predictions of GateConvolution
        self.all_Xgt_path_list = list()  # image-domain ground truth (never projected / back-projected)
        self.all_Xma_path_list = list()  # raw image-domain (metal-affected) images
        self.all_Xli_path_list = list()  # linearly-interpolated images
        self.__letter__ = ['A']
        for c in self.__letter__:
            self.inference_save_root_path = os.path.join(self.__train_root_path__, config.name + "_inference")
            self.all_Xgt_path_list += sorted(
                glob.glob(os.path.join(self.__train_root_path__, "B", "*")))
            self.all_Xma_path_list += sorted(
                glob.glob(os.path.join(self.__train_root_path__, c, "*")))
            self.all_Xli_path_list += sorted(
                glob.glob(os.path.join(self.__train_root_path__, c + "_LI", "*")))
        self.logger.info("Reading completed!")

        # The three lists must stay index-aligned and non-empty.
        assert len(self.all_Xgt_path_list) == len(self.all_Xli_path_list) == len(self.all_Xma_path_list) != 0

        self.path_num = len(self.all_Xgt_path_list)
        self.ratio_path_num = int(self.path_num * self.config.len_ratio)
        self.actual_dataloader_length = int(self.ratio_path_num / self.batch_size)

        self.threading_pool_size = 3
        self.myThreadPool = ThreadPoolExecutor(max_workers=self.threading_pool_size)
        self.sample_index_mutex = threading.Lock()

        self.queue = Queue()

        # Number of threads currently adding data; without this cap a flood of
        # refill tasks would be submitted to the pool.
        self.adding_threading_number = 0
        self.adding_threading_number_mutex = threading.Lock()

        self.S_normalize_coefficient = config.S_normalize_coefficient

    def __len__(self):
        """Number of full batches one epoch yields."""
        return self.actual_dataloader_length

    def __shuffle__(self):
        """Shuffle all three path lists with one shared permutation so that
        (Xgt, Xma, Xli) triples stay aligned."""
        self.logger.info("Start to shuffle...")
        self.shuffled_index_list = random.sample(range(0, self.path_num), self.path_num)
        self.all_Xgt_path_list = [self.all_Xgt_path_list[i] for i in self.shuffled_index_list]
        self.all_Xma_path_list = [self.all_Xma_path_list[i] for i in self.shuffled_index_list]
        self.all_Xli_path_list = [self.all_Xli_path_list[i] for i in self.shuffled_index_list]

    def __iter__(self):
        """Reset per-epoch state and take the first ``len_ratio`` of the paths.

        Shuffling is deliberately disabled for inference so results map back to
        files in a stable order.
        """
        self.current_read_sample_index = 0
        self.add_threshold = self.batch_size * self.multiple_ratio

        self.Xgt_path_list = self.all_Xgt_path_list[0:int(self.config.len_ratio * len(self.all_Xgt_path_list))]
        self.Xma_path_list = self.all_Xma_path_list[
                             0:int(self.config.len_ratio * len(self.all_Xma_path_list))]
        self.Xli_path_list = self.all_Xli_path_list[
                             0:int(self.config.len_ratio * len(self.all_Xli_path_list))]
        return self

    def __next__(self):
        """Return the next batch, refilling the queue from worker threads.

        Raises:
            StopIteration: when the epoch's index range is exhausted and the
                queue can no longer be refilled.
        """
        try:
            # Queue almost empty: refill synchronously, or finish the epoch.
            if self.queue.qsize() < self.batch_size:
                # Atomically test-and-increment so two consumers cannot both
                # decide they are the (only) refiller.
                with self.adding_threading_number_mutex:
                    nobody_adding = self.adding_threading_number == 0
                    if nobody_adding:
                        self.adding_threading_number = self.adding_threading_number + 1
                if nobody_adding:
                    read_index_list = self._get_read_index_()
                    future = self.myThreadPool.submit(self.add_data_to_queue, read_index_list)
                    wait([future])  # block until the refill has finished

                    if future.result() is False:  # nothing left to read
                        del future
                        raise StopIteration
                return_batch_list = self.get_next_batch()
                return return_batch_list
            # Enough samples queued: serve a batch and, when the backlog runs
            # low, kick off an asynchronous refill.
            else:
                return_batch_list = self.get_next_batch()
                tmp_queue_size = self.queue.qsize()
                if tmp_queue_size < self.add_threshold and self.adding_threading_number < self.threading_pool_size:
                    self.logger.info(str(threading.current_thread()) + ": current queue size is " + str(
                        tmp_queue_size) + " need to add")
                    read_index_list = self._get_read_index_()
                    with self.adding_threading_number_mutex:
                        self.adding_threading_number = self.adding_threading_number + 1
                    self.myThreadPool.submit(self.add_data_to_queue, read_index_list)
                return return_batch_list
        except KeyboardInterrupt:
            print("catch the control c order!")
            self.myThreadPool.shutdown()

    def _get_read_index_(self):
        """Reserve the next ``add_threshold`` sample indices (thread-safe).

        ``add_threshold`` shrinks near the end of the epoch; an empty list is
        returned once the epoch's index range is exhausted.
        """
        with self.sample_index_mutex:
            tmp_read_index_list = list()
            if (self.current_read_sample_index + self.add_threshold) > self.ratio_path_num:
                self.add_threshold = self.ratio_path_num - self.current_read_sample_index
            if self.add_threshold == 0:
                return tmp_read_index_list
            for _ in range(self.add_threshold):
                tmp_read_index_list.append(self.current_read_sample_index)
                self.current_read_sample_index = self.current_read_sample_index + 1
        self.logger.info("Get reading index completely!")
        return tmp_read_index_list

    def get_next_batch(self):
        """Pop ``batch_size`` samples off the queue and stack every field.

        Returns:
            list: ``[Xgt, Xli, Xma, Xmetal, Sgt, Sli, Sma, Smetal,
            inference_save_path_list]`` -- each tensor gains a leading batch
            dimension of size ``batch_size``.
        """
        Xgt_list = list()
        Xli_list = list()
        Xma_list = list()
        Xmetal_list = list()
        Sgt_list = list()
        Sli_list = list()
        Sma_list = list()
        Smetal_list = list()
        inference_save_path_list = list()

        for _ in range(self.batch_size):
            data = self.queue.get()
            Xgt = data["Xgt"].unsqueeze(0)
            Xli = data["Xli"].unsqueeze(0)
            Xma = data["Xma"].unsqueeze(0)
            Xmetal = data["Xmetal"].unsqueeze(0)
            Sgt = data["Sgt"].unsqueeze(0)
            Sma = data["Sma"].unsqueeze(0)
            Sli = data["Sli"].unsqueeze(0)
            Smetal = data["Smetal"].unsqueeze(0)

            self.queue.task_done()
            Xgt_list.append(Xgt)
            Xli_list.append(Xli)
            Xma_list.append(Xma)
            Xmetal_list.append(Xmetal)
            Sgt_list.append(Sgt)
            # Bug fix: Sli/Sma were previously appended to each other's lists,
            # silently swapping the two sinograms in every batch.
            Sli_list.append(Sli)
            Sma_list.append(Sma)
            Smetal_list.append(Smetal)
            inference_save_path_list.append(data["inference_save_path"])

        return [torch.cat(Xgt_list, 0),
                torch.cat(Xli_list, 0), torch.cat(Xma_list, 0), torch.cat(Xmetal_list, 0),
                torch.cat(Sgt_list, 0), torch.cat(Sli_list, 0), torch.cat(Sma_list, 0),
                torch.cat(Smetal_list, 0), inference_save_path_list]

    def add_data_to_queue(self, add_index_list):
        """Worker task: load the samples at ``add_index_list`` and enqueue them.

        Each enqueued dict holds the image-domain tensors (Xgt/Xli/Xma/Xmetal),
        their forward-projected sinograms (Sgt/Sli/Sma/Smetal), and the path
        under which the inference result should be saved.

        Returns:
            bool: False when ``add_index_list`` is empty (epoch exhausted),
            True otherwise.
        """
        if len(add_index_list) == 0:
            with self.adding_threading_number_mutex:
                self.adding_threading_number = self.adding_threading_number - 1
            return False
        self.logger.info("start to add " + str(len(add_index_list)) + " samples to queue!")

        # Images in the deeplesion set are 256*256.
        for ii in add_index_list:
            Xgt_path = self.Xgt_path_list[ii]
            Xma_path = self.Xma_path_list[ii]
            Xli_path = self.Xli_path_list[ii]
            inference_save_path = os.path.join(self.inference_save_root_path,
                                               os.path.basename(Xma_path))

            # Grayscale uint8 images are rescaled to [0, 1] before projection
            # and the sinogram is renormalised back to a 0-255 range.
            Xli = cv2.imread(Xli_path, 0)
            Xli = torch.from_numpy(Xli).unsqueeze(0).float()
            Sli = self.op_module_fp(Xli / 255.) / self.S_normalize_coefficient * 255.

            Xma = cv2.imread(Xma_path, 0)
            Xma = torch.from_numpy(Xma).unsqueeze(0).float()
            Sma = self.op_module_fp(Xma / 255.) / self.S_normalize_coefficient * 255.

            # Metal mask: saturated pixels (value 255) are treated as metal;
            # its projection is binarised to a sinogram-domain trace mask.
            Xmetal = torch.where(Xma == 255, 1, 0).float()
            Smetal = self.op_module_fp(Xmetal)
            Smetal = torch.where(Smetal > 0, 1, 0)

            Xgt = cv2.imread(Xgt_path, 0)
            Xgt = torch.from_numpy(Xgt).unsqueeze(0).float()
            Sgt = self.op_module_fp(Xgt / 255.) / self.S_normalize_coefficient * 255.

            self.queue.put({
                "Xgt": Xgt,
                "Xli": Xli,
                "Xma": Xma,
                "Xmetal": Xmetal,
                "Sgt": Sgt,
                "Sma": Sma,
                "Sli": Sli,
                "Smetal": Smetal,
                "inference_save_path": inference_save_path
            })

        with self.adding_threading_number_mutex:
            self.adding_threading_number = self.adding_threading_number - 1

        self.logger.info("After adding, current size of queue is " + str(int(self.queue.qsize())))
        self.logger.info(
            "The current sample index is " + str(self.current_read_sample_index) + "/" + str(self.ratio_path_num))
        return True


class DeeplesionDuDoNetMultiLoader(object):
    """Multi-threaded training loader for DuDoNet-style metal-artifact reduction.

    The input of the current version is only a single GatedConv prediction.

    Worker threads from a small ``ThreadPoolExecutor`` read aligned image
    triples from disk -- ground truth (``Xgt``), metal-affected (``Xma``) and
    linearly-interpolated (``Xli``) -- forward-project each to the sinogram
    domain through ``op_module_fp``, and push per-sample dicts onto an internal
    queue.  Iterating the loader pops ``batch_size`` samples at a time and
    stacks each field into a batch tensor.  Paths are reshuffled every epoch.
    """

    # __train_root_path = r"F:\metal_artifact_data\easy_dataset\train"
    __train_root_path = r"E:\data_transform_buffer\metal_artifact\easy_dataset\train"

    def __init__(self, config, op_module_fp):
        """
        Args:
            config: experiment configuration; must expose ``batchSize``,
                ``inferBatchSize``, ``len_ratio`` and
                ``S_normalize_coefficient``.
            op_module_fp: forward-projection operator mapping an image tensor
                to its sinogram.
        """
        self.config = config
        self.op_module_fp = op_module_fp
        self.multiple_ratio = 40

        self.logger = loguru.logger
        self.current_read_sample_index = 0
        self.current_true_sample_index = 0
        # NOTE(review): ``self.config == "train"`` compares the config OBJECT
        # to a string, which is always False, so ``inferBatchSize`` is always
        # used.  The intent was probably a mode/phase attribute of the config
        # -- confirm against the config class before changing.
        self.batch_size = self.config.batchSize if self.config == "train" else self.config.inferBatchSize
        self.add_threshold = self.batch_size * self.multiple_ratio  # samples reserved per refill
        self.max_threshold = self.batch_size * 80

        self.logger.info("Start to read path list")

        self.XGateConv_pred_path_list = list()  # predictions of GateConvolution
        self.all_Xgt_path_list = list()  # image-domain ground truth (never projected / back-projected)
        self.all_Xma_path_list = list()  # raw image-domain (metal-affected) images
        self.all_Xli_path_list = list()  # linearly-interpolated images
        self.__letter__ = ['A', 'B', 'D']
        for c in self.__letter__:
            # Every metal variant (A/B/D) is paired with the same clean set "C",
            # so the ground-truth list repeats once per variant.
            self.all_Xgt_path_list += sorted(
                glob.glob(os.path.join(self.__train_root_path, "C", "*")))
            self.all_Xma_path_list += sorted(
                glob.glob(os.path.join(self.__train_root_path, c, "*")))
            self.all_Xli_path_list += sorted(
                glob.glob(os.path.join(self.__train_root_path, c + "_LI", "*")))
        self.logger.info("Reading completed!")

        # The three lists must stay index-aligned and non-empty.
        assert len(self.all_Xgt_path_list) == len(self.all_Xli_path_list) == len(self.all_Xma_path_list) != 0

        self.path_num = len(self.all_Xgt_path_list)
        self.ratio_path_num = int(self.path_num * self.config.len_ratio)
        self.actual_dataloader_length = int(self.ratio_path_num / self.batch_size)

        self.threading_pool_size = 3
        self.myThreadPool = ThreadPoolExecutor(max_workers=self.threading_pool_size)
        self.sample_index_mutex = threading.Lock()

        self.queue = Queue()

        # Number of threads currently adding data; without this cap a flood of
        # refill tasks would be submitted to the pool.
        self.adding_threading_number = 0
        self.adding_threading_number_mutex = threading.Lock()

        self.S_normalize_coefficient = config.S_normalize_coefficient

    def __len__(self):
        """Number of full batches one epoch yields."""
        return self.actual_dataloader_length

    def __shuffle__(self):
        """Shuffle all three path lists with one shared permutation so that
        (Xgt, Xma, Xli) triples stay aligned."""
        self.logger.info("Start to shuffle...")
        self.shuffled_index_list = random.sample(range(0, self.path_num), self.path_num)
        self.all_Xgt_path_list = [self.all_Xgt_path_list[i] for i in self.shuffled_index_list]
        self.all_Xma_path_list = [self.all_Xma_path_list[i] for i in self.shuffled_index_list]
        self.all_Xli_path_list = [self.all_Xli_path_list[i] for i in self.shuffled_index_list]

    def __iter__(self):
        """Reset per-epoch state, reshuffle, and take the first ``len_ratio``
        of the paths."""
        self.current_read_sample_index = 0
        self.add_threshold = self.batch_size * self.multiple_ratio

        self.__shuffle__()
        self.Xgt_path_list = self.all_Xgt_path_list[0:int(self.config.len_ratio * len(self.all_Xgt_path_list))]
        self.Xma_path_list = self.all_Xma_path_list[
                             0:int(self.config.len_ratio * len(self.all_Xma_path_list))]
        self.Xli_path_list = self.all_Xli_path_list[
                             0:int(self.config.len_ratio * len(self.all_Xli_path_list))]
        return self

    def __next__(self):
        """Return the next batch, refilling the queue from worker threads.

        Raises:
            StopIteration: when the epoch's index range is exhausted and the
                queue can no longer be refilled.
        """
        try:
            # Queue almost empty: refill synchronously, or finish the epoch.
            if self.queue.qsize() < self.batch_size:
                # Atomically test-and-increment so two consumers cannot both
                # decide they are the (only) refiller.
                with self.adding_threading_number_mutex:
                    nobody_adding = self.adding_threading_number == 0
                    if nobody_adding:
                        self.adding_threading_number = self.adding_threading_number + 1
                if nobody_adding:
                    read_index_list = self._get_read_index_()
                    future = self.myThreadPool.submit(self.add_data_to_queue, read_index_list)
                    wait([future])  # block until the refill has finished

                    if future.result() is False:  # nothing left to read
                        del future
                        raise StopIteration
                return_batch_list = self.get_next_batch()
                return return_batch_list
            # Enough samples queued: serve a batch and, when the backlog runs
            # low, kick off an asynchronous refill.
            else:
                return_batch_list = self.get_next_batch()
                tmp_queue_size = self.queue.qsize()
                if tmp_queue_size < self.add_threshold and self.adding_threading_number < self.threading_pool_size:
                    self.logger.info(str(threading.current_thread()) + ": current queue size is " + str(
                        tmp_queue_size) + " need to add")
                    read_index_list = self._get_read_index_()
                    with self.adding_threading_number_mutex:
                        self.adding_threading_number = self.adding_threading_number + 1
                    self.myThreadPool.submit(self.add_data_to_queue, read_index_list)
                return return_batch_list
        except KeyboardInterrupt:
            print("catch the control c order!")
            self.myThreadPool.shutdown()

    def _get_read_index_(self):
        """Reserve the next ``add_threshold`` sample indices (thread-safe).

        ``add_threshold`` shrinks near the end of the epoch; an empty list is
        returned once the epoch's index range is exhausted.
        """
        with self.sample_index_mutex:
            tmp_read_index_list = list()
            if (self.current_read_sample_index + self.add_threshold) > self.ratio_path_num:
                self.add_threshold = self.ratio_path_num - self.current_read_sample_index
            if self.add_threshold == 0:
                return tmp_read_index_list
            for _ in range(self.add_threshold):
                tmp_read_index_list.append(self.current_read_sample_index)
                self.current_read_sample_index = self.current_read_sample_index + 1
        self.logger.info("Get reading index completely!")
        return tmp_read_index_list

    def get_next_batch(self):
        """Pop ``batch_size`` samples off the queue and stack every field.

        Returns:
            list: ``[Xgt, Xli, Xma, Xmetal, Sgt, Sli, Sma, Smetal]`` -- each
            tensor gains a leading batch dimension of size ``batch_size``.
        """
        Xgt_list = list()
        Xli_list = list()
        Xma_list = list()
        Xmetal_list = list()
        Sgt_list = list()
        Sli_list = list()
        Sma_list = list()
        Smetal_list = list()

        for _ in range(self.batch_size):
            data = self.queue.get()
            Xgt = data["Xgt"].unsqueeze(0)
            Xli = data["Xli"].unsqueeze(0)
            Xma = data["Xma"].unsqueeze(0)
            Xmetal = data["Xmetal"].unsqueeze(0)
            Sgt = data["Sgt"].unsqueeze(0)
            Sma = data["Sma"].unsqueeze(0)
            Sli = data["Sli"].unsqueeze(0)
            Smetal = data["Smetal"].unsqueeze(0)

            self.queue.task_done()
            Xgt_list.append(Xgt)
            Xli_list.append(Xli)
            Xma_list.append(Xma)
            Xmetal_list.append(Xmetal)
            Sgt_list.append(Sgt)
            # Bug fix: Sli/Sma were previously appended to each other's lists,
            # silently swapping the two sinograms in every batch.
            Sli_list.append(Sli)
            Sma_list.append(Sma)
            Smetal_list.append(Smetal)

        return [torch.cat(Xgt_list, 0),
                torch.cat(Xli_list, 0), torch.cat(Xma_list, 0), torch.cat(Xmetal_list, 0),
                torch.cat(Sgt_list, 0), torch.cat(Sli_list, 0), torch.cat(Sma_list, 0),
                torch.cat(Smetal_list, 0)]

    def add_data_to_queue(self, add_index_list):
        """Worker task: load the samples at ``add_index_list`` and enqueue them.

        Each enqueued dict holds the image-domain tensors (Xgt/Xli/Xma/Xmetal)
        and their forward-projected sinograms (Sgt/Sli/Sma/Smetal).

        Returns:
            bool: False when ``add_index_list`` is empty (epoch exhausted),
            True otherwise.
        """
        if len(add_index_list) == 0:
            with self.adding_threading_number_mutex:
                self.adding_threading_number = self.adding_threading_number - 1
            return False
        self.logger.info("start to add " + str(len(add_index_list)) + " samples to queue!")

        # Images in the deeplesion set are 256*256.
        for ii in add_index_list:
            Xgt_path = self.Xgt_path_list[ii]
            Xma_path = self.Xma_path_list[ii]
            Xli_path = self.Xli_path_list[ii]

            # Grayscale uint8 images are rescaled to [0, 1] before projection
            # and the sinogram is renormalised back to a 0-255 range.
            Xli = cv2.imread(Xli_path, 0)
            Xli = torch.from_numpy(Xli).unsqueeze(0).float()
            Sli = self.op_module_fp(Xli / 255.) / self.S_normalize_coefficient * 255.

            Xma = cv2.imread(Xma_path, 0)
            Xma = torch.from_numpy(Xma).unsqueeze(0).float()
            Sma = self.op_module_fp(Xma / 255.) / self.S_normalize_coefficient * 255.

            # Metal mask: saturated pixels (value 255) are treated as metal;
            # its projection is binarised to a sinogram-domain trace mask.
            Xmetal = torch.where(Xma == 255, 1, 0).float()
            Smetal = self.op_module_fp(Xmetal)
            Smetal = torch.where(Smetal > 0, 1, 0)

            Xgt = cv2.imread(Xgt_path, 0)
            Xgt = torch.from_numpy(Xgt).unsqueeze(0).float()
            Sgt = self.op_module_fp(Xgt / 255.) / self.S_normalize_coefficient * 255.

            self.queue.put({
                "Xgt": Xgt,
                "Xli": Xli,
                "Xma": Xma,
                "Xmetal": Xmetal,
                "Sgt": Sgt,
                "Sma": Sma,
                "Sli": Sli,
                "Smetal": Smetal,
            })

        with self.adding_threading_number_mutex:
            self.adding_threading_number = self.adding_threading_number - 1

        self.logger.info("After adding, current size of queue is " + str(int(self.queue.qsize())))
        self.logger.info(
            "The current sample index is " + str(self.current_read_sample_index) + "/" + str(self.ratio_path_num))
        return True
