import csv
import glob
import os.path
import random
import threading
import traceback
from concurrent.futures import ThreadPoolExecutor, wait
from queue import Queue

import cv2
import numpy
import numpy as np
import scipy
import torch
import tqdm
from douloader.dataloaders import MultiDataLoaderBase
from torch.utils.data import DataLoader, Dataset

from utils.CommonUtils import normalize_S_InDuDoNet, normalize_X_InDuDoNet, check_create_dir
from utils.LogUtils import my_logger
from utils.utils import normalize_npy


class GatedConvMultiDataLoader1(object):
    """
    Multi-threaded training loader for the gated-convolution MAR network.

    2022-12-12 14:25:51
    CPU-side loading could not keep up with GPU training speed, so samples
    are read and forward-projected by a background thread pool and buffered
    in a queue.  This version computes the projections on the fly; a sibling
    version uses sinograms that were projected ahead of time.
    """
    __train_root_path__ = r"F:\metal_artifact_data\easy_dataset\train"
    __train_root_path_C__ = r"C:\dataset\metal_artifact\easy_dataset\train"
    __test_root_path__ = r"F:\metal_artifact_data\easy_dataset\test\test"
    # Sub-folder letters holding the metal-corrupted images; folder "C"
    # holds the shared ground-truth images.
    __letter__ = ["A", "B", "D"]

    def __init__(self, config, fbp_obj):
        # config: configuration object; uses BATCH_SIZE, len_ratio, if_shuffle.
        # fbp_obj: projection operator exposing .project(image), used to
        #     produce sinogram-domain arrays from image-domain arrays.
        self.logger = my_logger
        self.current_read_sample_index = 0
        self.current_true_sample_index = 0
        self.add_threshold = config.BATCH_SIZE * 20  # number of samples added per refill
        self.max_threshold = config.BATCH_SIZE * 80
        self.config = config
        self.obj_FBP = fbp_obj
        # test_image = np.ones(shape=(256, 256)) * 255
        # test_S = self.obj_FBP.project(test_image)
        self.Xma_path_list = list()
        self.Xli_path_list = list()
        self.Xgt_path_list = list()

        self.logger.info("Start to read path list")
        for c in self.__letter__:
            self.Xli_path_list += sorted(glob.glob(os.path.join(self.__train_root_path_C__, c + "_LI", "*")))
            # The same ground-truth folder "C" is appended once per letter so
            # Xgt stays index-aligned with the per-letter Xma/Xli lists.
            self.Xgt_path_list += sorted(
                glob.glob(os.path.join(self.__train_root_path_C__, "C", "*")))
            self.Xma_path_list += sorted(
                glob.glob(os.path.join(self.__train_root_path_C__, c, "*")))
        # self.__get_sinogram_max_value__()
        self.logger.info("Reading completed!")
        # Effective dataset size after applying the len_ratio fraction.
        self.data_length = int(len(self.Xma_path_list) * config.len_ratio)
        if config.if_shuffle:
            self.__shuffle__()

        self.Xli_path_list = self.Xli_path_list[
                             0:int(config.len_ratio * len(self.Xli_path_list))]
        self.Xgt_path_list = self.Xgt_path_list[
                             0:int(config.len_ratio * len(self.Xgt_path_list))]
        self.Xma_path_list = self.Xma_path_list[
                             0:int(config.len_ratio * len(self.Xma_path_list))]
        # self.__prepare_sinogram()
        # if not (len(self.metal_sinogram_path_list) == len(self.LI_sinogram_path_list) == len(
        #         self.label_sinogram_path_list) != 0):
        #     self.__prepare_sinogram()
        assert len(self.Xli_path_list) == len(self.Xgt_path_list) == len(
            self.Xma_path_list) != 0

        # self.dataset_type = dataset_type
        # self.queue_max_size = 2  # The number of original data, not cropped data
        self.threading_pool_size = 3
        self.myThreadPool = ThreadPoolExecutor(max_workers=self.threading_pool_size)
        self.sample_index_mutex = threading.Lock()
        self.adding_threading_number_mutex = threading.Lock()
        # Number of refill tasks currently in flight (bounded in __next__ so
        # the pool is not flooded with refill submissions).
        self.adding_threading_number = 0
        self.queue = Queue()
        # self.add_data_to_queue()
        self.batch_size = config.BATCH_SIZE
        # if config.if_data_augmentation:
        #     self.preprocessModule = CustomPreprocessModule(configure.patch_size, configure.aug_probability)

    def __len__(self):
        """Number of full batches per epoch (remainder samples are dropped)."""
        length = int(self.data_length / self.batch_size)
        return length

    def __shuffle__(self):
        """Shuffle all three path lists with one shared permutation so that
        Xli/Xgt/Xma stay index-aligned."""
        self.shuffled_index_list = [i for i in random.sample(range(0, self.data_length), self.data_length)]
        self.Xli_path_list = [self.Xli_path_list[i] for i in self.shuffled_index_list]
        self.Xgt_path_list = [self.Xgt_path_list[i] for i in self.shuffled_index_list]
        self.Xma_path_list = [self.Xma_path_list[i] for i in self.shuffled_index_list]

    def __iter__(self):
        """Reset the read cursor and refill size, then reshuffle for a new epoch."""
        self.current_read_sample_index = 0
        self.add_threshold = self.config.BATCH_SIZE * 20
        self.__shuffle__()
        return self

    def _get_read_index_(self):
        """Reserve the next ``add_threshold`` sample indices under the lock.

        Shrinks ``add_threshold`` near the end of the epoch; returns an
        empty list once the epoch is exhausted.
        """
        self.sample_index_mutex.acquire()
        tmp_read_index_list = list()
        if (self.current_read_sample_index + self.add_threshold) > self.data_length:
            self.add_threshold = self.data_length - self.current_read_sample_index
        if self.add_threshold == 0:
            self.sample_index_mutex.release()
            return tmp_read_index_list
        for i in range(self.add_threshold):
            tmp_read_index_list.append(self.current_read_sample_index)
            self.current_read_sample_index = self.current_read_sample_index + 1
        self.sample_index_mutex.release()
        return tmp_read_index_list

    def get_next_batch(self):
        """Pop ``batch_size`` samples from the queue (blocking) and stack
        every field into a (B, 1, H, W) tensor.

        :return: dict with image-domain batches (Xma/Xli/Xgt/metal),
            sinogram-domain batches (Sma/Sli/Sgt/Tr) and the list of source
            file paths under "Xma_path".
        """
        tmp_Xma_list = list()
        tmp_Xli_list = list()
        tmp_Xgt_list = list()
        tmp_metal_list = list()
        tmp_Sma_list = list()
        tmp_Sli_list = list()
        tmp_Sgt_list = list()
        tmp_Tr_list = list()
        tmp_Xma_path_list = list()
        for i in range(self.batch_size):
            data = self.queue.get()
            # Two unsqueezes add the batch and channel axes: (H, W) -> (1, 1, H, W).
            Xma = torch.from_numpy(data["Xma"]).unsqueeze(0).unsqueeze(0)
            Xli = torch.from_numpy(data["Xli"]).unsqueeze(0).unsqueeze(0)
            Xgt = torch.from_numpy(data["Xgt"]).unsqueeze(0).unsqueeze(0)
            metal = torch.from_numpy(data["metal"]).unsqueeze(0).unsqueeze(0)
            Sma = torch.from_numpy(data["Sma"]).unsqueeze(0).unsqueeze(0)
            Sli = torch.from_numpy(data["Sli"]).unsqueeze(0).unsqueeze(0)
            Sgt = torch.from_numpy(data["Sgt"]).unsqueeze(0).unsqueeze(0)
            Tr = torch.from_numpy(data["Tr"]).unsqueeze(0).unsqueeze(0)
            self.queue.task_done()
            tmp_Xma_list.append(Xma)
            tmp_Xli_list.append(Xli)
            tmp_Xgt_list.append(Xgt)
            tmp_metal_list.append(metal)
            tmp_Sma_list.append(Sma)
            tmp_Sli_list.append(Sli)
            tmp_Sgt_list.append(Sgt)
            tmp_Tr_list.append(Tr)
            tmp_Xma_path_list.append(data["Xma_path"])
        return {
            "Xma": torch.cat(tmp_Xma_list, 0),
            "Xli": torch.cat(tmp_Xli_list, 0),
            "Xgt": torch.cat(tmp_Xgt_list, 0),
            "metal": torch.cat(tmp_metal_list, 0),
            "Sma": torch.cat(tmp_Sma_list, 0),
            "Sli": torch.cat(tmp_Sli_list, 0),
            "Sgt": torch.cat(tmp_Sgt_list, 0),
            "Tr": torch.cat(tmp_Tr_list, 0),
            "Xma_path": tmp_Xma_path_list
        }

    def __next__(self):
        """Return the next batch, scheduling background refills as needed;
        raises StopIteration once the epoch is exhausted."""
        try:
            # one epoch finish
            if self.queue.qsize() < self.batch_size:
                if self.adding_threading_number == 0:  # nobody is refilling yet -- go read ourselves
                    self.adding_threading_number_mutex.acquire()
                    self.adding_threading_number = self.adding_threading_number + 1
                    self.adding_threading_number_mutex.release()
                    read_index_list = self._get_read_index_()
                    future = self.myThreadPool.submit(self.add_data_to_queue, read_index_list)
                    wait([future])  # blocks; if samples remain get_next_batch keeps consuming, otherwise we stop
                    if future.result() is False:
                        del future
                        raise StopIteration

                # someone is already refilling -- just block on the queue
                data_batch = self.get_next_batch()
                return data_batch
            # The rest is enough and supplement the queue if the rest if not enough after read a batch
            else:
                data_batch = self.get_next_batch()
                tmp_queue_size = self.queue.qsize()
                # Kick off an asynchronous refill while fewer than two refill
                # tasks are running and unread samples remain.
                if tmp_queue_size < self.add_threshold and self.adding_threading_number < 2 \
                        and self.current_read_sample_index < self.data_length:
                    my_logger.info(str(threading.current_thread()) + ": current queue size is " + str(
                        tmp_queue_size) + " need to add")
                    read_index_list = self._get_read_index_()
                    self.adding_threading_number_mutex.acquire()
                    self.adding_threading_number = self.adding_threading_number + 1
                    self.adding_threading_number_mutex.release()
                    future = self.myThreadPool.submit(self.add_data_to_queue, read_index_list)
                return data_batch
        except KeyboardInterrupt:
            print("catch the control c order!")
            self.myThreadPool.shutdown()
            # NOTE(review): the interrupt is swallowed here, so the caller
            # receives None instead of a batch -- consider re-raising.

        # my_logger.info(str(len(tmp_data_list)) + " patches has been added to list")

    def add_data_to_queue(self, add_index_list):
        """
        Read, forward-project and normalize the given samples into the queue.

        In the InDuDoNet code the image domain has range [0, 1] and the
        sinogram domain has range [0, 4].

        :param add_index_list: sample indices reserved via _get_read_index_.
        :return: False when the index list is empty (epoch exhausted),
            True otherwise.
        """
        if len(add_index_list) == 0:
            self.adding_threading_number_mutex.acquire()
            self.adding_threading_number = self.adding_threading_number - 1
            self.adding_threading_number_mutex.release()
            return False
        # self.queue_operator_mutex.acquire()
        my_logger.info("start to add " + str(len(add_index_list)) + " samples to queue!")
        for ii in add_index_list:
            Xli_path = self.Xli_path_list[ii]
            Xgt_path = self.Xgt_path_list[ii]
            Xma_path = self.Xma_path_list[ii]

            Xli = cv2.imread(Xli_path, 0)  # read X as 8-bit grayscale (flag 0)
            Xgt = cv2.imread(Xgt_path, 0)
            Xma = cv2.imread(Xma_path, 0)
            # Saturated pixels (grey value 255) are treated as the metal mask.
            metal = np.where(Xma == 255, 255, 0)

            Sli = self.obj_FBP.project(Xli)
            Sma = self.obj_FBP.project(Xma)
            Sgt = self.obj_FBP.project(Xgt)
            Tr = self.obj_FBP.project(metal)

            Sli = normalize_S_InDuDoNet(Sli).astype(np.float32)
            Sma = normalize_S_InDuDoNet(Sma).astype(np.float32)
            Sgt = normalize_S_InDuDoNet(Sgt).astype(np.float32)
            # Binarize the projected metal mask into the sinogram-domain trace.
            Tr = np.where(Tr > 0, 1, 0).astype(np.float32)

            Xli = normalize_X_InDuDoNet(Xli).astype(np.float32)
            Xgt = normalize_X_InDuDoNet(Xgt).astype(np.float32)
            Xma = normalize_X_InDuDoNet(Xma).astype(np.float32)

            # The order is Xma, XLI, Xgt, mask, Sma, SLI, Sgt, Tr
            self.queue.put({
                "Xma": Xma,
                "Xli": Xli,
                "Xgt": Xgt,
                "metal": metal,
                "Sma": Sma,
                "Sli": Sli,
                "Sgt": Sgt,
                "Tr": Tr,
                "Xma_path": Xma_path
            })
        self.adding_threading_number_mutex.acquire()
        self.adding_threading_number = self.adding_threading_number - 1
        self.adding_threading_number_mutex.release()
        my_logger.info("After adding, the current size of queue is " + str(int(self.queue.qsize())))
        my_logger.info(
            "After adding, the current sample index is " + str(self.current_read_sample_index) + "/" + str(
                self.data_length))
        return True

    def __prepare_sinogram(self):
        """
        Pre-compute sinograms before training, because running FBP inside the
        training loop wastes time.
        What needs saving is S_synthetic, Tr, Sgt, Sma; for storage reasons
        the saved arrays are projections of [0, 255] image-domain images.
        A_Sinogram         LI image sinograms
        A_metal_Sinogram   metal sinograms
        A_label_Sinogram   label sinograms
        :return:
        """
        train_data_path_list = list()
        LI_data_path_list = list()
        label_data_path_list = list()

        train_data_path_list += sorted(glob.glob(os.path.join(self.__train_root_path_C__, "A", "*")))
        train_data_path_list += sorted(glob.glob(os.path.join(self.__train_root_path_C__, "B", "*")))
        # NOTE(review): the LI lists use __train_root_path__ (F: drive) while
        # the other lists use __train_root_path_C__ -- confirm this mismatch
        # is intentional.
        LI_data_path_list += sorted(glob.glob(os.path.join(self.__train_root_path__, "A_LI", "*")))
        LI_data_path_list += sorted(glob.glob(os.path.join(self.__train_root_path__, "B_LI", "*")))
        # Folder "C" is appended twice, pairing once with the A list and once
        # with the B list.
        label_data_path_list += sorted(glob.glob(os.path.join(self.__train_root_path_C__, "C", "*")))
        label_data_path_list += sorted(glob.glob(os.path.join(self.__train_root_path_C__, "C", "*")))

        for Xma_path, XLI_path, Xgt_path in tqdm.tqdm(
                zip(train_data_path_list, LI_data_path_list, label_data_path_list)):
            Xma = cv2.imread(Xma_path, 0)
            Xgt = cv2.imread(Xgt_path, 0)
            XLi = cv2.imread(XLI_path, 0)
            metal = np.where(Xma == 255, 255, 0)

            # Output paths mirror the source folder name with a suffix, e.g.
            # "A" -> "A_LI_sinogram"; numpy.save appends the ".npy" extension.
            S_priori_predict_save_path = os.path.join(os.path.dirname(Xma_path) + "_LI_sinogram",
                                                      os.path.basename(Xma_path).split(".")[0])
            Tr_save_path = os.path.join(os.path.dirname(Xma_path) + "_metal_sinogram",
                                        os.path.basename(Xma_path).split(".")[0])
            Sgt_save_path = os.path.join(os.path.dirname(Xma_path) + "_label_sinogram",
                                         os.path.basename(Xma_path).split(".")[0])

            # if os.path.exists(S_priori_predict_save_path + ".npy") and os.path.exists(Tr_save_path + ".npy") \
            #         and os.path.exists(Sgt_save_path + ".npy"):
            #     continue
            print(Sgt_save_path)
            Tr = self.obj_FBP.project(metal)
            Tr = np.where(Tr > 0, 1, 0)
            Sgt = self.obj_FBP.project(Xgt)
            # Sma = self.obj_FBP.project(Xma)
            S_priori_predict = self.obj_FBP.project(XLi)
            # S_synthetic = S_priori_predict * Tr + (1 - Tr) * Sgt  # input for the inpainting network

            # Normalized float arrays would not fit in the available storage
            # (~541 KB per npy), so the projections are saved as small
            # integer types instead.
            """
            此处如果归一化后，当前的存储空间并不能满足要求，每个npy大小在541k左右，
            所以打算在投影后使用uint8进行保存
            """
            S_priori_predict = S_priori_predict.astype(np.int16)
            Sgt = Sgt.astype(np.int16)
            Tr = Tr.astype(np.uint8)

            # S_synthetic = normalize_negative(S_synthetic)
            # S_priori_predict = normalize_negative(S_priori_predict)
            # Sgt = normalize_negative(Sgt)

            numpy.save(S_priori_predict_save_path, S_priori_predict)
            numpy.save(Tr_save_path, Tr)
            # numpy.save(S_synthetic_save_path, S_synthetic)
            numpy.save(Sgt_save_path, Sgt)
            pass

    def __get_sinogram_max_value__(self):
        """Scan the stored sinograms and print the global maximum value.

        NOTE(review): this references self.LI_sinogram_path_list and
        self.label_sinogram_path_list, which are never assigned anywhere in
        this class -- calling it would raise AttributeError.  Its only call
        site in __init__ is commented out.
        """
        S_max_value = 0
        for LI_sinogram_path, label_sinogram in tqdm.tqdm(
                zip(self.LI_sinogram_path_list, self.label_sinogram_path_list)):
            SLI = np.load(LI_sinogram_path)
            Sgt = np.load(label_sinogram)
            S_max_value = np.max([S_max_value, np.max(SLI), np.max(Sgt)])
        print(S_max_value)


class RatFemurInDuDoNetMultiDataLoader(object):
    """
    Multi-threaded training loader for the rat-femur RegGAN step.

    Samples are loaded from .npy files by a background thread pool,
    forward-projected with ``op_module_fp`` and buffered in a queue;
    ``__next__`` pops ``batch_size`` samples and returns them stacked in the
    order [Xgt, Xli, Xma, Xmetal, Sgt, Sli, Sma, Smetal].
    """
    __train_root_path__ = r"F:\metal_artifact_data\RatFemur\S"

    def __init__(self, config, op_module_fp=None):
        """
        :param config: configuration object; uses batchSize and len_ratio.
        :param op_module_fp: forward-projection operator (image tensor ->
            sinogram tensor).  BUGFIX: the original referenced a bare,
            undefined ``op_module_fp`` name inside add_data_to_queue, which
            raised NameError at runtime; it is now a constructor argument,
            matching RatFemurInDuDoNetMultiInferDataLoader.  It must be
            supplied for training; the default only preserves the old
            two-argument call signature.
        """
        self.op_module_fp = op_module_fp

        self.multiple_ratio = 40
        self.path_start_index = 167  # TODO change to 0 and 768 for the next training run
        self.path_end_index = 426
        self.logger = my_logger
        self.current_read_sample_index = 0
        self.current_true_sample_index = 0
        self.batch_size = config.batchSize
        self.add_threshold = self.batch_size * self.multiple_ratio  # samples added per refill
        self.max_threshold = self.batch_size * 80
        self.config = config

        self.logger.info("Start to read path list")

        self.XGateConv_pred_path_list = list()  # GateConvolution prediction results
        self.Xgt_path_list = list()  # image-domain ground truth (never projected/backprojected)
        self.Xma_path_list = list()  # original metal-corrupted images
        self.Xli_path_list = list()  # linear-interpolation corrected images

        # Sample dirs are "*nometal*" folders; metal dirs are "*.metal*"
        # folders excluding the "-in_femur" variants.
        sample_path_list = [x for x in glob.glob(os.path.join(self.__train_root_path__, "*")) if
                            os.path.isdir(x) and x.find("nometal") != -1]
        metal_path_list = [x for x in glob.glob(os.path.join(self.__train_root_path__, "*")) if
                           os.path.isdir(x) and x.find(".metal") != -1 and x.find("-in_femur") == -1]
        for sample_path in sample_path_list:
            for metal_path in metal_path_list:
                metal_name = os.path.basename(metal_path).split("-")[-1]

                tmp_path_list = sorted(
                    glob.glob(os.path.join(sample_path, metal_name + "_synthesis_project_recon", "*")))
                self.Xma_path_list += tmp_path_list[self.path_start_index:self.path_end_index]

                tmp_path_list = sorted(glob.glob(os.path.join(sample_path, "reconstruction_npy", "*")))
                self.Xgt_path_list += tmp_path_list[self.path_start_index:self.path_end_index]

                tmp_path_list = sorted(glob.glob(os.path.join(sample_path, metal_name + "_LI_project_recon_mat", "*")))
                self.Xli_path_list += tmp_path_list[self.path_start_index:self.path_end_index]

        self.logger.info("Reading completed!")

        # Optionally keep only a leading fraction of the dataset.
        self.Xgt_path_list = self.Xgt_path_list[0:int(config.len_ratio * len(self.Xgt_path_list))]
        self.Xma_path_list = self.Xma_path_list[0:int(config.len_ratio * len(self.Xma_path_list))]
        self.Xli_path_list = self.Xli_path_list[0:int(config.len_ratio * len(self.Xli_path_list))]

        assert len(self.Xgt_path_list) == len(self.Xli_path_list) == len(self.Xma_path_list) != 0
        self.data_length = len(self.Xgt_path_list)

        self.threading_pool_size = 3
        self.myThreadPool = ThreadPoolExecutor(max_workers=self.threading_pool_size)
        self.sample_index_mutex = threading.Lock()

        self.queue = Queue()

        # Number of refill tasks in flight; bounded so the pool is not
        # flooded with refill submissions.
        self.adding_threading_number = 0
        self.adding_threading_number_mutex = threading.Lock()

    def __len__(self):
        """Number of full batches per epoch (remainder samples are dropped)."""
        return int(self.data_length / self.batch_size)

    def __shuffle__(self):
        """Shuffle the three path lists with one shared permutation so that
        Xgt/Xma/Xli stay index-aligned."""
        self.shuffled_index_list = random.sample(range(0, self.data_length), self.data_length)
        self.Xgt_path_list = [self.Xgt_path_list[i] for i in self.shuffled_index_list]
        self.Xma_path_list = [self.Xma_path_list[i] for i in self.shuffled_index_list]
        self.Xli_path_list = [self.Xli_path_list[i] for i in self.shuffled_index_list]

    def __iter__(self):
        """Reset the read cursor and refill size, then reshuffle for a new epoch."""
        self.current_read_sample_index = 0
        self.add_threshold = self.batch_size * self.multiple_ratio
        self.__shuffle__()
        return self

    def __next__(self):
        """Return the next batch, scheduling background refills as needed.

        Raises StopIteration once the epoch is exhausted.  BUGFIX: a caught
        KeyboardInterrupt is now re-raised after shutting down the pool; the
        original swallowed it and implicitly returned None as a batch.
        """
        try:
            if self.queue.qsize() < self.batch_size:
                # Queue nearly empty: if nobody is refilling, run a blocking
                # refill ourselves; an empty index list signals epoch end.
                if self.adding_threading_number == 0:
                    with self.adding_threading_number_mutex:
                        self.adding_threading_number += 1
                    read_index_list = self._get_read_index_()
                    future = self.myThreadPool.submit(self.add_data_to_queue, read_index_list)
                    wait([future])  # block until the refill has finished
                    if future.result() is False:
                        del future
                        raise StopIteration
                # Either our refill succeeded or another thread is adding --
                # get_next_batch blocks on the queue in both cases.
                return self.get_next_batch()
            else:
                data_batch = self.get_next_batch()
                tmp_queue_size = self.queue.qsize()
                # Refill asynchronously while the queue is draining.
                if tmp_queue_size < self.add_threshold and self.adding_threading_number < self.threading_pool_size:
                    my_logger.info(str(threading.current_thread()) + ": current queue size is " + str(
                        tmp_queue_size) + " need to add")
                    read_index_list = self._get_read_index_()
                    with self.adding_threading_number_mutex:
                        self.adding_threading_number += 1
                    self.myThreadPool.submit(self.add_data_to_queue, read_index_list)
                return data_batch
        except KeyboardInterrupt:
            print("catch the control c order!")
            self.myThreadPool.shutdown()
            raise

    def _get_read_index_(self):
        """Reserve the next ``add_threshold`` sample indices under the lock.

        Shrinks ``add_threshold`` near the end of the epoch and returns an
        empty list once the epoch is exhausted.
        """
        with self.sample_index_mutex:
            if (self.current_read_sample_index + self.add_threshold) > self.data_length:
                self.add_threshold = self.data_length - self.current_read_sample_index
            if self.add_threshold == 0:
                return []
            start = self.current_read_sample_index
            self.current_read_sample_index = start + self.add_threshold
            tmp_read_index_list = list(range(start, self.current_read_sample_index))
        self.logger.info("Get reading index completely!")
        return tmp_read_index_list

    def get_next_batch(self):
        """Pop ``batch_size`` samples from the queue (blocking) and stack
        each field along the batch axis.

        :return: list of tensors in the order
            [Xgt, Xli, Xma, Xmetal, Sgt, Sli, Sma, Smetal].
        """
        Xgt_list, Xli_list, Xma_list, Xmetal_list = [], [], [], []
        Sgt_list, Sli_list, Sma_list, Smetal_list = [], [], [], []

        for _ in range(self.batch_size):
            data = self.queue.get()
            self.queue.task_done()
            Xgt_list.append(data["Xgt"].unsqueeze(0))
            Xli_list.append(data["Xli"].unsqueeze(0))
            Xma_list.append(data["Xma"].unsqueeze(0))
            Xmetal_list.append(data["Xmetal"].unsqueeze(0))
            Sgt_list.append(data["Sgt"].unsqueeze(0))
            # BUGFIX: the original appended Sma into Sli_list and Sli into
            # Sma_list, silently swapping the two sinograms in the returned
            # batch; each list now receives its own field.
            Sli_list.append(data["Sli"].unsqueeze(0))
            Sma_list.append(data["Sma"].unsqueeze(0))
            Smetal_list.append(data["Smetal"].unsqueeze(0))

        return [torch.cat(Xgt_list, 0),
                torch.cat(Xli_list, 0), torch.cat(Xma_list, 0), torch.cat(Xmetal_list, 0),
                torch.cat(Sgt_list, 0), torch.cat(Sli_list, 0), torch.cat(Sma_list, 0),
                torch.cat(Smetal_list, 0)]

    def add_data_to_queue(self, add_index_list):
        """Load, normalize and forward-project the given samples into the queue.

        Per the InDuDoNet source, both the X (image) and S (sinogram)
        domains are normalized to [0, 255]; the empirical ``/ 18. * 255.``
        factor rescales the raw projection back into that range.

        :param add_index_list: sample indices reserved via _get_read_index_.
        :return: False when the index list is empty (epoch exhausted),
            True otherwise.
        """
        if len(add_index_list) == 0:
            with self.adding_threading_number_mutex:
                self.adding_threading_number -= 1
            return False
        my_logger.info("start to add " + str(len(add_index_list)) + " samples to queue!")

        for ii in add_index_list:
            Xgt_path = self.Xgt_path_list[ii]
            Xma_path = self.Xma_path_list[ii]
            Xli_path = self.Xli_path_list[ii]

            Xli = normalize_npy(np.load(Xli_path))
            Xli = torch.from_numpy(Xli).unsqueeze(0)
            Sli = self.op_module_fp(Xli / 255.) / 18. * 255.

            Xma = normalize_npy(np.load(Xma_path))
            # Saturated pixels (value 255) are treated as the metal mask.
            Xmetal = np.where(Xma == 255, 1, 0)
            Xma = torch.from_numpy(Xma).unsqueeze(0)
            Sma = self.op_module_fp(Xma / 255.) / 18. * 255.
            Xmetal = torch.from_numpy(Xmetal).unsqueeze(0)
            Smetal = self.op_module_fp(Xmetal)
            # Binary complement of the metal trace: 1 outside, 0 inside.
            Smetal = 1 - torch.where(Smetal > 0, 1, 0)

            Xgt = normalize_npy(np.load(Xgt_path))
            Xgt = torch.from_numpy(Xgt).unsqueeze(0)
            Sgt = self.op_module_fp(Xgt / 255.) / 18. * 255.

            self.queue.put({
                "Xgt": Xgt,
                "Xli": Xli,
                "Xma": Xma,
                "Xmetal": Xmetal,
                "Sgt": Sgt,
                "Sma": Sma,
                "Sli": Sli,
                "Smetal": Smetal
            })

        with self.adding_threading_number_mutex:
            self.adding_threading_number -= 1

        my_logger.info("After adding, current size of queue is " + str(int(self.queue.qsize())))
        my_logger.info(
            "The current sample index is " + str(self.current_read_sample_index) + "/" + str(self.data_length))
        return True


class RatFemurInDuDoNetMultiInferDataLoader(object):
    """
    此类用来完成老鼠股骨数据的InDuDoNet的推理步骤
    当前版本的input包括3个channel Xma, XGateConv_predict, Xli
    """
    __train_root_path__ = r"E:\data_transform_buffer\RatFemur\S"

    def __init__(self, config, op_module_fp):

        self.op_module_fp = op_module_fp

        self.multiple_ratio = 40
        self.path_start_index = 167  # Todo 下次训练时改为0 和768
        self.path_end_index = 426
        self.logger = my_logger
        self.current_read_sample_index = 0
        self.current_true_sample_index = 0
        self.batch_size = config.batchSize
        self.add_threshold = self.batch_size * self.multiple_ratio  # 每次添加的数据的量
        self.max_threshold = self.batch_size * 80
        self.config = config

        self.inference_save_path = ""

        self.logger.info("Start to read path list")

        self.Xgt_path_list = list()  # 图像域的ground truth 未经过投影和反投影的
        self.Xma_path_list = list()  # 原始图像域图像
        self.Xli_path_list = list()

        sample_path_list = [x for x in glob.glob(os.path.join(self.__train_root_path__, "*")) if
                            os.path.isdir(x) and x.find("nometal") != -1]
        metal_path_list = [x for x in glob.glob(os.path.join(self.__train_root_path__, "*")) if
                           os.path.isdir(x) and x.find(".metal") != -1 and x.find("-in_femur") != -1]
        for sample_path in sample_path_list:
            for metal_path in metal_path_list:
                metal_name = os.path.basename(metal_path).split("-")[-1]

                tmp_path_list = sorted(
                    glob.glob(os.path.join(sample_path, metal_name + "_synthesis_project_recon", "*")))
                self.Xma_path_list += tmp_path_list[self.path_start_index:self.path_end_index]

                self.inference_save_path = os.path.join(os.path.dirname(os.path.dirname(tmp_path_list[0])),
                                                        metal_name + "_InDuDoNet_inference" + config.version)
                check_create_dir(self.inference_save_path)

                tmp_path_list = sorted(glob.glob(os.path.join(sample_path, "reconstruction_npy", "*")))
                self.Xgt_path_list += tmp_path_list[self.path_start_index:self.path_end_index]

                tmp_path_list = sorted(glob.glob(os.path.join(sample_path, metal_name + "_LI_project_recon_mat", "*")))
                self.Xli_path_list += tmp_path_list[self.path_start_index:self.path_end_index]

        self.logger.info("Reading completed!")

        self.Xgt_path_list = self.Xgt_path_list[0:int(config.len_ratio * len(self.Xgt_path_list))]

        self.Xma_path_list = self.Xma_path_list[
                             0:int(config.len_ratio * len(self.Xma_path_list))]
        self.Xli_path_list = self.Xli_path_list[
                             0:int(config.len_ratio * len(self.Xli_path_list))]

        assert len(self.Xgt_path_list) == len(self.Xli_path_list) == len(self.Xma_path_list) != 0
        self.data_length = len(self.Xgt_path_list)
        # self.__shuffle__()

        # self.dataset_type = dataset_type
        # self.queue_max_size = 2  # The number of original data, not cropped data
        self.threading_pool_size = 3
        self.myThreadPool = ThreadPoolExecutor(max_workers=self.threading_pool_size)
        self.sample_index_mutex = threading.Lock()

        self.queue = Queue()

        self.adding_threading_number = 0  # 正在添加数据的线程数量，不控制的话会提交大量的提交数据任务
        self.adding_threading_number_mutex = threading.Lock()
        # self.add_data_to_queue()
        # if config.if_data_augmentation:
        #     self.preprocessModule = CustomPreprocessModule(configure.patch_size, configure.aug_probability)

    def __len__(self):
        length = int(self.data_length / self.batch_size)
        return length

    def __shuffle__(self):
        self.shuffled_index_list = [i for i in random.sample(range(0, self.data_length), self.data_length)]
        self.Xgt_path_list = [self.Xgt_path_list[i] for i in self.shuffled_index_list]
        # self.XGateConv_pred_path_list = [self.XGateConv_pred_path_list[i] for i in self.shuffled_index_list]
        self.Xma_path_list = [self.Xma_path_list[i] for i in self.shuffled_index_list]
        self.Xli_path_list = [self.Xli_path_list[i] for i in self.shuffled_index_list]

    def __iter__(self):
        self.current_read_sample_index = 0
        self.add_threshold = self.batch_size * self.multiple_ratio
        # self.__shuffle__()
        return self

    def __next__(self):
        try:
            # one epoch finish
            if self.queue.qsize() < self.batch_size:
                if self.adding_threading_number == 0:  # 还没人在加  自己去读或者已经没有剩下的了
                    self.adding_threading_number_mutex.acquire()
                    self.adding_threading_number = self.adding_threading_number + 1
                    self.adding_threading_number_mutex.release()
                    read_index_list = self._get_read_index_()
                    future = self.myThreadPool.submit(self.add_data_to_queue, read_index_list)
                    wait([future])  # 会阻塞  get_next_batch方法如果有剩余会继续添加，没有剩余会抛出异常

                    if future.result() is False:
                        del future
                        raise StopIteration
                return_batch_list = self.get_next_batch()
                return return_batch_list
            # The rest is enough and supplement the queue if the rest if not enough after read a batch
            else:
                return_batch_list = self.get_next_batch()
                tmp_queue_size = self.queue.qsize()
                if tmp_queue_size < self.add_threshold and self.adding_threading_number < self.threading_pool_size:
                    my_logger.info(str(threading.current_thread()) + ": current queue size is " + str(
                        tmp_queue_size) + " need to add")
                    read_index_list = self._get_read_index_()
                    self.adding_threading_number_mutex.acquire()
                    self.adding_threading_number = self.adding_threading_number + 1
                    self.adding_threading_number_mutex.release()
                    future = self.myThreadPool.submit(self.add_data_to_queue, read_index_list)
                return return_batch_list
        except KeyboardInterrupt:
            print("catch the control c order!")
            self.myThreadPool.shutdown()

        # my_logger.info(str(len(tmp_data_list)) + " patches has been added to list")

    def _get_read_index_(self):

        self.sample_index_mutex.acquire()
        tmp_read_index_list = list()
        if (self.current_read_sample_index + self.add_threshold) > self.data_length:
            self.add_threshold = self.data_length - self.current_read_sample_index
        if self.add_threshold == 0:
            self.sample_index_mutex.release()
            return tmp_read_index_list
        for i in range(self.add_threshold):
            tmp_read_index_list.append(self.current_read_sample_index)
            self.current_read_sample_index = self.current_read_sample_index + 1
        self.sample_index_mutex.release()
        self.logger.info("Get reading index completely!")
        return tmp_read_index_list

    def get_next_batch(self):

        Xgt_list = list()
        Xli_list = list()
        Xma_list = list()
        Xmetal_list = list()
        Sgt_list = list()
        Sli_list = list()
        Sma_list = list()
        Smetal_list = list()
        save_path_list = list()
        """
        "Xgt": Xgt,
                "Xli": Xli,
                "Xma": Xma,
                "Xmetal": Xmetal,
                "Sgt": Sgt,
                "Sma": Sma,
                "Sli": Sli,
                "Smetal": Smetal"""

        for i in range(self.batch_size):
            data = self.queue.get()
            Xgt = data["Xgt"].unsqueeze(0)
            Xli = data["Xli"].unsqueeze(0)
            Xma = data["Xma"].unsqueeze(0)
            Xmetal = data["Xmetal"].unsqueeze(0)
            Sgt = data["Sgt"].unsqueeze(0)
            Sma = data["Sma"].unsqueeze(0)
            Sli = data["Sli"].unsqueeze(0)
            Smetal = data["Smetal"].unsqueeze(0)
            save_path = data["save_path"]

            self.queue.task_done()
            Xgt_list.append(Xgt)
            Xli_list.append(Xli)
            Xma_list.append(Xma)
            Xmetal_list.append(Xmetal)
            Sgt_list.append(Sgt)
            Sli_list.append(Sma)
            Sma_list.append(Sli)
            Smetal_list.append(Smetal)
            save_path_list.append(save_path)

        return [torch.cat(Xgt_list, 0),
                torch.cat(Xli_list, 0), torch.cat(Xma_list, 0), torch.cat(Xmetal_list, 0),
                torch.cat(Sgt_list, 0), torch.cat(Sli_list, 0), torch.cat(Sma_list, 0),
                torch.cat(Smetal_list, 0), save_path_list]

    def add_data_to_queue(self, add_index_list):
        if len(add_index_list) == 0:
            self.adding_threading_number_mutex.acquire()
            self.adding_threading_number = self.adding_threading_number - 1
            self.adding_threading_number_mutex.release()
            return False
        my_logger.info("start to add " + str(len(add_index_list)) + " samples to queue!")

        #  self.sample_index_mutex.acquire()
        # self.queue_operator_mutex.acquire()
        """
        按照InDuDoNet的源码，X与S域的数据都应该归一化到0,255之间
        此处的18经过遍历所有的投影域数据得来，为保证在输入到网络时，S的值域能够在0到255之间
        """
        for ii in add_index_list:
            Xgt_path = self.Xgt_path_list[ii]
            Xma_path = self.Xma_path_list[ii]
            Xli_path = self.Xli_path_list[ii]
            save_path = os.path.join(self.inference_save_path, os.path.basename(Xma_path).replace(".npy", ".png"))
            # save_path = Xma_path.replace("synthesis_project_recon", "InDuDoNet_inference"+self.config.version).replace(".npy", ".png")

            Xli = np.load(Xli_path)
            Xli = normalize_npy(Xli)
            # Xli = np.pad(Xli, [(1, 1), (1, 1)], mode="reflect")
            Xli = torch.from_numpy(Xli).unsqueeze(0)
            Sli = self.op_module_fp(Xli / 255.) / 18. * 255.

            # Xli = torch.from_numpy(Xli).unsqueeze(0)

            Xma = np.load(Xma_path)
            Xma = normalize_npy(Xma)
            # Xma = np.pad(Xma, [(1, 1), (1, 1)], mode="reflect")

            Xmetal = np.where(Xma == 255, 1, 0)
            Xma = torch.from_numpy(Xma).unsqueeze(0)
            Sma = self.op_module_fp(Xma / 255.) / 18. * 255.
            Xmetal = torch.from_numpy(Xmetal).unsqueeze(0)
            Smetal = self.op_module_fp(Xmetal)
            Smetal = 1 - torch.where(Smetal > 0, 1, 0)

            Xgt = np.load(Xgt_path)
            Xgt = normalize_npy(Xgt)  # normalize to [-1,1]
            # Xgt = np.pad(Xgt, [(1, 1), (1, 1)], mode="reflect")
            Xgt = torch.from_numpy(Xgt).unsqueeze(0)
            Sgt = self.op_module_fp(Xgt / 255.) / 18. * 255.
            # print(torch.max(Sgt), torch.min(Sgt))
            # print("Dataloader Sma: ", torch.max(Sma), torch.min(Sma))
            # print("Dataloader Sli: ", torch.max(Sli), torch.min(Sli))
            # print("Dataloader Sgt: ", torch.max(Sgt), torch.min(Sgt))
            # print("Dataloader Smetal: ", torch.max(Smetal), torch.min(Smetal))
            # Xma = torch.from_numpy(Xma).unsqueeze(0)
            self.queue.put({
                "Xgt": Xgt,
                "Xli": Xli,
                "Xma": Xma,
                "Xmetal": Xmetal,
                "Sgt": Sgt,
                "Sma": Sma,
                "Sli": Sli,
                "Smetal": Smetal,
                "save_path": save_path
            })

        self.adding_threading_number_mutex.acquire()
        self.adding_threading_number = self.adding_threading_number - 1
        self.adding_threading_number_mutex.release()
        # self.queue_operator_mutex.release()

        my_logger.info("After adding, current size of queue is " + str(int(self.queue.qsize())))
        my_logger.info(
            "The current sample index is " + str(self.current_read_sample_index) + "/" + str(self.data_length))
        return True


class RealRatFemurInDuDoNetMultiInferDataLoader(object):
    """
    Performs the InDuDoNet inference step on real (non-synthetic) rat femur data.
    The input of the current version contains 3 channels: Xma, XGateConv_predict, Xli.

    A small thread pool loads .npy slices, forward-projects them into the
    sinogram domain and pushes per-sample dicts into an internal queue;
    iterating over this object drains the queue one batch at a time.
    """
    __train_root_path__ = r"F:\metal_artifact_data\RatFemur\S"

    def __init__(self, config, op_module_fp=None):
        """
        :param config: experiment configuration (uses batchSize and len_ratio).
        :param op_module_fp: forward-projection operator mapping image-domain
            tensors to sinogram-domain tensors. Bug fix: ``add_data_to_queue``
            previously referenced an undefined global ``op_module_fp`` and
            raised NameError at the first projection; the operator is now
            injected here (optional so existing one-argument call sites still
            construct the loader, matching the sibling loaders' interface).
        """
        self.op_module_fp = op_module_fp

        self.multiple_ratio = 40
        self.path_start_index = 167  # TODO: change to 0 and 768 for the next training run
        self.path_end_index = 426
        self.logger = my_logger
        self.current_read_sample_index = 0
        self.current_true_sample_index = 0
        self.batch_size = config.batchSize
        self.add_threshold = self.batch_size * self.multiple_ratio  # number of samples added per refill
        self.max_threshold = self.batch_size * 80
        self.config = config

        self.logger.info("Start to read path list")

        self.XGateConv_pred_path_list = list()  # predictions produced by GateConvolution
        self.Xgt_path_list = list()  # image-domain ground truth (never projected / back-projected)
        self.Xma_path_list = list()  # original image-domain (metal-artifact) images
        self.Xli_path_list = list()

        # hard-coded single real-data sample; metal_name is parsed from the dir name
        sample_path_list = [r"F:\metal_artifact_data\RatFemur\S\1.2.0.20211011.154452.metal-in_femur"]
        metal_path_list = [r"F:\metal_artifact_data\RatFemur\S\1.2.0.20211011.154452.metal-in_femur"]
        for sample_path in sample_path_list:
            for metal_path in metal_path_list:
                metal_name = os.path.basename(metal_path).split("-")[-1]

                tmp_path_list = sorted(glob.glob(os.path.join(sample_path, "reconstruction_npy", "*")))
                self.Xma_path_list += tmp_path_list[self.path_start_index:self.path_end_index]

                tmp_path_list = sorted(glob.glob(os.path.join(sample_path, metal_name + "_LI_project_recon_mat", "*")))
                self.Xli_path_list += tmp_path_list[self.path_start_index:self.path_end_index]
                # NOTE(review): inference_save_path is created on disk but never
                # stored on self; add_data_to_queue derives save_path from the
                # Xma path instead — verify this is intended.
                inference_save_path = os.path.join(os.path.dirname(os.path.dirname(tmp_path_list[0])),
                                                   metal_name + "_InDuDoNet_inference")
                check_create_dir(inference_save_path)

        self.logger.info("Reading completed!")

        # keep only the first len_ratio fraction of each path list
        self.Xma_path_list = self.Xma_path_list[
                             0:int(config.len_ratio * len(self.Xma_path_list))]
        self.Xli_path_list = self.Xli_path_list[
                             0:int(config.len_ratio * len(self.Xli_path_list))]

        assert len(self.Xli_path_list) == len(self.Xma_path_list) != 0
        self.data_length = len(self.Xma_path_list)

        self.threading_pool_size = 3
        self.myThreadPool = ThreadPoolExecutor(max_workers=self.threading_pool_size)
        self.sample_index_mutex = threading.Lock()

        self.queue = Queue()

        # number of producer threads currently adding data; without this cap a
        # flood of add-data tasks would be submitted
        self.adding_threading_number = 0
        self.adding_threading_number_mutex = threading.Lock()

    def __len__(self) -> int:
        # number of full batches per epoch
        length = int(self.data_length / self.batch_size)
        return length

    def __shuffle__(self):
        # reorder both path lists with one shared permutation so that
        # Xma/Xli stay aligned index-by-index
        self.shuffled_index_list = [i for i in random.sample(range(0, self.data_length), self.data_length)]
        self.Xma_path_list = [self.Xma_path_list[i] for i in self.shuffled_index_list]
        self.Xli_path_list = [self.Xli_path_list[i] for i in self.shuffled_index_list]

    def __iter__(self):
        # reset the read cursor and refill size at the start of every epoch
        self.current_read_sample_index = 0
        self.add_threshold = self.batch_size * self.multiple_ratio
        return self

    def __next__(self):
        try:
            # one epoch finish
            if self.queue.qsize() < self.batch_size:
                if self.adding_threading_number == 0:  # nobody is producing yet: read ourselves, or nothing is left
                    self.adding_threading_number_mutex.acquire()
                    self.adding_threading_number = self.adding_threading_number + 1
                    self.adding_threading_number_mutex.release()
                    read_index_list = self._get_read_index_()
                    future = self.myThreadPool.submit(self.add_data_to_queue, read_index_list)
                    # blocks; if samples remain get_next_batch keeps going,
                    # otherwise StopIteration is raised below
                    wait([future])

                    if future.result() is False:
                        del future
                        raise StopIteration
                return_batch_list = self.get_next_batch()
                return return_batch_list
            # The rest is enough and supplement the queue if the rest if not enough after read a batch
            else:
                return_batch_list = self.get_next_batch()
                tmp_queue_size = self.queue.qsize()
                if tmp_queue_size < self.add_threshold and self.adding_threading_number < self.threading_pool_size:
                    my_logger.info(str(threading.current_thread()) + ": current queue size is " + str(
                        tmp_queue_size) + " need to add")
                    read_index_list = self._get_read_index_()
                    self.adding_threading_number_mutex.acquire()
                    self.adding_threading_number = self.adding_threading_number + 1
                    self.adding_threading_number_mutex.release()
                    future = self.myThreadPool.submit(self.add_data_to_queue, read_index_list)
                return return_batch_list
        except KeyboardInterrupt:
            print("catch the control c order!")
            self.myThreadPool.shutdown()

    def _get_read_index_(self) -> list:
        """Reserve (under the sample-index lock) the next chunk of sample
        indices to load; the final chunk is shrunk at the end of the epoch and
        an empty list signals that the epoch is exhausted."""
        self.sample_index_mutex.acquire()
        tmp_read_index_list = list()
        if (self.current_read_sample_index + self.add_threshold) > self.data_length:
            self.add_threshold = self.data_length - self.current_read_sample_index
        if self.add_threshold == 0:
            self.sample_index_mutex.release()
            return tmp_read_index_list
        for i in range(self.add_threshold):
            tmp_read_index_list.append(self.current_read_sample_index)
            self.current_read_sample_index = self.current_read_sample_index + 1
        self.sample_index_mutex.release()
        self.logger.info("Get reading index completely!")
        return tmp_read_index_list

    def get_next_batch(self) -> list:
        """Pop ``batch_size`` sample dicts from the queue, stack each field
        along a new batch dimension and return
        [Xli, Xma, Xmetal, <Sli slot>, <Sma slot>, Smetal, save_paths]."""
        Xli_list = list()
        Xma_list = list()
        Xmetal_list = list()
        Sli_list = list()
        Sma_list = list()
        Smetal_list = list()
        save_path_list = list()

        for i in range(self.batch_size):
            data = self.queue.get()
            Xli = data["Xli"].unsqueeze(0)
            Xma = data["Xma"].unsqueeze(0)
            Xmetal = data["Xmetal"].unsqueeze(0)
            Sma = data["Sma"].unsqueeze(0)
            Sli = data["Sli"].unsqueeze(0)
            Smetal = data["Smetal"].unsqueeze(0)
            save_path = data["save_path"]

            self.queue.task_done()
            Xli_list.append(Xli)
            Xma_list.append(Xma)
            Xmetal_list.append(Xmetal)
            # NOTE(review): Sli_list receives Sma and Sma_list receives Sli, so the
            # "Sli" position of the returned batch actually carries Sma data (and
            # vice versa). The same cross-fill exists in the sibling loaders in this
            # file, so downstream code may rely on it — confirm before changing.
            Sli_list.append(Sma)
            Sma_list.append(Sli)
            Smetal_list.append(Smetal)
            save_path_list.append(save_path)

        return [torch.cat(Xli_list, 0), torch.cat(Xma_list, 0), torch.cat(Xmetal_list, 0),
                torch.cat(Sli_list, 0), torch.cat(Sma_list, 0),
                torch.cat(Smetal_list, 0), save_path_list]

    def add_data_to_queue(self, add_index_list) -> bool:
        """Producer task: load, normalize and forward-project the samples for
        ``add_index_list`` and push one dict per sample into the queue.
        Returns False (after decrementing the producer count) when there is
        nothing left to add, signalling the end of the epoch to ``__next__``."""
        if len(add_index_list) == 0:
            self.adding_threading_number_mutex.acquire()
            self.adding_threading_number = self.adding_threading_number - 1
            self.adding_threading_number_mutex.release()
            return False
        my_logger.info("start to add " + str(len(add_index_list)) + " samples to queue!")

        """
        Following the original InDuDoNet code, both the X- and S-domain data
        should be normalized into [0, 255]. The constant 18 was obtained by
        scanning all projection-domain data, so that S stays inside [0, 255]
        when fed to the network.
        """
        for ii in add_index_list:
            Xma_path = self.Xma_path_list[ii]
            Xli_path = self.Xli_path_list[ii]
            # NOTE(review): Xma paths come from "reconstruction_npy" (see __init__),
            # so this replace is most likely a no-op and save_path lands next to the
            # input with only the extension changed — verify against the writer.
            save_path = Xma_path.replace("synthesis_project_recon", "InDuDoNet_inference").replace(".npy", ".png")

            Xli = np.load(Xli_path)
            Xli = normalize_npy(Xli)
            Xli = torch.from_numpy(Xli).unsqueeze(0)
            # fix: use the injected projector instead of an undefined global
            Sli = self.op_module_fp(Xli / 255.) / 18. * 255.

            Xma = np.load(Xma_path)
            Xma = normalize_npy(Xma)

            # metal pixels are assumed to saturate at exactly 255 after
            # normalize_npy — TODO confirm against normalize_npy's contract
            Xmetal = np.where(Xma == 255, 1, 0)
            Xma = torch.from_numpy(Xma).unsqueeze(0)
            Sma = self.op_module_fp(Xma / 255.) / 18. * 255.
            Xmetal = torch.from_numpy(Xmetal).unsqueeze(0)
            Smetal = self.op_module_fp(Xmetal)
            Smetal = 1 - torch.where(Smetal > 0, 1, 0)  # 0 on the metal trace, 1 elsewhere

            self.queue.put({
                "Xli": Xli,
                "Xma": Xma,
                "Xmetal": Xmetal,
                "Sma": Sma,
                "Sli": Sli,
                "Smetal": Smetal,
                "save_path": save_path
            })

        self.adding_threading_number_mutex.acquire()
        self.adding_threading_number = self.adding_threading_number - 1
        self.adding_threading_number_mutex.release()

        my_logger.info("After adding, current size of queue is " + str(int(self.queue.qsize())))
        my_logger.info(
            "The current sample index is " + str(self.current_read_sample_index) + "/" + str(self.data_length))
        return True


class DeeplesionInDuDoNetMultiDataLoader(object):
    """
    当前版本的input只有一个GatedConv的预测
    """
    # __train_root_path = r"F:\metal_artifact_data\easy_dataset\train"
    __train_root_path = r"E:\data_transform_buffer\metal_artifact\easy_dataset\train"

    def __init__(self, config, op_module_fp):
        self.op_module_fp = op_module_fp
        self.multiple_ratio = 40

        self.logger = my_logger
        self.current_read_sample_index = 0
        self.current_true_sample_index = 0
        self.batch_size = config.batchSize
        self.add_threshold = self.batch_size * self.multiple_ratio  # 每次添加的数据的量
        self.max_threshold = self.batch_size * 80
        self.config = config
        self.logger.info("Start to read path list")

        self.XGateConv_pred_path_list = list()  # GateConvolution 预测的结果
        # self.XInDuDoNet_pred_path_list = list()  # InDuDoNet预测的结果
        self.all_Xgt_path_list = list()  # 图像域的ground truth 未经过投影和反投影的
        self.all_Xma_path_list = list()  # 原始图像域图像
        self.all_Xli_path_list = list()
        self.__letter__ = ['A', 'B', 'D']
        for c in self.__letter__:
            self.all_Xgt_path_list += sorted(
                glob.glob(os.path.join(self.__train_root_path, "C", "*")))
            self.all_Xma_path_list += sorted(
                glob.glob(os.path.join(self.__train_root_path, c, "*")))
            self.all_Xli_path_list += sorted(
                glob.glob(os.path.join(self.__train_root_path, c + "_LI", "*")))
        self.logger.info("Reading completed!")

        assert len(self.all_Xgt_path_list) == len(self.all_Xli_path_list) == len(self.all_Xma_path_list) != 0

        self.path_num = len(self.all_Xgt_path_list)
        self.ratio_path_num = int(self.path_num * self.config.len_ratio)
        self.actual_dataloader_length = int(self.ratio_path_num / self.batch_size)

        # self.__shuffle__()

        # self.dataset_type = dataset_type
        # self.queue_max_size = 2  # The number of original data, not cropped data
        self.threading_pool_size = 3
        self.myThreadPool = ThreadPoolExecutor(max_workers=self.threading_pool_size)
        self.sample_index_mutex = threading.Lock()

        self.queue = Queue()

        self.adding_threading_number = 0  # 正在添加数据的线程数量，不控制的话会提交大量的提交数据任务
        self.adding_threading_number_mutex = threading.Lock()

        self.S_normalize_coefficient = config.S_normalize_coefficient
        # self.add_data_to_queue()
        # if config.if_data_augmentation:
        #     self.preprocessModule = CustomPreprocessModule(configure.patch_size, configure.aug_probability)

    def __len__(self):
        return self.actual_dataloader_length

    def __shuffle__(self):
        self.logger.info("Start to shuffle...")
        self.shuffled_index_list = [i for i in random.sample(range(0, self.path_num), self.path_num)]
        self.all_Xgt_path_list = [self.all_Xgt_path_list[i] for i in self.shuffled_index_list]
        self.all_Xma_path_list = [self.all_Xma_path_list[i] for i in self.shuffled_index_list]
        self.all_Xli_path_list = [self.all_Xli_path_list[i] for i in self.shuffled_index_list]

    def __iter__(self):
        self.current_read_sample_index = 0
        self.add_threshold = self.batch_size * self.multiple_ratio

        self.__shuffle__()
        self.Xgt_path_list = self.all_Xgt_path_list[0:int(self.config.len_ratio * len(self.all_Xgt_path_list))]
        self.Xma_path_list = self.all_Xma_path_list[
                             0:int(self.config.len_ratio * len(self.all_Xma_path_list))]
        self.Xli_path_list = self.all_Xli_path_list[
                             0:int(self.config.len_ratio * len(self.all_Xli_path_list))]
        return self

    def __next__(self):
        try:
            # one epoch finish
            if self.queue.qsize() < self.batch_size:
                if self.adding_threading_number == 0:  # 还没人在加  自己去读或者已经没有剩下的了
                    self.adding_threading_number_mutex.acquire()
                    self.adding_threading_number = self.adding_threading_number + 1
                    self.adding_threading_number_mutex.release()
                    read_index_list = self._get_read_index_()
                    future = self.myThreadPool.submit(self.add_data_to_queue, read_index_list)
                    wait([future])  # 会阻塞  get_next_batch方法如果有剩余会继续添加，没有剩余会抛出异常

                    if future.result() is False:
                        del future
                        raise StopIteration
                return_batch_list = self.get_next_batch()
                return return_batch_list
            # The rest is enough and supplement the queue if the rest if not enough after read a batch
            else:
                return_batch_list = self.get_next_batch()
                tmp_queue_size = self.queue.qsize()
                if tmp_queue_size < self.add_threshold and self.adding_threading_number < self.threading_pool_size:
                    my_logger.info(str(threading.current_thread()) + ": current queue size is " + str(
                        tmp_queue_size) + " need to add")
                    read_index_list = self._get_read_index_()
                    self.adding_threading_number_mutex.acquire()
                    self.adding_threading_number = self.adding_threading_number + 1
                    self.adding_threading_number_mutex.release()
                    future = self.myThreadPool.submit(self.add_data_to_queue, read_index_list)
                return return_batch_list
        except KeyboardInterrupt:
            print("catch the control c order!")
            self.myThreadPool.shutdown()

        # my_logger.info(str(len(tmp_data_list)) + " patches has been added to list")

    def _get_read_index_(self):

        self.sample_index_mutex.acquire()
        tmp_read_index_list = list()
        if (self.current_read_sample_index + self.add_threshold) > self.ratio_path_num:
            self.add_threshold = self.ratio_path_num - self.current_read_sample_index
        if self.add_threshold == 0:
            self.sample_index_mutex.release()
            return tmp_read_index_list
        for i in range(self.add_threshold):
            tmp_read_index_list.append(self.current_read_sample_index)
            self.current_read_sample_index = self.current_read_sample_index + 1
        self.sample_index_mutex.release()
        self.logger.info("Get reading index completely!")
        return tmp_read_index_list

    def get_next_batch(self):

        Xgt_list = list()
        Xli_list = list()
        Xma_list = list()
        Xmetal_list = list()
        Sgt_list = list()
        Sli_list = list()
        Sma_list = list()
        Smetal_list = list()

        for i in range(self.batch_size):
            data = self.queue.get()
            Xgt = data["Xgt"].unsqueeze(0)
            Xli = data["Xli"].unsqueeze(0)
            Xma = data["Xma"].unsqueeze(0)
            Xmetal = data["Xmetal"].unsqueeze(0)
            Sgt = data["Sgt"].unsqueeze(0)
            Sma = data["Sma"].unsqueeze(0)
            Sli = data["Sli"].unsqueeze(0)
            Smetal = data["Smetal"].unsqueeze(0)

            self.queue.task_done()
            Xgt_list.append(Xgt)
            Xli_list.append(Xli)
            Xma_list.append(Xma)
            Xmetal_list.append(Xmetal)
            Sgt_list.append(Sgt)
            Sli_list.append(Sma)
            Sma_list.append(Sli)
            Smetal_list.append(Smetal)

        return [torch.cat(Xgt_list, 0),
                torch.cat(Xli_list, 0), torch.cat(Xma_list, 0), torch.cat(Xmetal_list, 0),
                torch.cat(Sgt_list, 0), torch.cat(Sli_list, 0), torch.cat(Sma_list, 0),
                torch.cat(Smetal_list, 0)]

    def add_data_to_queue(self, add_index_list):
        if len(add_index_list) == 0:
            self.adding_threading_number_mutex.acquire()
            self.adding_threading_number = self.adding_threading_number - 1
            self.adding_threading_number_mutex.release()
            return False
        my_logger.info("start to add " + str(len(add_index_list)) + " samples to queue!")

        #  self.sample_index_mutex.acquire()
        # self.queue_operator_mutex.acquire()
        """
        deeplesion 中数据的尺寸是256*256
        """
        for ii in add_index_list:
            Xgt_path = self.Xgt_path_list[ii]
            Xma_path = self.Xma_path_list[ii]
            Xli_path = self.Xli_path_list[ii]

            Xli = cv2.imread(Xli_path, 0)
            Xli = torch.from_numpy(Xli).unsqueeze(0).float()
            Sli = self.op_module_fp(Xli / 255.) / self.S_normalize_coefficient * 255.

            Xma = cv2.imread(Xma_path, 0)
            Xma = torch.from_numpy(Xma).unsqueeze(0).float()
            Sma = self.op_module_fp(Xma / 255.) / self.S_normalize_coefficient * 255.
            Xmetal = np.where(Xma == 255, 1, 0)  # .astype(np.uint8)
            Xmetal = torch.from_numpy(Xmetal)
            Smetal = self.op_module_fp(Xmetal)
            Smetal = 1 - torch.where(Smetal > 0, 1, 0)

            Xgt = cv2.imread(Xgt_path, 0)
            Xgt = torch.from_numpy(Xgt).unsqueeze(0).float()
            Sgt = self.op_module_fp(Xgt / 255.) / self.S_normalize_coefficient * 255.

            # Xma = torch.from_numpy(Xma).unsqueeze(0)
            self.queue.put({
                "Xgt": Xgt,
                "Xli": Xli,
                "Xma": Xma,
                "Xmetal": Xmetal,
                "Sgt": Sgt,
                "Sma": Sma,
                "Sli": Sli,
                "Smetal": Smetal
            })

        self.adding_threading_number_mutex.acquire()
        self.adding_threading_number = self.adding_threading_number - 1
        self.adding_threading_number_mutex.release()
        # self.queue_operator_mutex.release()

        my_logger.info("After adding, current size of queue is " + str(int(self.queue.qsize())))
        my_logger.info(
            "The current sample index is " + str(self.current_read_sample_index) + "/" + str(self.ratio_path_num))
        return True


class DeeplesionInDuDoNetMultiInferDataLoader(object):
    """
    Multi-threaded inference data loader.

    In this version the network input only contains the GatedConv prediction.
    A small thread pool reads (Xgt, Xli, Xma) image triples from disk,
    forward-projects them with ``op_module_fp`` and pushes ready-made sample
    dicts into ``self.queue``; iterating over the loader drains the queue one
    batch at a time while refill tasks run in the background.
    """
    # __train_root_path = r"F:\metal_artifact_data\easy_dataset\train"
    __train_root_path__ = r"E:\data_transform_buffer\metal_artifact\easy_dataset\test"

    def __init__(self, config, op_module_fp):
        """
        :param config: experiment configuration; must expose ``batchSize``,
            ``name`` and ``len_ratio``.
        :param op_module_fp: forward-projection operator (image domain ->
            sinogram domain), callable on torch tensors.
        """
        self.op_module_fp = op_module_fp
        self.multiple_ratio = 40
        self.inference_save_root_path = ""

        self.logger = my_logger
        self.current_read_sample_index = 0
        self.current_true_sample_index = 0
        self.batch_size = config.batchSize
        self.add_threshold = self.batch_size * self.multiple_ratio  # number of samples read per refill task
        self.max_threshold = self.batch_size * 80
        self.config = config
        self.logger.info("Start to read path list")

        self.XGateConv_pred_path_list = list()  # predictions produced by GatedConvolution
        # self.XInDuDoNet_pred_path_list = list()  # predictions produced by InDuDoNet
        self.Xgt_path_list = list()  # image-domain ground truth (no projection / back-projection applied)
        self.Xma_path_list = list()  # original image-domain metal-artifact images
        self.Xli_path_list = list()
        self.__letter__ = ['A']
        for c in self.__letter__:
            self.inference_save_root_path = os.path.join(self.__train_root_path__, config.name + "_inference")
            if not os.path.exists(self.inference_save_root_path):
                os.mkdir(self.inference_save_root_path)
            self.Xgt_path_list += sorted(
                glob.glob(os.path.join(self.__train_root_path__, "B", "*")))
            self.Xma_path_list += sorted(
                glob.glob(os.path.join(self.__train_root_path__, c, "*")))
            self.Xli_path_list += sorted(
                glob.glob(os.path.join(self.__train_root_path__, c + "_LI", "*")))
        self.logger.info("Reading completed!")

        # Optionally shrink the dataset according to config.len_ratio.
        self.Xgt_path_list = self.Xgt_path_list[0:int(config.len_ratio * len(self.Xgt_path_list))]
        self.Xma_path_list = self.Xma_path_list[
                             0:int(config.len_ratio * len(self.Xma_path_list))]
        self.Xli_path_list = self.Xli_path_list[
                             0:int(config.len_ratio * len(self.Xli_path_list))]

        assert len(self.Xgt_path_list) == len(self.Xli_path_list) == len(self.Xma_path_list) != 0
        self.data_length = len(self.Xgt_path_list)
        # self.__shuffle__()

        # self.dataset_type = dataset_type
        # self.queue_max_size = 2  # The number of original data, not cropped data
        self.threading_pool_size = 3
        self.myThreadPool = ThreadPoolExecutor(max_workers=self.threading_pool_size)
        self.sample_index_mutex = threading.Lock()

        self.queue = Queue()

        # Number of threads currently adding data; without this cap a large
        # number of refill tasks would be submitted at once.
        self.adding_threading_number = 0
        self.adding_threading_number_mutex = threading.Lock()
        # self.add_data_to_queue()
        # if config.if_data_augmentation:
        #     self.preprocessModule = CustomPreprocessModule(configure.patch_size, configure.aug_probability)

    def __len__(self):
        """Return the number of whole batches available in one epoch."""
        length = int(self.data_length / self.batch_size)
        return length

    def __shuffle__(self):
        """Shuffle the three path lists with one shared permutation so the
        (gt, ma, li) triples stay aligned."""
        self.shuffled_index_list = [i for i in random.sample(range(0, self.data_length), self.data_length)]
        self.Xgt_path_list = [self.Xgt_path_list[i] for i in self.shuffled_index_list]
        self.Xma_path_list = [self.Xma_path_list[i] for i in self.shuffled_index_list]
        self.Xli_path_list = [self.Xli_path_list[i] for i in self.shuffled_index_list]

    def __iter__(self):
        """Reset epoch state and reshuffle before iteration starts."""
        self.current_read_sample_index = 0
        self.add_threshold = self.batch_size * self.multiple_ratio
        self.__shuffle__()
        return self

    def __next__(self):
        """Return the next batch, scheduling background refills as needed.

        Raises StopIteration when the refill task reports no remaining
        samples (end of epoch).
        """
        try:
            # one epoch finish
            if self.queue.qsize() < self.batch_size:
                if self.adding_threading_number == 0:  # nobody is refilling: read ourselves, or nothing is left
                    self.adding_threading_number_mutex.acquire()
                    self.adding_threading_number = self.adding_threading_number + 1
                    self.adding_threading_number_mutex.release()
                    read_index_list = self._get_read_index_()
                    future = self.myThreadPool.submit(self.add_data_to_queue, read_index_list)
                    # Blocks here; if samples remain get_next_batch keeps
                    # consuming, otherwise StopIteration ends the epoch.
                    wait([future])

                    if future.result() is False:
                        del future
                        raise StopIteration
                return_batch_list = self.get_next_batch()
                return return_batch_list
            # The rest is enough and supplement the queue if the rest if not enough after read a batch
            else:
                return_batch_list = self.get_next_batch()
                tmp_queue_size = self.queue.qsize()
                if tmp_queue_size < self.add_threshold and self.adding_threading_number < self.threading_pool_size:
                    my_logger.info(str(threading.current_thread()) + ": current queue size is " + str(
                        tmp_queue_size) + " need to add")
                    read_index_list = self._get_read_index_()
                    self.adding_threading_number_mutex.acquire()
                    self.adding_threading_number = self.adding_threading_number + 1
                    self.adding_threading_number_mutex.release()
                    future = self.myThreadPool.submit(self.add_data_to_queue, read_index_list)
                return return_batch_list
        except KeyboardInterrupt:
            # NOTE(review): after Ctrl-C this falls through and returns None;
            # callers iterating with `for` will then see a TypeError — confirm
            # whether the pool shutdown alone is the intended behavior.
            print("catch the control c order!")
            self.myThreadPool.shutdown()

        # my_logger.info(str(len(tmp_data_list)) + " patches has been added to list")

    def _get_read_index_(self):
        """Reserve and return the next chunk of sample indices (thread-safe).

        Returns an empty list when the epoch's samples are exhausted.
        """
        self.sample_index_mutex.acquire()
        tmp_read_index_list = list()
        # Clamp the chunk size to whatever is left in this epoch.
        if (self.current_read_sample_index + self.add_threshold) > self.data_length:
            self.add_threshold = self.data_length - self.current_read_sample_index
        if self.add_threshold == 0:
            self.sample_index_mutex.release()
            return tmp_read_index_list
        for i in range(self.add_threshold):
            tmp_read_index_list.append(self.current_read_sample_index)
            self.current_read_sample_index = self.current_read_sample_index + 1
        self.sample_index_mutex.release()
        self.logger.info("Get reading index completely!")
        return tmp_read_index_list

    def get_next_batch(self):
        """Pop ``batch_size`` samples from the queue and stack them.

        Returns ``[Xgt, Xli, Xma, Xmetal, Sgt, Sli, Sma, Smetal,
        inference_save_path_list]`` where each tensor gains a leading batch
        dimension.
        """
        Xgt_list = list()
        Xli_list = list()
        Xma_list = list()
        Xmetal_list = list()
        Sgt_list = list()
        Sli_list = list()
        Sma_list = list()
        Smetal_list = list()
        inference_save_path_list = list()

        for i in range(self.batch_size):
            data = self.queue.get()
            Xgt = data["Xgt"].unsqueeze(0)
            Xli = data["Xli"].unsqueeze(0)
            Xma = data["Xma"].unsqueeze(0)
            Xmetal = data["Xmetal"].unsqueeze(0)
            Sgt = data["Sgt"].unsqueeze(0)
            Sma = data["Sma"].unsqueeze(0)
            Sli = data["Sli"].unsqueeze(0)
            Smetal = data["Smetal"].unsqueeze(0)

            self.queue.task_done()
            Xgt_list.append(Xgt)
            Xli_list.append(Xli)
            Xma_list.append(Xma)
            Xmetal_list.append(Xmetal)
            Sgt_list.append(Sgt)
            Sli_list.append(Sli)  # BUGFIX: previously appended Sma into the Sli batch
            Sma_list.append(Sma)  # BUGFIX: previously appended Sli into the Sma batch
            Smetal_list.append(Smetal)
            inference_save_path_list.append(data["inference_save_path"])

        return [torch.cat(Xgt_list, 0),
                torch.cat(Xli_list, 0), torch.cat(Xma_list, 0), torch.cat(Xmetal_list, 0),
                torch.cat(Sgt_list, 0), torch.cat(Sli_list, 0), torch.cat(Sma_list, 0),
                torch.cat(Smetal_list, 0), inference_save_path_list]

    def add_data_to_queue(self, add_index_list):
        """Read, project and enqueue the samples named by ``add_index_list``.

        Runs on a pool thread.  Returns False (end of epoch) when the index
        list is empty, True otherwise.
        """
        if len(add_index_list) == 0:
            self.adding_threading_number_mutex.acquire()
            self.adding_threading_number = self.adding_threading_number - 1
            self.adding_threading_number_mutex.release()
            return False
        my_logger.info("start to add " + str(len(add_index_list)) + " samples to queue!")

        #  self.sample_index_mutex.acquire()
        # self.queue_operator_mutex.acquire()
        # The deeplesion images used here are 256*256.
        for ii in add_index_list:
            Xgt_path = self.Xgt_path_list[ii]
            Xma_path = self.Xma_path_list[ii]
            Xli_path = self.Xli_path_list[ii]
            inference_save_path = os.path.join(self.inference_save_root_path,
                                               os.path.basename(Xma_path))

            Xli = cv2.imread(Xli_path, 0)
            # Xli = normalize_negative_v2(Xli)
            Xli = torch.from_numpy(Xli).unsqueeze(0).float()
            # 18. appears to be an empirical sinogram normalization
            # coefficient for this dataset — TODO confirm against the
            # projector configuration.
            Sli = self.op_module_fp(Xli / 255.) / 18. * 255.
            # print(torch.max(Sli), torch.min(Sli))

            Xma = cv2.imread(Xma_path, 0)
            # Xma = normalize_negative_v2(Xma)
            Xma = torch.from_numpy(Xma).unsqueeze(0).float()
            Sma = self.op_module_fp(Xma / 255.) / 18. * 255.
            # print(torch.max(Sma), torch.min(Sma))
            # Metal mask: pixels saturated at 255 in the artifact image.
            Xmetal = np.where(Xma == 255, 1, 0)  # .astype(np.uint8)
            Xmetal = torch.from_numpy(Xmetal)
            Smetal = self.op_module_fp(Xmetal)
            # Inverted metal trace: 0 inside the trace, 1 outside.
            Smetal = 1 - torch.where(Smetal > 0, 1, 0)

            Xgt = cv2.imread(Xgt_path, 0)
            # Xgt = normalize_negative_v2(Xgt)  # normalize to [-1,1]
            Xgt = torch.from_numpy(Xgt).unsqueeze(0).float()
            Sgt = self.op_module_fp(Xgt / 255.) / 18. * 255.
            # print(torch.max(Sgt), torch.min(Sgt))

            # Xma = torch.from_numpy(Xma).unsqueeze(0)
            self.queue.put({
                "Xgt": Xgt,
                "Xli": Xli,
                "Xma": Xma,
                "Xmetal": Xmetal,
                "Sgt": Sgt,
                "Sma": Sma,
                "Sli": Sli,
                "Smetal": Smetal,
                "inference_save_path": inference_save_path
            })

        self.adding_threading_number_mutex.acquire()
        self.adding_threading_number = self.adding_threading_number - 1
        self.adding_threading_number_mutex.release()
        # self.queue_operator_mutex.release()

        my_logger.info("After adding, current size of queue is " + str(int(self.queue.qsize())))
        my_logger.info(
            "The current sample index is " + str(self.current_read_sample_index) + "/" + str(self.data_length))
        return True


class DeeplesionInDuDoNetMultiDataLoaderVersion2(MultiDataLoaderBase):
    """
    Loads the self-made DeepLesion data, which is stored as ``.mat`` files.

    ``DeeplesionInDuDoNetMultiDataLoader`` loads a colleague's DeepLesion
    data, which is stored as png files.  In addition, the data read by this
    class was generated with a flat-panel detector, while the colleague's
    data used an arc-shaped detector.

    The first two values below are the overall extrema of Xma / Xli / Xgt,
    the middle two are the extrema after projection:
    4.00285
    -0.706244
    tensor(173.2426)
    tensor(-2.6787)
    450000
    450000
    """

    def __init__(self, config, op_module_fp):
        """
        :param config: experiment configuration; must expose
            ``data_root_path`` and ``len_ratio``.
        :param op_module_fp: forward-projection operator (image domain ->
            sinogram domain), callable on torch tensors.
        """
        super(DeeplesionInDuDoNetMultiDataLoaderVersion2, self).__init__(config)
        # NOTE: keeps the historical "storge" spelling so any external code
        # referencing this attribute keeps working.
        self.path_storge_save_file = "path_storage.csv"
        self.data_root_path = config.data_root_path
        self.folder_key_name = "train"
        if self.run_type == "inference":
            self.folder_key_name = "test"
        self.op_module_fp = op_module_fp
        print("执行子类初始化方法")
        self.max_value = 0
        self.min_value = 0

    def prepare_sample_dict(self):
        """Build (or reload) the lists of sample file paths.

        The path lists are cached in a csv file so later runs can read them
        back directly instead of walking the directory tree.
        Column order in the csv: Xma_path, Xli_path, Xgt_path, Xmetal_path.
        """
        self.logger.info("Start to make the list of data path")
        Xma_path_list = list()
        Xli_path_list = list()
        Xgt_path_list = list()
        Xmetal_path_list = list()

        if os.path.exists(self.path_storge_save_file):
            self.logger.info("Start to load file path from csv file")
            with open(self.path_storge_save_file, 'r') as my_file:
                csv_reader = csv.reader(my_file)
                for tmp_path_list in csv_reader:
                    Xma_path_list.append(tmp_path_list[0])
                    Xli_path_list.append(tmp_path_list[1])
                    Xgt_path_list.append(tmp_path_list[2])
                    Xmetal_path_list.append(tmp_path_list[3])
            self.logger.info("Loading finished!")
        else:
            # NOTE(review): only the first 9 top-level folders are used —
            # confirm whether this cap is intentional or a leftover.
            folder_path_list = sorted(glob.glob(os.path.join(self.data_root_path, "*")))[0:9]
            for folder_path in tqdm.tqdm(folder_path_list):  # e.g. Images_png_03
                sample_path_list = glob.glob(os.path.join(folder_path, self.folder_key_name, "*"))
                for sample_path in sample_path_list:  # e.g. 000001_03_01
                    number_path_list = glob.glob(os.path.join(sample_path, "*"))
                    for number_path in number_path_list:  # e.g. 103
                        tmp_path_list = glob.glob(os.path.join(number_path, "LI_CT_*"))
                        Xli_path_list += tmp_path_list
                        Xma_path_list += glob.glob(os.path.join(number_path, "ma_CT_*"))
                        Xmetal_path_list += glob.glob(os.path.join(number_path, "metal_CT_*"))
                        assert len(Xli_path_list) == len(Xma_path_list) == len(Xmetal_path_list)
                        # One shared ground truth file per folder, repeated to
                        # stay aligned with the per-mask LI/ma/metal entries.
                        Xgt_path_list += [glob.glob(os.path.join(number_path, "groundtruth.mat"))[0] for i in
                                          range(len(tmp_path_list))]
            self.logger.info("Start to storage the file path")
            with open(self.path_storge_save_file, 'w', newline='') as my_file:
                csv_writer = csv.writer(my_file)
                for Xma_path, Xli_path, Xgt_path, Xmetal_path in zip(Xma_path_list, Xli_path_list, Xgt_path_list,
                                                                     Xmetal_path_list):
                    csv_writer.writerow([Xma_path, Xli_path, Xgt_path, Xmetal_path])
            self.logger.info("Storage finished!")

        assert len(Xma_path_list) == len(Xli_path_list) == len(Xgt_path_list) == len(Xmetal_path_list) != 0
        self.sample_path_dict["Xma_path_list"] = Xma_path_list
        self.sample_path_dict["Xli_path_list"] = Xli_path_list
        self.sample_path_dict["Xgt_path_list"] = Xgt_path_list
        self.sample_path_dict["Xmetal_path_list"] = Xmetal_path_list

        self.logger.info("Data path reading completed!")

        # Shrink every list according to config.len_ratio.
        for tmp_key in self.sample_path_dict.keys():
            self.sample_path_dict[tmp_key] = self.sample_path_dict[tmp_key][
                                             0:int(self.config.len_ratio * len(self.sample_path_dict[tmp_key]))]

        self.logger.info("The cropping of data path list according to the len_ratio has finished!")

    def read_one_data(self, add_index):
        """Load, clip and project one (Xma, Xli, Xgt, Xmetal) sample.

        In the original InDuDoNet code the network operates on the value
        range [0, 255] for both X (image) and S (sinogram) domains.

        Returns a list containing one ``[Xgt, Xli, Xma, Xmetal, Sgt, Sma,
        Sli, Smetal]`` entry, or an empty list on failure.
        """
        # BUGFIX: "import scipy" alone does not reliably bind the scipy.io
        # submodule (only newer SciPy lazily imports subpackages), so load it
        # explicitly here.
        import scipy.io
        return_list = list()
        Xma_path = self.sample_path_dict["Xma_path_list"][add_index]
        Xli_path = self.sample_path_dict["Xli_path_list"][add_index]
        Xgt_path = self.sample_path_dict["Xgt_path_list"][add_index]
        Xmetal_path = self.sample_path_dict["Xmetal_path_list"][add_index]
        if self.run_type == "inference":
            # Kept for parity with the commented-out inference return path
            # below; also validates that save_path_list is populated.
            save_path = self.sample_path_dict["save_path_list"][add_index]

        try:
            Xma = scipy.io.loadmat(Xma_path)['ma_CT']
            Xli = scipy.io.loadmat(Xli_path)['LI_CT']
            Xgt = scipy.io.loadmat(Xgt_path)['gt_CT']
            Xmetal = scipy.io.loadmat(Xmetal_path)['metal_CT']

            # self.max_value = max(self.max_value, np.max([Xma, Xli, Xgt]))
            # self.min_value = min(self.min_value, np.min([Xma, Xli, Xgt]))

            # Clip attenuation values into [0, 0.5] before projection.
            Xma = np.clip(Xma, 0, 0.5)
            Xli = np.clip(Xli, 0, 0.5)
            Xgt = np.clip(Xgt, 0, 0.5)

            # 190. appears to be an empirical sinogram normalization
            # coefficient for this dataset — TODO confirm against the
            # projector configuration.
            Xli = torch.from_numpy(Xli).unsqueeze(0).float()
            Sli = self.op_module_fp(Xli) / 190. * 255.

            Xma = torch.from_numpy(Xma).unsqueeze(0).float()
            Sma = self.op_module_fp(Xma) / 190. * 255.

            Xmetal = torch.from_numpy(Xmetal).unsqueeze(0).float()
            Smetal = self.op_module_fp(Xmetal)
            # Binary metal trace (not inverted here, unlike the png loaders).
            Smetal = torch.where(Smetal > 0, 1, 0)

            Xgt = torch.from_numpy(Xgt).unsqueeze(0).float()
            Sgt = self.op_module_fp(Xgt) / 190. * 255.

            # Rescale the image domain to [0, 255] after projection.
            Xma = Xma * 255.
            Xli = Xli * 255.
            Xgt = Xgt * 255.

            return_list.append([Xgt, Xli, Xma, Xmetal, Sgt, Sma, Sli, Smetal])

            # if self.run_type == "inference":
            #     return_list.append([metal_trace_1, Sprior_1, Sgt_1, __add_suffix_for_path__(save_path, "_1")])
            #     return_list.append([metal_trace_2, Sprior_2, Sgt_2, __add_suffix_for_path__(save_path, "_2")])
            # elif self.run_type == "train":
            #     return_list.append([metal_trace_1, Sprior_1, Sgt_1])
            #     return_list.append([metal_trace_2, Sprior_2, Sgt_2])
            return return_list
        except Exception as e:
            # Best-effort: log and return an empty list so one corrupt file
            # does not kill the whole loading pipeline.
            self.logger.error(e)
            self.logger.error(traceback.format_exc())
            return return_list


class TempPaperDataSet(Dataset):
    """Fixed two-sample dataset used for quick paper experiments.

    Each item is loaded from hard-coded test images, forward-projected with
    ``op_module_fp`` and scaled by ``S_normalize_coefficient``.
    """

    def __init__(self, op_module_fp):
        self.op_module_fp = op_module_fp
        # Empirical normalization constant for the sinogram domain.
        self.S_normalize_coefficient = 7.
        self.length = 2
        root = "E:\\data_transform_buffer\\metal_artifact\\easy_dataset\\test"
        names = ["000376_16_02_319_9.png", "000372_01_01_147_2.png"]
        self.Xma_path_list = [root + "\\A\\" + name for name in names]
        self.Xli_path_list = [root + "\\A_LI\\" + name for name in names]
        self.Xgt_path_list = [root + "\\B\\" + name for name in names]

    def __len__(self):
        return self.length

    def _load_and_project(self, path):
        # Read greyscale, lift to a (1, H, W) float tensor, project and
        # rescale the sinogram back towards [0, 255].
        image = torch.from_numpy(cv2.imread(path, 0)).unsqueeze(0).float()
        sinogram = self.op_module_fp(image / 255.) / self.S_normalize_coefficient * 255.
        return image, sinogram

    def __getitem__(self, item):
        """Return (Xgt, Xli, Xma, Xmetal, Sgt, Sma, Sli, Smetal) for sample *item*."""
        Xli, Sli = self._load_and_project(self.Xli_path_list[item])
        Xma, Sma = self._load_and_project(self.Xma_path_list[item])

        # Metal mask: pixels saturated at 255 in the artifact image.
        Xmetal = torch.from_numpy(np.where(Xma == 255, 1, 0))
        # Inverted metal trace: 0 inside the trace, 1 outside.
        Smetal = 1 - torch.where(self.op_module_fp(Xmetal) > 0, 1, 0)

        Xgt, Sgt = self._load_and_project(self.Xgt_path_list[item])

        return Xgt, Xli, Xma, Xmetal, Sgt, Sma, Sli, Smetal


def get_temp_dataloader(op_module_fp):
    """Wrap :class:`TempPaperDataSet` in a single-worker DataLoader.

    :param op_module_fp: forward-projection operator passed through to the
        dataset.
    :return: a DataLoader yielding both samples as one batch.
    """
    temp_dataset = TempPaperDataSet(op_module_fp)
    return DataLoader(dataset=temp_dataset, batch_size=2, num_workers=0, drop_last=False)