# encoding=utf-8
import os
import pickle
from seg_common.BatchLoader import BatchLoaderFactory
from seg_common.Annotation import CommonAnnotation
from seg_system import ApplicationConfig
from nnunet.preprocessing.preprocessing import GenericPreprocessor
from seg_system.Segmentation.service.Nnunet.NnunetPreProcess import NnunetPreProcess


class NnunetBatchFactory(BatchLoaderFactory):
    """Batch loader that adapts 2D inputs to the nnU-Net pipeline.

    Because the nnU-Net framework is rather special, this takes the
    least-invasive approach:

    2D to NIfTI:
        In the original framework ``from_path`` is where the raw images are
        stored and ``to_path`` holds the segmentation of those raw images.
        The generic execution wrapper (``IPredictHandler.predict``) does not
        pass any extra data into the BatchFactory, so a temporary directory
        is created next to ``from_path`` (in its parent directory) to store
        the generated ``.nii.gz`` images; that folder is removed once the
        computation finishes.
    """

    def __init__(self, param_idx: int = ApplicationConfig.NetConfig.SEGMENTATION_USE_NNUNET_BIG):
        """Initialize the factory and eagerly build the nnU-Net preprocessor.

        :param param_idx: key into ``ApplicationConfig.NetConfig.SEGMENTATION_DICT``
            selecting which trained nnU-Net parameter set (and plans file) to use.
        """
        super(NnunetBatchFactory, self).__init__()
        self.from_path = ""
        self.tmp_nii_path = ""
        self.idx_to_token, self.token_to_idx = [], dict()
        self.token_to_token = dict()  # maps source (png) file name -> generated nii.gz file name(s)
        self.token_to_properties = dict()  # per-token preprocessing properties used later to restore positions

        self.param_idx = param_idx

        # nnU-Net couples the trainer with the data preprocessing, so the
        # preprocessor is initialized right away from the stored plans file.
        self.plans: dict = None
        self.preprocessor: GenericPreprocessor = None
        self.init_preprocessor()

    def init_preprocessor(self):
        """Load the serialized nnU-Net ``plans.pkl`` and build a GenericPreprocessor."""
        path = os.path.join(ApplicationConfig.PathConfig.SYSTEM_RESOURCE,
                            ApplicationConfig.NetConfig.SEGMENTATION_DICT[self.param_idx],
                            'plans.pkl')
        # NOTE(review): pickle.load is only safe because plans.pkl is a
        # resource shipped with the system — never point this at untrusted input.
        with open(path, 'rb') as f:
            self.plans = pickle.load(f)

        normalization_schemes = self.plans['normalization_schemes']
        use_mask_for_norm = self.plans['use_mask_for_norm']
        transpose_forward = self.plans['transpose_forward']
        intensity_properties = self.plans['dataset_properties']['intensityproperties']

        self.preprocessor = GenericPreprocessor(normalization_schemes, use_mask_for_norm,
                                                transpose_forward, intensity_properties)

    @CommonAnnotation.override()
    def load_data(self, path: str, **kwargs):
        """Convert every convertible file under *path* to NIfTI and index it.

        Creates (recreating if necessary) a temporary directory next to *path*
        for the generated ``.nii.gz`` files and rebuilds all token mappings.
        """
        self.idx_to_token.clear()
        self.token_to_idx.clear()
        self.token_to_token.clear()
        # BUGFIX: also drop properties from a previous load; the other three
        # mappings were cleared but this one was not, so stale entries from an
        # earlier batch could leak into the new one.
        self.token_to_properties.clear()
        self.from_path = path
        self.tmp_nii_path = os.path.join(os.path.abspath(os.path.join(self.from_path, os.path.pardir)),
                                         ApplicationConfig.PathConfig.ORIGIN_NNUNET_TMP)

        # Recreate the temporary nii.gz directory from scratch.
        if os.path.exists(self.tmp_nii_path):
            NnunetPreProcess.try_rm_dir(self.tmp_nii_path)
        ApplicationConfig.PathConfig.make_dir(self.tmp_nii_path)

        for each_file in os.listdir(path):
            mat_path = os.path.join(self.from_path, each_file)
            file_name = NnunetPreProcess.convert2nifti(mat_path, self.tmp_nii_path)

            # convert2nifti returns None for files it cannot convert; skip those.
            if file_name is not None:
                # token_to_token records the png -> nii.gz file mapping.
                self.idx_to_token.append(each_file)
                self.token_to_idx[each_file] = len(self.idx_to_token) - 1
                self.token_to_token[each_file] = file_name

    @CommonAnnotation.override()
    def prepare_data(self, index: int):
        """Preprocess the sample at *index* through the nnU-Net pipeline.

        :returns: ``(data, index)`` where ``data`` is the crop/resample/normalize
            output of ``GenericPreprocessor.preprocess_test_case``.
        """
        token = self.idx_to_token[index]
        nii_token = self.token_to_token[token]
        # nii_token holds one file name per modality; build full paths for all.
        nii_token_path = [os.path.join(self.tmp_nii_path, each) for each in nii_token]

        # Use the first configured stage's target spacing.
        stage = list(self.plans['plans_per_stage'].keys())[0]
        stage_plans = self.plans['plans_per_stage'][stage]['current_spacing']
        # For non-CT data this is a plain per-image crop, resample and normalize.
        # The second return value is the segmentation channel, unused here.
        d, _, properties = self.preprocessor.preprocess_test_case(nii_token_path, stage_plans)
        # properties['crop_bbox'] is kept so relative positions can be restored later.
        self.token_to_properties[token] = properties
        return d, index

    @CommonAnnotation.override()
    def get_data_lens(self):
        """Return the number of successfully converted samples."""
        return len(self.idx_to_token)

    @CommonAnnotation.override()
    def get_index(self, tokens):
        """Map a token (or list/tuple of tokens) to its index/indices.

        NOTE(review): an unknown token silently maps to index 0 — confirm
        callers rely on this fallback rather than expecting an error.
        """
        if not isinstance(tokens, (list, tuple)):
            return self.token_to_idx.get(tokens, 0)
        return [self.get_index(token) for token in tokens]

    @CommonAnnotation.override()
    def get_tokens(self, indices):
        """Map an index (or list/tuple of indices) back to its token(s)."""
        if not isinstance(indices, (list, tuple)):
            return self.idx_to_token[indices]
        return [self.idx_to_token[index] for index in indices]