import datetime
import json
import math
import os
import sys
import time
from concurrent.futures import ThreadPoolExecutor
import pydicom
import SimpleITK as sitk
from ExtendScripts.DicomAutoConvert.DicomDataCheck import DicomDataCheck


class ConvertErrorDefine:
    """Human-readable (Chinese) error messages produced by the DICOM filtering passes."""
    # Accession-number tag (0008,0050) is missing from the file.
    CHECK_ID_NOT_FOUND_ERROR = "无法搜索到检查号字段"
    # Study-description tag (0008,1030) is missing from the file.
    STUDY_INFORMATION_NOT_FOUND_ERROR = "无法搜索到检查部位字段"
    # Study description matched none of the chest keywords.
    STUDY_INFORMATION_NOT_MATCH_ERROR = "检查部位关键词匹配异常"
    # Frames (slices) are missing from the series.
    FRAME_NUMBER_LOSS_ERROR = "帧缺失异常"
    # Source is not a chest CT (algorithm quality assessment failed).
    NOT_CHEST_DATA_SOURCE_ERROR = "非胸部CT数据源异常(算法质量评估异常)"
    # Chest window data missing (or unreadable).
    WINDOWS_LOSS_DATA_SOURCE_ERROR = "胸部窗缺失数据(或无法读取)异常"
    # Catch-all for unexpected exceptions; formatted with str(exception).
    UNEXCEPTION_OCCUR_ERROR = "程序未定义错误: {}"


class RawDataCollection:
    """Collect raw DICOM studies under a root directory and filter them.

    Expected on-disk layout (one study per leaf directory):
        parent1:
            - dcm1
            - dcm2
        parent2:
            - dcm1
            - dcm2
    """
    def __init__(self, root_start: str):
        # Root directory that get_root_dcm_files() scans recursively.
        self.root_start = root_start
        # {directory: [dcm file names]} - progressively narrowed by the checks.
        self.root_dcm_dict = {}
        # {directory: best-matching series id}, filled by check_dcm_data_error
        # and consumed by convert_clean_dcm_dict_to_nrrd.
        self.most_match_serial_id = {}

        # DICOM tags: AccessionNumber (0008,0050), StudyDescription (0008,1030).
        self.BASIC_INFO_ACCESSION_NUMBER = (0x0008, 0x0050)
        self.BASIC_INFO_STUDY_DESCRIPTION = (0x0008, 0x1030)

    def save_error_to_csv(self, error_dcm_dict: dict, default_save_name: str = "error_dcm_files.csv"):
        """Persist {file path: error reason} pairs as a two-column CSV.

        Fix: the old platform branch chose between a literal LF and
        os.linesep.  The file is opened in text mode, where a literal LF is
        already translated to the platform convention (writing os.linesep on
        Windows would even double the CR), so a plain LF is correct
        everywhere and the branch was removed.
        """
        with open(default_save_name, 'w', encoding='utf-8') as f:
            f.write("文件路径, 错误原因" + "\n")
            for each_error, reason in error_dcm_dict.items():
                f.write("{}, {}".format(each_error, reason) + "\n")
        return True

    def convert_clean_dcm_dict_to_nrrd(self, output_nrrd_path: str, json_path: str):
        """Build the conversion-job json for every study that survived filtering.

        For each clean directory the accession number is read from its first
        dcm file, the file list of the best-matching series is resolved via
        SimpleITK, and {check_id: {files, format, output}} is written into
        ``json_path``.  Nothing is converted here - the json is consumed by
        the external Slicer script.
        """
        os.makedirs(output_nrrd_path, exist_ok=True)
        json_d = {}

        clean_dirs = list(self.root_dcm_dict.keys())
        for index, each_clean_key in enumerate(clean_dirs):
            # The accession number is shared by the study, so the first file
            # is enough to read it.
            ds = pydicom.dcmread(os.path.join(each_clean_key, self.root_dcm_dict[each_clean_key][0]))
            check_id = ds.AccessionNumber
            serial_id = self.most_match_serial_id[each_clean_key]

            reader = sitk.ImageSeriesReader()
            files = reader.GetGDCMSeriesFileNames(each_clean_key, serial_id)
            file_format = "DCMTK"  # the alternative reader backend is GDCM
            save_path = os.path.join(output_nrrd_path, check_id + ".nrrd")

            json_d[check_id] = {
                "files": files,
                "format": file_format,
                "output": save_path
            }
            print(f"[Created Json][{((index + 1) / len(clean_dirs) * 100)}] {check_id}")

        with open(json_path, "w", encoding='utf-8') as json_file:
            json_file.write(json.dumps(json_d))
        print("Created json file {}".format(json_path))

    def check_every_study(self):
        """Run both filter passes sequentially.

        Pass 1 validates basic header information, pass 2 validates the data
        itself.  ``self.root_dcm_dict`` is replaced by the clean subset;
        the return value maps each rejected directory to its reason.
        """
        info_clean_dcm_dict = {}
        error_dcm_dict = {}

        # Pass 1: basic header information.
        all_dirs = list(self.root_dcm_dict.keys())
        for index, each_key in enumerate(all_dirs):
            basic_error_info = self.check_basic_dcm_information(each_key, self.root_dcm_dict[each_key])
            if basic_error_info != "":
                error_dcm_dict[each_key] = basic_error_info
            else:
                info_clean_dcm_dict[each_key] = self.root_dcm_dict[each_key]
            print("[Info Filter][{}] {} - {}".format(((index + 1) / len(all_dirs) * 100), each_key, "ok" if basic_error_info == "" else basic_error_info))

        # Pass 2: data-level problems (window loss, frame loss, content).
        data_clean_dcm_dict = {}
        info_dirs = list(info_clean_dcm_dict.keys())
        for index, each_key in enumerate(info_dirs):
            basic_error_info = self.check_dcm_data_error(each_key, self.root_dcm_dict[each_key])
            if basic_error_info != "":
                error_dcm_dict[each_key] = basic_error_info
            else:
                data_clean_dcm_dict[each_key] = info_clean_dcm_dict[each_key]
            print("[Data Filter][{}] {} - {}".format(((index + 1) / len(info_dirs) * 100), each_key, "ok" if basic_error_info == "" else basic_error_info))

        self.root_dcm_dict = data_clean_dcm_dict
        print("############### Cleaned DCM List ##############")
        print(self.root_dcm_dict.keys())
        return error_dcm_dict

    def check_every_study_parallel(self, default_num_process: int = 8):
        """Thread-pool variant of check_every_study, processed in batches.

        TODO: hand-written pooling because the fully automatic parallel
              version misbehaved; result ordering under load has NOT been
              verified - stress-test the sequential flow before relying on
              this, errors are hard to trace here.
        Fix: both executors are now shut down before returning (the original
        leaked their worker threads for the life of the process).
        """
        exec_b = ThreadPoolExecutor()  # two pools to reduce the bug surface
        exec_d = ThreadPoolExecutor()
        info_clean_dcm_dict = {}
        error_dcm_dict = {}
        data_clean_dcm_dict = {}

        try:
            # Pass 1: basic header information, default_num_process at a time.
            all_dirs = list(self.root_dcm_dict.keys())
            batch_count_1 = math.ceil(len(all_dirs) / default_num_process)
            for index in range(batch_count_1):
                future_list = []
                temp_key = all_dirs[index * default_num_process: (index + 1) * default_num_process]
                start = time.perf_counter()
                for each_key in temp_key:
                    future_list.append((each_key, exec_b.submit(self.check_basic_dcm_information, each_key, self.root_dcm_dict[each_key])))

                for each_key, each_f in future_list:
                    basic_error_info = each_f.result()
                    if basic_error_info != "":
                        error_dcm_dict[each_key] = basic_error_info
                    else:
                        info_clean_dcm_dict[each_key] = self.root_dcm_dict[each_key]
                    end = time.perf_counter()
                    print("[Info Filter][{}%][{}s] {} - {}".format(((len(error_dcm_dict) + len(info_clean_dcm_dict)) / len(all_dirs) * 100),
                                                                   end - start, each_key,
                                                                   "ok" if basic_error_info == "" else basic_error_info))

            # Pass 2: data-level checks on the survivors of pass 1.
            len1_err_dict = len(error_dcm_dict)
            info_dirs = list(info_clean_dcm_dict.keys())
            batch_count_2 = math.ceil(len(info_dirs) / default_num_process)
            for index in range(batch_count_2):
                future_list = []
                temp_key = info_dirs[index * default_num_process: (index + 1) * default_num_process]
                start = time.perf_counter()
                for each_key in temp_key:
                    future_list.append(
                        (each_key, exec_d.submit(self.check_dcm_data_error, each_key, self.root_dcm_dict[each_key])))

                for each_key, each_f in future_list:
                    basic_error_info = each_f.result()
                    if basic_error_info != "":
                        error_dcm_dict[each_key] = basic_error_info
                    else:
                        data_clean_dcm_dict[each_key] = info_clean_dcm_dict[each_key]
                    end = time.perf_counter()
                    print("[Data Filter][{}%][{}s] {} - {}".format(((len(data_clean_dcm_dict) + len(error_dcm_dict) - len1_err_dict) / len(info_dirs) * 100),
                                                                   end - start, each_key,
                                                                   "ok" if basic_error_info == "" else basic_error_info))
        finally:
            # Fix: release the worker threads deterministically.
            exec_b.shutdown(wait=True)
            exec_d.shutdown(wait=True)

        self.root_dcm_dict = data_clean_dcm_dict
        print("############### Cleaned DCM List ##############")
        print(self.root_dcm_dict.keys())
        return error_dcm_dict

    def check_dcm_data_error(self, root_dir: str, dcm_file_list: list):
        """Data-level validation of one study directory.

        Returns "" on success, otherwise an error string from
        ConvertErrorDefine.  Any exception raised by a check is converted
        into UNEXCEPTION_OCCUR_ERROR instead of propagating, so one broken
        study cannot abort the whole batch.
        """
        # TODO: 1. readability check - does this also cover window loss?
        try:
            if not DicomDataCheck.sitk_windows_loss(root_dir):
                return ConvertErrorDefine.WINDOWS_LOSS_DATA_SOURCE_ERROR
        except Exception as e:
            return ConvertErrorDefine.UNEXCEPTION_OCCUR_ERROR.format(str(e))

        # TODO: 2. frame-loss check - may need a v1/v2 combination.
        try:
            if not DicomDataCheck.sitk_check_frame_loss_v3(root_dir, dcm_file_list):
                return ConvertErrorDefine.FRAME_NUMBER_LOSS_ERROR
        except Exception as e:
            return ConvertErrorDefine.UNEXCEPTION_OCCUR_ERROR.format(str(e))

        # TODO: 3. content-quality check; also records the best series id.
        try:
            quality_list, match_index = DicomDataCheck.sitk_data_useable_check(root_dir, dcm_file_list)
            if match_index == -1:
                return ConvertErrorDefine.NOT_CHEST_DATA_SOURCE_ERROR
            # Remember the series id that matched best for the nrrd export.
            self.most_match_serial_id[root_dir] = quality_list[match_index][0]
        except Exception as e:
            return ConvertErrorDefine.UNEXCEPTION_OCCUR_ERROR.format(str(e))

        return ""

    def check_basic_dcm_information(self, root_dir: str, dcm_file_list: list):
        """Header-level validation of one study directory.

        Only the FIRST dcm file is inspected (the loop always returns in its
        first iteration) - this keeps the scan fast and mirrors the original
        behaviour.  Returns "" on success, otherwise an error description.
        Fix: an empty file list now returns "" instead of falling through to
        an implicit None, which callers compared against "" and mistook for
        an error with a None message.
        """
        for each_dcm_file in dcm_file_list:
            try:
                ds = pydicom.dcmread(os.path.join(root_dir, each_dcm_file))
            except Exception as e:
                return str(e)
            if self.BASIC_INFO_ACCESSION_NUMBER not in ds:
                return ConvertErrorDefine.CHECK_ID_NOT_FOUND_ERROR
            if self.BASIC_INFO_STUDY_DESCRIPTION not in ds:
                return ConvertErrorDefine.STUDY_INFORMATION_NOT_FOUND_ERROR

            match_list_keys = ["Thorax", "chest", "CHEST", "胸部平扫", "胸部CT平扫"]
            if any(each_key in ds.StudyDescription for each_key in match_list_keys):
                return ""
            return ConvertErrorDefine.STUDY_INFORMATION_NOT_MATCH_ERROR
        return ""

    def get_root_dcm_files(self):
        """Walk self.root_start and record every directory that holds .dcm files."""
        for root, dirs, files in os.walk(self.root_start):
            filter_dcm_file = [each_file for each_file in files
                               if each_file.lower().endswith(".dcm")]
            if not filter_dcm_file:
                continue

            # TODO: a directory may still contain several dirty series.
            self.root_dcm_dict[root] = filter_dcm_file

def generate_json(root_start: str, output_nrrd_path: str = None):
    """Scan root_start, filter the studies, and emit the conversion-job json.

    Args:
        root_start: directory tree containing the raw DICOM studies.
        output_nrrd_path: target directory for the .nrrd outputs; defaults
            to an "outputnrrd" folder next to this script.

    Returns:
        Absolute path of the generated json file (named with a timestamp so
        repeated runs never clobber each other).

    Fix: the script directory / json path was computed three separate times
    and the convert call was duplicated across both branches; both are now
    computed once.
    """
    collection = RawDataCollection(root_start)
    collection.get_root_dcm_files()
    error_dict_list = collection.check_every_study()
    # error_dict_list = collection.check_every_study_parallel()  # TODO: testing only

    json_name = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f") + '.json'
    script_dir = os.path.dirname(os.path.abspath(__file__))
    json_path = os.path.join(script_dir, json_name)

    collection.save_error_to_csv(error_dict_list)
    if output_nrrd_path is None:
        output_nrrd_path = os.path.join(script_dir, "outputnrrd")
    collection.convert_clean_dcm_dict_to_nrrd(output_nrrd_path, json_path)

    return json_path


def process_json(script_path: str, json_path: str, slicer_exe: str):
    """Run the Slicer conversion script on a single json job file.

    Launches ``slicer_exe --python-script script_path json_path`` and waits
    for it to finish.

    Returns:
        (json_path, status message) - the message reports either success or
        the non-zero return code so the caller can locate the failed job.
    """
    import subprocess
    cmd = [slicer_exe, "--python-script", script_path, json_path]
    completed = subprocess.run(cmd)
    if completed.returncode == 0:
        msg = f"文件{json_path} Slicer转换步骤执行完成"
    else:
        msg = f"注意，文件{json_path} Slicer的转换步骤可能遇到中断，当前返回值: {completed.returncode}"
    # print(msg)
    return json_path, msg


def split_json(json_path: str, split_number: int, split_json_path: str):
    """Split a job json into chunk files of at most split_number entries each.

    Args:
        json_path: source json mapping check_id -> job description.
        split_number: maximum number of entries per chunk file.
        split_json_path: output directory; when None a timestamped folder is
            created under SplitCache next to this script.

    Returns:
        List of the chunk file paths, in order.

    Fixes over the original while-True loop:
      * when len(keys) was an exact multiple of split_number an extra EMPTY
        chunk file was written and returned;
      * an empty source json caused ZeroDivisionError in the progress print.
    A bounded for-loop over ceil(len(keys)/split_number) avoids both.
    """
    if split_json_path is None:
        stamp = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
        split_json_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "SplitCache", stamp)

    os.makedirs(split_json_path, exist_ok=True)

    with open(json_path, "r", encoding='utf-8') as f:
        json_data = json.load(f)

    keys = list(json_data.keys())
    total = math.ceil(len(keys) / split_number)
    split_return_list = []
    split_json_name = "split_json_{}.json"
    for index in range(total):
        chunk_keys = keys[index * split_number: (index + 1) * split_number]
        chunk = {k: json_data[k] for k in chunk_keys}

        s_p = os.path.join(split_json_path, split_json_name.format(index))
        with open(s_p, "w", encoding='utf-8') as j:
            j.write(json.dumps(chunk))

        print("[Created Split Json][{}] {}".format((index + 1) / total * 100, s_p))
        split_return_list.append(s_p)
    return split_return_list


def run_slicer_script(script_path: str, json_path: str, slicer_exe: str,
                      split_number: int = 50,
                      split_json_path: str = None,
                      slicer_instance_number: int = 2):
    """Split the job json and run several Slicer instances in parallel.

    Slicer can be started multiple times, and very long jsons make it hard to
    locate bad data after a crash, hence the split-then-batch wrapper.
    TODO: if this is interrupted abnormally, search for orphaned Slicer
          processes - they frequently are not released cleanly.

    Args:
        script_path: Slicer python script to execute per chunk.
        json_path: full job json produced by generate_json.
        slicer_exe: path to the Slicer executable.
        split_number: entries per chunk (30-50 eases post-failure debugging).
        split_json_path: chunk output directory (None -> timestamped default).
        slicer_instance_number: chunks processed concurrently per batch.
    """
    # 1. split
    split_json_list = split_json(json_path, split_number, split_json_path)

    # 2. manager thread pool; batches of slicer_instance_number at a time.
    # Fix: the executor is now closed via the context manager (the original
    # never shut it down and leaked its worker threads).
    total = math.ceil(len(split_json_list) / slicer_instance_number)
    count = 0
    with ThreadPoolExecutor() as exec_t:
        for i in range(total):
            batch = split_json_list[i * slicer_instance_number: (i + 1) * slicer_instance_number]
            start = time.perf_counter()
            future_list = [exec_t.submit(process_json, script_path, j, slicer_exe) for j in batch]
            for each_f in future_list:
                result = each_f.result()
                end = time.perf_counter()
                count += 1
                print("[Convert Nrrd][{}%][{}s] {}".format(round(count / len(split_json_list) * 100, 2),
                                                           round(end - start, 2), result[1]))




if __name__ == '__main__':
    # NOTE(review): stress-test findings are recorded verbatim (Chinese) in
    # the string below - they document known Slicer/pydicom failure modes.
    """压力测试已知bug:
        1. 部分数据可能存在多种spacing(Slicer Warning)，Slicer界面导入会自动创建一个TransformNode，代码导入程序失效退出
            - Slicer: Image slices are not equally spaced (5 spacing was expected, 10 spacing was found
            - 估计需要将处理好的数据按照200一组进行划分，或者对筛选流程进行修改，找出这类数据
            - 标记的数据check_id: CT1596808(2000), CT1592279(8000), QH-196503(8000)
                - 基本确定为中间丢帧大概率报错，末尾可以，其中丢帧的文件很可能连sitk的warning都不存在，无法拦截
            - XX-219416(8000, sitk无warning, 无检测到丢帧)
                - Slicer: Geometric issues were found with 1 of the series. Please use caution.
        2. (pydicom) UserWarning: End of file reached before delimiter (fffe, e0dd) found in file 
            - 330204194705075020\41961191\CT1351005\1.3.12.2.1107.5.1.4.92483.30000021011400053913000069629.dcm
            - 330227195008291499\41961173\WX-123848/1.2.392.200036.9123.100.11.15114374080817483161784846406845902.dcm
            - 330227195312051828\41961191\CT1294731/1.3.12.2.1107.5.1.4.92483.30000020090406575380500013153.dcm
            - 33022719650505443X\41961192\JS-748323/1.2.840.113704.1.111.2476.1676942884.14027.dcm
            - 332625195708295627\05740014\353080/1.3.12.2.1107.5.1.4.115724.30000024080114483015600201752.dcm
            
        TODO: 并行化部分代码，并简化部分流程以提升数据处理速度
    """


    """######### 给generate_json的参数 #############"""
    # Input tree of raw DICOM studies and the target nrrd output directory.
    root_start = r"E:\Datasets\SpecialBugs"
    output_nrrd_path = r"E:\Datasets\OutputNrrd"
    json_path = generate_json(root_start, output_nrrd_path); print(json_path)
    # json_path = r'E:\Projects\heart-ca-project\ExtendScripts\DicomAutoConvert\2025_07_13_02_05_11_426261.json'
    # print(generate_json(root_start))

    """######### 给run_slicer_script的参数 #############"""
    # Older win10 builds use gbk; newer builds use utf-8.
    script_path = r'E:\Projects\heart-ca-project\ExtendScripts\DicomAutoConvert\SlicerScripts\DcmConvertWithSlicer.py'
    slicer_exe = r'E:\IDEs\Slicer 5.8.1\Slicer.exe'

    # Direct processing: for data sets too small to be worth splitting, or
    # when debugging specific files.
    # print(process_json(script_path, json_path, slicer_exe))

    # Split-then-process: usually run 4-8 instances; 60-70% CPU is ideal. If
    # per-thread timings diverge by seconds, the machine is overloaded.
    # A split_number of 30-50 makes failed data easier to locate afterwards.
    run_slicer_script(script_path, json_path, slicer_exe, slicer_instance_number=6)