import os
import pydicom
import numpy as np
from scipy.ndimage import zoom
from concurrent.futures import ThreadPoolExecutor
import time
def get_dicom_files(directory):
    """Recursively collect the paths of all ``.dcm`` files under *directory*.

    The extension check is case-insensitive, so ``SYNO0035.DCM`` matches too.
    Returns a list of full paths (os.walk order).
    """
    return [
        os.path.join(root, name)
        for root, _, names in os.walk(directory)
        for name in names
        if name.lower().endswith('.dcm')
    ]

#  Groups datasets by series: uid -> [ds0, ds1, ...]
#  The phase label of a series' first image defines the mapping uid => phase.
def classify_dicom_files(directory):
    """Group all DICOM datasets found under *directory* by series.

    Every ``.dcm`` file is read with pydicom and appended to the list for
    its ``SeriesInstanceUID``; files without that attribute are skipped.

    NOTE(review): a ``phase_dict`` mapping "5%".."95%" to indices 0-9 used
    to live here but was never read — removed as dead code. Re-introduce it
    where the phase label is actually consumed.

    Parameters
    ----------
    directory : str
        Root folder to scan recursively.

    Returns
    -------
    dict
        Maps SeriesInstanceUID -> list of pydicom Dataset objects, in the
        order the files were encountered.
    """
    series_dict = {}
    for file_path in get_dicom_files(directory):
        ds = pydicom.dcmread(file_path)
        if hasattr(ds, 'SeriesInstanceUID'):
            # setdefault creates the list on first sight of a UID, then appends.
            series_dict.setdefault(ds.SeriesInstanceUID, []).append(ds)
    return series_dict

def turn_dict_to_list_deep3(series_dict):
    """Return the values of *series_dict* as a list, in insertion order.

    BUG FIX: the original looped ``for i in range(len(series_dict))`` and
    indexed ``series_dict[i]``, which raises ``KeyError`` for any dict whose
    keys are not exactly the integers 0..n-1 — including the UID-keyed dict
    produced by ``classify_dicom_files``. Taking ``.values()`` handles both
    the int-keyed case (same result, dicts preserve insertion order) and the
    UID-keyed case.

    Parameters
    ----------
    series_dict : dict
        Any mapping; typically uid -> list of datasets.

    Returns
    -------
    list
        The dict's values in insertion order.
    """
    return list(series_dict.values())





def process_single_volume(args):
    """Resize one 3-D volume with trilinear interpolation.

    *args* is a single ``(volume, resize_factor)`` tuple so the function can
    be passed straight to ``executor.map`` over a list of tuples.
    Returns the zoomed ndarray.
    """
    volume, resize_factor = args
    return zoom(volume, resize_factor, order=1)


if __name__ == '__main__':
    # Benchmark: upscale ten 3-D volumes (72x512x512 -> 144x1024x1024) in
    # parallel with a thread pool. Threads can overlap here to the extent
    # that scipy's C core releases the GIL — TODO confirm the speedup vs.
    # a single 4-D zoom / a process pool on the target machine.
    start_time = time.time()

    norm_mpr_volume = np.zeros((10, 72, 512, 512))
    mpr_resize_factor = (2.0, 2.0, 2.0)

    # One (volume, factor) work item per 3-D sub-volume.
    args_list = [(vol, mpr_resize_factor) for vol in norm_mpr_volume]

    # Fan the resizes out over the pool; tune max_workers to the CPU count.
    with ThreadPoolExecutor(max_workers=5) as executor:
        resized_volumes = list(executor.map(process_single_volume, args_list))

    # BUG FIX: the stacked 4-D result was computed but never assigned,
    # so the whole benchmark's output was discarded. Keep it.
    resized_mpr_volume = np.stack(resized_volumes, axis=0)

    end_time = time.time()
    print(f'init_norm_volume,运行时间：{end_time - start_time}')