from nnunetv2.paths import nnUNet_raw
from pathlib import Path
from batchgenerators.utilities.file_and_folder_operations import *
import multiprocessing
import shutil
from tqdm import tqdm
from nnunetv2.dataset_conversion.generate_dataset_json import generate_dataset_json
from nnunetv2.imageio.simpleitk_reader_writer import SimpleITKIO

# Class labels for the Circle of Willis (CoW) vessel segmentation task.
# NOTE(review): the source annotations appear to encode '3rd-A2' as value 15
# (see the remap in load_and_convert_case below); it is stored as 13 here,
# presumably so nnU-Net sees consecutive label values — confirm against the
# TopCoW release notes.
labels = {'background': 0, 'BA': 1, 'R-PCA': 2, 'L-PCA': 3, 'R-ICA': 4, 'R-MCA': 5, 
          'L-ICA': 6, 'L-MCA': 7, 'R-Pcom': 8, 'L-Pcom': 9, 'Acom': 10, 'R-ACA': 11, 
          'L-ACA': 12, '3rd-A2': 13}

# Shared image reader/writer used by the per-case worker function.
read_writer = SimpleITKIO()

def load_and_convert_case(input_image: str, input_seg: str, output_image: str, output_seg: str) -> None:
    """Convert a single case into nnU-Net raw format.

    Reads the segmentation, remaps label value 15 to 13, writes the remapped
    segmentation to *output_seg*, and copies the image file unchanged to
    *output_image*.

    :param input_image: path to the source image (copied as-is)
    :param input_seg: path to the source segmentation
    :param output_image: destination path for the image
    :param output_seg: destination path for the remapped segmentation
    """
    data, info = read_writer.read_seg(input_seg)
    # The source annotations use 15 where the `labels` mapping above expects
    # 13 ('3rd-A2'); remap so the written segmentation matches the mapping.
    data[data == 15] = 13
    # read_seg returns the array with a leading channel axis; write channel 0.
    read_writer.write_seg(data[0], output_seg, info)
    shutil.copy(input_image, output_image)


if __name__ == "__main__":
    # Per-modality file-name prefix and nnU-Net dataset id.
    modality_dict = {
        'MRA': 'mr',
        'CTA': 'ct',
    }
    dataset_num_dict = {
        'MRA': '996',
        'CTA': '995',
    }
    root = Path(nnUNet_raw)
    modality = 'MRA'  # switch to 'CTA' to convert the CT release
    # Explicit raise instead of `assert`: asserts are stripped under `python -O`.
    if modality not in modality_dict:
        raise ValueError(f"modality must be one of {sorted(modality_dict)}, got {modality!r}")
    mode = modality_dict[modality]
    source = Path('/home/yjiang/workspace/local/dataset/TopCoW2024_Data_Release')

    # zfill(3) already guarantees a 3-character id for the ids used here.
    dataset_num = dataset_num_dict[modality].zfill(3)
    dataset_name = f'Dataset{dataset_num}_TopCoW2024_{modality}'

    imagestr = root / dataset_name / 'imagesTr'
    imagests = root / dataset_name / 'imagesTs'
    labelstr = root / dataset_name / 'labelsTr'
    labelsts = root / dataset_name / 'labelsTs'

    for folder in (imagestr, imagests, labelstr, labelsts):
        maybe_mkdir_p(folder)

    train_imgs = sorted((source / 'imagesTr').glob(f'topcow_{mode}*.nii.gz'))
    train_annotations = sorted((source / 'cow_seg_labelsTr').glob(f'topcow_{mode}*.nii.gz'))
    # Images and annotations are paired positionally after sorting; a count
    # mismatch would make zip() silently drop cases, so fail loudly instead.
    if len(train_imgs) != len(train_annotations):
        raise RuntimeError(
            f"found {len(train_imgs)} images but {len(train_annotations)} annotations"
        )

    # Fan the per-case conversion out across all cores.
    # NOTE(review): 'spawn' presumably chosen to avoid fork-related issues
    # with the SimpleITK-backed reader — confirm before changing.
    results = []
    with multiprocessing.get_context("spawn").Pool(multiprocessing.cpu_count()) as pool:
        for img, anno in zip(train_imgs, train_annotations):
            results.append(
                pool.starmap_async(
                    load_and_convert_case,
                    ((
                        img,
                        anno,
                        imagestr / img.name,
                        labelstr / anno.name
                    ),)
                )
            )

        # .get() re-raises any worker exception; tqdm shows per-case progress.
        for res in tqdm(results):
            res.get()

    generate_dataset_json(root / dataset_name, {0: mode}, labels,
                          len(train_imgs), '.nii.gz', dataset_name=dataset_name)