from unittest import result
import imgtools as it
import json
import SimpleITK as sitk
import pathlib
import os
import sys
import nibabel as nib
import copy
import numpy as np
import shutil
import collections
from rich.console import Console
from rich.table import Table

# Dataset root directories for the two environments this script is run from.
database_linux = '/home/yusongli/_dataset/_IIPL/ShuaiWang/20211223/shidaoai/'  # Linux workstation
database_windows = 'F:\\shidaoai'  # Windows drive (backslashes escaped)



def test_generate_json():
    """Build the dataset JSON from the raw train/val and test folders."""
    trainval_dir = '/home/yusongli/_dataset/shidaoai/img/_orig/wangqifeng/sichuan'
    testing_dir = '/home/yusongli/_dataset/shidaoai/img/_orig/wangqifeng/beijing'
    savepath = 'data/_conf/img/dataset.json'
    it.generate_json(
        trainval_dir,
        testing_dir,
        json_savepath=savepath,
        mask_patterns=['*T[_1 ]*.gz'],
    )
    # it.generate_convert_json_from_json(json_path)


def run_set_pathj():
    """Convert a dataset JSON into its '*_convert' counterpart."""
    src = '/home/yusongli/_project/shidaoai/data/_conf/img/train1332_val332_test264.json'
    dst = '/home/yusongli/_project/shidaoai/data/_conf/img/train1332_val332_test264_convert.json'
    it.set_pathj(src, dst)


def test_get_roi_total():
    """Crop an ROI for every sample in the converted dataset JSON and track
    the extreme crop radii per split.

    ``it.get_roi`` returns either a scalar radius or an (x, y, z) tuple of
    radii; both shapes are handled below. Negative scalar returns are treated
    as failed crops and written to the crop log.
    """
    # BUGFIX: the JSON file handle used to be leaked (bare open() with no
    # close); it is now closed via a context manager.
    with open('data/_conf/img/dataset_unetr_1332_332_264_convert.json', 'r') as file:
        dataset = json.load(file)

    tags = ['training', 'validation', 'test']

    with open('data/_out/img/crop_log.txt', 'w') as f:
        for tag in tags:
            # Scalar-radius extremes.
            max_radius, max_radius_img = -1, None
            min_radius, min_radius_img = 9999, None
            # Per-axis extremes (used when it.get_roi returns a tuple).
            max_x_radius, max_x_radius_img = -1, None
            min_x_radius, min_x_radius_img = 9999, None
            max_y_radius, max_y_radius_img = -1, None
            min_y_radius, min_y_radius_img = 9999, None
            max_z_radius, max_z_radius_img = -1, None
            min_z_radius, min_z_radius_img = 9999, None

            # BUGFIX: pre-initialise so the summary below cannot raise
            # NameError when a split is empty.
            return_value = None

            for sample in dataset[tag]:
                img_path = sample['image'][7]
                mask_path = sample['label'][7]
                img_savepath = sample['image'][8]
                mask_savepath = sample['label'][8]

                return_value = it.get_roi(img_path, mask_path, img_savepath, mask_savepath, x_area=111, y_area=111, z_area=47, scale_flag=False)

                if isinstance(return_value, (np.int64, int)):
                    if return_value < 0:
                        # A negative radius marks a failed crop.
                        f.write(f'{return_value} | {img_path}\n')
                        f.flush()
                    if return_value > max_radius:
                        max_radius = return_value
                        max_radius_img = img_path
                        print(f'{tag} | Current max radius: {max_radius}, in {max_radius_img}')
                    if 0 < return_value < min_radius:
                        min_radius = return_value
                        min_radius_img = img_path
                        print(f'{tag} | Current min radius: {min_radius}, in {min_radius_img}')
                elif isinstance(return_value, tuple):
                    if return_value[0] > max_x_radius:
                        max_x_radius = return_value[0]
                        max_x_radius_img = img_path
                        print(f'{tag} | Current max x radius: {max_x_radius}, in {max_x_radius_img}')
                    if return_value[0] < min_x_radius:
                        min_x_radius = return_value[0]
                        min_x_radius_img = img_path
                        print(f'{tag} | Current min x radius: {min_x_radius}, in {min_x_radius_img}')
                    if return_value[1] > max_y_radius:
                        max_y_radius = return_value[1]
                        max_y_radius_img = img_path
                        print(f'{tag} | Current max y radius: {max_y_radius}, in {max_y_radius_img}')
                    if return_value[1] < min_y_radius:
                        min_y_radius = return_value[1]
                        min_y_radius_img = img_path
                        print(f'{tag} | Current min y radius: {min_y_radius}, in {min_y_radius_img}')
                    if return_value[2] > max_z_radius:
                        max_z_radius = return_value[2]
                        max_z_radius_img = img_path
                        print(f'{tag} | Current max z radius: {max_z_radius}, in {max_z_radius_img}')
                    if return_value[2] < min_z_radius:
                        min_z_radius = return_value[2]
                        min_z_radius_img = img_path
                        print(f'{tag} | Current min z radius: {min_z_radius}, in {min_z_radius_img}')

            # The summary branches on the shape of the LAST return value;
            # it is skipped entirely when the split was empty.
            if isinstance(return_value, (np.int64, int)):
                print(f'{tag} | Total max radius: {max_radius}, in {max_radius_img}')
                print(f'{tag} | Total min radius: {min_radius}, in {min_radius_img}')
                f.write(f'{tag} | Total max radius: {max_radius}, in {max_radius_img}\n')
                f.write(f'{tag} | Total min radius: {min_radius}, in {min_radius_img}\n')
                f.flush()
            elif isinstance(return_value, tuple):
                print(f'{tag} | Total max x radius: {max_x_radius}, in {max_x_radius_img}')
                print(f'{tag} | Total min x radius: {min_x_radius}, in {min_x_radius_img}')
                print(f'{tag} | Total max y radius: {max_y_radius}, in {max_y_radius_img}')
                print(f'{tag} | Total min y radius: {min_y_radius}, in {min_y_radius_img}')
                print(f'{tag} | Total max z radius: {max_z_radius}, in {max_z_radius_img}')
                print(f'{tag} | Total min z radius: {min_z_radius}, in {min_z_radius_img}')
                f.write(f'{tag} | Total max x radius: {max_x_radius}, in {max_x_radius_img}\n')
                f.write(f'{tag} | Total min x radius: {min_x_radius}, in {min_x_radius_img}\n')
                f.write(f'{tag} | Total max y radius: {max_y_radius}, in {max_y_radius_img}\n')
                f.write(f'{tag} | Total min y radius: {min_y_radius}, in {min_y_radius_img}\n')
                f.write(f'{tag} | Total max z radius: {max_z_radius}, in {max_z_radius_img}\n')
                f.write(f'{tag} | Total min z radius: {min_z_radius}, in {min_z_radius_img}\n')
                f.flush()

def test_get_roi_single():
    """Debug-run it._get_roi on a single hand-picked validation case."""
    # Input CT volume.
    img_path = '/home/yusongli/_dataset/shidaoai/img/_out/myunetr_output_spacial_cropped_96_224_224_fix_affine_dilated_gpu_1/56/sichuan_liutong_138963_0.3040_0.4260/sichuan_liutong_138963_CT.nii.gz'
    # Ground-truth mask.
    mask_path = '/home/yusongli/_dataset/shidaoai/img/_out/wangqifeng_spacial_cropped_96_224_224_fix_affine/sichuan/liutong/138963/138963_GTV-T_MASK.nii.gz'
    # Coarse prediction used to locate the ROI.
    pred_path = '/home/yusongli/_dataset/shidaoai/img/_out/myunetr_output_spacial_cropped_96_224_224_fix_affine_dilated_gpu_1/56/sichuan_liutong_138963_0.3040_0.4260/sichuan_liutong_138963_pred_0.3040_0.4260.nii.gz'
    # Save targets — unused while save=False below, kept for quick switching.
    img_savepath = '/home/yusongli/_dataset/shidaoai/img/_out/wangqifeng_spacial_cropped_96_224_224_fix_affine_dilated_coarsetofine/validation/sichuan_liutong_138963/sichuan_liutong_138963_CT.nii.gz'
    cutted_roi = '/home/yusongli/_dataset/shidaoai/img/_out/wangqifeng_spacial_cropped_96_224_224_fix_affine_dilated_coarsetofine/validation/sichuan_liutong_138963/sichuan_liutong_138963_GTV-T_MASK.nii.gz'

    roi_result = it._get_roi(img_path, mask_path, pred_path=pred_path, zoom=1.1, mode='even', translation=False, debug_embed_mode=True, save=False, console=True)
    print(roi_result)


def test_scale_intensity():
    """Apply it.scale_intensity to every image/mask pair in all three splits."""
    file = open('data/_conf/img/dataset_unetr_1332_332_264_convert.json', 'r')
    dataset = json.load(file)

    src_idx = 10  # index of the input path within each sample's path list
    dst_idx = 11  # index of the output path

    for tag in ('training', 'validation', 'test'):
        for sample in dataset[tag]:
            it.scale_intensity(
                sample['image'][src_idx],
                sample['image'][dst_idx],
                mask_path=sample['label'][src_idx],
                mask_savepath=sample['label'][dst_idx],
                norm=False,
            )


def test_check_pixel_unetr_pred_total(cur_epoch):
    """Check pixel/dice statistics for one epoch's prediction output.

    Args:
        cur_epoch: epoch number whose output folder is scanned.
    """
    data_path = '/home/yusongli/_dataset/shidaoai/img/_out/model_output_myunetr_spacial_cropped_96_224_224_fix_affine_dilated_gpu_3'
    min_epoch = []
    max_epoch = []
    min_missing = None
    max_missing = None

    def get_information_single_epoch(epoch, min_missing, max_missing, min_epoch, max_epoch):
        """Scan one epoch folder and return the updated trackers.

        BUGFIX: the previous version rebound ``min_epoch``/``max_epoch`` to
        fresh lists inside this closure (``min_epoch = [epoch]``), which never
        reached the caller's lists, and it used a falsy check that treated
        ``min_missing == 0`` as "unset". The lists are now returned
        explicitly and unset-ness is tested with ``is None``.
        """
        data_path_pattern = f'**/{str(epoch)}/**/'
        missing, dice_value = it.check_pixel_and_dice(data_path, data_path_pattern=data_path_pattern)

        if min_missing is None:
            min_missing = len(missing)
            min_epoch.append(epoch)
        elif len(missing) == min_missing:
            min_epoch.append(epoch)
        elif len(missing) < min_missing:
            min_missing = len(missing)
            min_epoch = [epoch]

        if max_missing is None:
            max_missing = len(missing)
            max_epoch.append(epoch)
        elif len(missing) == max_missing:
            max_epoch.append(epoch)
        elif len(missing) > max_missing:
            max_missing = len(missing)
            max_epoch = [epoch]

        return min_missing, max_missing, min_epoch, max_epoch, missing, dice_value

    min_missing, max_missing, min_epoch, max_epoch, missing, dice_value = get_information_single_epoch(
        cur_epoch, min_missing, max_missing, min_epoch, max_epoch)

    print(f'Max missing epochs: {max_epoch}, number of max missing: {max_missing}')
    print(f'Min missing epochs: {min_epoch}, number of min missing: {min_missing}')
    print(f'Missing: {missing}')
    print(f'Max dice {dice_value["max"][0]} at {dice_value["max"][1]}')
    print(f'Min dice {dice_value["min"][0]} at {dice_value["min"][1]}')
    print(f'Avg dice {dice_value["avg"]}')


def test_check_pixel_unetr_pred_single():
    """List samples with missing predicted pixels for epoch 54.

    Cleanup: removed four unused locals (min/max epoch trackers) copied
    from the *_total variant.
    """
    data_path = '/home/yusongli/_dataset/shidaoai/img/_out/unetr_output_spacial_scale_intensity'
    data_path_pattern = '**/' + '54' + '/**/*_pred.nii.gz'
    missing = it.check_pixel(data_path, data_path_pattern=data_path_pattern)
    print('-' * 50)
    print(missing)
    print(len(missing))


def test_check_pixel_2D_UNet():
    """Run it.check_pixel over the 2D U-Net test output and start a log file."""
    output_dir = '/home/yusongli/_dataset/shidaoai/img/_out/wangqifeng_spacial_cropped_96_224_224_img_mask/out_test'
    missing, dice_value = it.check_pixel(output_dir, data_path_pattern='*.nii.gz')
    print('-' * 10)
    log_path = pathlib.Path('data/_out/img/check_pixel.txt')
    if not log_path.exists():
        log_path.touch()
    with open(log_path.as_posix(), 'w') as log_file:
        # Header goes both to stdout and to the log.
        print('No pixel:')
        print('No pixel:', file=log_file)


def test_json_move():
    """Copy files from path index 12 to index 15 for all splits."""
    it.json_move(
        convert_json_path='data/_conf/img/dataset_unetr_1332_332_264_convert.json',
        tags=['training', 'validation', 'test'],
        keys=['image', 'label'],
        input_index=12,
        output_index=15,
        mode='copy',
        cover=True,
    )


def test_dice():
    """Compute per-case and average dice between predictions and masks,
    then list the top-k extreme cases.
    """
    # path_suffix = '_val'
    # path_suffix = '_train'
    path_suffix = '_test'
    pred_path = '/home/yusongli/_dataset/shidaoai/img/_out/wangqifeng_spacial_cropped_96_224_224_img_mask/out' + path_suffix
    pred_path = pathlib.Path(pred_path)
    mask_path = pathlib.Path(pred_path.parent.as_posix() + os.sep + 'mask' + path_suffix)
    dices = []
    # (dice, path) pairs — unlike the old dice->path dict, tied dice values
    # no longer overwrite each other.
    dice_pairs = []

    for item in pred_path.glob('*.nii.gz'):
        pred = nib.load(item.as_posix()).get_fdata()
        mask = nib.load(mask_path.as_posix() + os.sep + item.name.split('.')[0] + '_MASK.nii.gz').get_fdata()

        dice = it.dice(pred, mask)
        dices.append(dice)
        dice_pairs.append((dice, item.as_posix()))
        print(f'{item.name}: {dice}')

    print('-' * 50)
    print(f'Average dice: {np.mean(dices)}')

    print('-' * 50)
    top = min(5, len(dices))

    # ? sorted order
    reverse = False
    tip = 'max' if reverse else 'min'
    print(f'top {top} {tip} dice img path:')
    # BUGFIX: the reverse-aware sorted() call used to be immediately
    # overwritten by a second, unconditional sorted() call, so the
    # `reverse` flag had no effect.
    dice_pairs.sort(key=lambda pair: pair[0], reverse=reverse)
    for dice, path in dice_pairs[:top]:
        print(f'{dice} | {path}')


def peek_mask():
    """Flag masks whose foreground (label 1) covers at least as many voxels
    as the background (label 0) — typically a sign of a corrupted mask.
    """
    # mask_path = '/home/yusongli/_dataset/shidaoai/img/_out/wangqifeng_spacial_cropped_96_224_224_scale_0-1500_without_norm_classification/training/mask'
    mask_path = '/home/yusongli/_dataset/shidaoai/img/_out/wangqifeng_spacial_cropped_96_224_224_scale_0-1500_without_norm_classification/validation/mask'
    mask_path = pathlib.Path(mask_path)
    for item in mask_path.rglob('*.nii.gz'):
        mask_array = nib.load(filename=item.as_posix()).get_fdata()
        counter = collections.Counter(mask_array.flatten())
        background = int(counter[0])
        foreground = int(counter[1])
        if foreground >= background:
            # BUGFIX: report the offending file, not the search root folder
            # (the old code printed mask_path for every hit).
            print(f'{item.as_posix()}: {counter}')


def _test_get_volume_dice_pair(path_suffix='_val', volume_lmin=0, volume_rmax=20000, reload_flag=False, pred_path='/home/yusongli/_dataset/shidaoai/img/_out/wangqifeng_spacial_cropped_96_224_224_img_mask/out', log_path='data/_out/img/volume_dices_name_pairs.json'):
    """Collect (volume, dice, path) triples and summarise the dice inside a
    volume window.

    Args:
        path_suffix: split suffix appended to ``pred_path`` ('_val'/'_train'/'_test').
        volume_lmin, volume_rmax: inclusive volume window; pass None for both
            to get the raw (largest, smallest) pair instead.
        reload_flag: when True, recompute the triples from disk and rewrite
            ``log_path``; otherwise load the cached triples from ``log_path``.
        pred_path: prediction folder prefix (without suffix).
        log_path: JSON cache of the sorted triples.

    Returns:
        (avg_dice, count, worst_name, worst_dice) for the window,
        (0.0, 0, '-', 0.0) when the window is empty, or
        (largest_triple, smallest_triple) when no window is given.
    """
    pred_path = pred_path + path_suffix

    # Normalise a reversed [lmin, rmax] window.
    if volume_lmin is not None and volume_rmax is not None and volume_lmin > volume_rmax:
        volume_lmin, volume_rmax = volume_rmax, volume_lmin

    if not reload_flag:
        # Reuse the cached triples; the file handle is closed via `with`.
        with open(log_path, 'r') as f:
            json_obj = json.load(f)
        volume_dices_name_pairs = [tuple(item) for item in json_obj]
    else:
        pred_path = pathlib.Path(pred_path)
        mask_path = pathlib.Path(pred_path.parent.as_posix() + os.sep + 'mask' + path_suffix)
        volume_dices_name_pairs = []

        for item in pred_path.glob('*.nii.gz'):
            pred = item.as_posix()
            mask = mask_path.as_posix() + os.sep + item.name.split('.')[0] + '_MASK.nii.gz'

            volume, dice = it.get_volume_dice_pair(mask, pred)
            volume_dices_name_pairs.append((int(volume), dice, item.as_posix()))
            print(f'{item.name}: {volume}, {dice}')

        volume_dices_name_pairs = sorted(volume_dices_name_pairs, key=lambda x: x[0], reverse=True)

        with open(log_path, 'w') as f:
            json_obj = json.dumps(obj=volume_dices_name_pairs, indent=4)
            f.write(str(json_obj))

    if volume_lmin is not None and volume_rmax is not None:
        inner = [x for x in volume_dices_name_pairs if volume_lmin <= x[0] <= volume_rmax]
        dices = [x[1] for x in inner]
        paths = [x[2] for x in inner]
        # BUGFIX: replaced a bare `try/except: pass` around min()/index()
        # with an explicit emptiness check — same results, no silent
        # exception swallowing and no conditionally-unbound variable.
        if dices:
            min_dice = min(dices)
            min_dice_path = paths[dices.index(min_dice)]
            inner_avg_dice = np.mean(dices)
            return inner_avg_dice, len(dices), pathlib.Path(min_dice_path).name, min_dice
        return 0.0, 0, '-', 0.0
    return volume_dices_name_pairs[0], volume_dices_name_pairs[-1]


def test_get_volume_dice_pair():
    """Print a rich table of average dice per 6000-voxel volume bucket."""
    group_count = list(range(0, 66000, 6000))
    console = Console()
    table = Table(show_header=True, header_style='bold magenta')
    for column_name in ('volume', 'dice', 'number', 'min_dice_sample', 'min_dice'):
        table.add_column(column_name, justify='center', style='bold cyan')

    # Walk consecutive bucket edges: (0, 6000), (6000, 12000), ...
    for lo, hi in zip(group_count, group_count[1:]):
        result = _test_get_volume_dice_pair(volume_lmin=lo, volume_rmax=hi)
        table.add_row(f'{lo} - {hi}', f'{result[0]:.4f}', f'{result[1]}', f'{result[2]}', f'{result[3]:.4f}')

    console.print(table)


def morphology_dilate():
    """Dilate masks (path index 10 -> 12) for all splits, kernel 7, twice."""
    it.morphology_dilate(
        convert_json_path='data/_conf/img/dataset_unetr_1332_332_264_convert.json',
        tags=['training', 'validation', 'test'],
        keys=['image', 'label'],
        input_index=10,
        output_index=12,
        kernel_size=7,
        iterations=2,
    )


def test_check_affine():
    """Check image affines, reading path index 0 and writing index 10."""
    it.check_affine(
        convert_json_path='data/_conf/img/dataset_unetr_1332_332_264_convert.json',
        tags=['training', 'validation', 'test'],
        keys=['image'],
        input_index=0,
        output_index=10,
    )


def test_get_roi():
    """Crop 112x112x48 ROIs (path index 1 -> 13) for all splits, logging failures."""
    it.get_roi(
        convert_json_path='data/_conf/img/dataset_unetr_1332_332_264_convert.json',
        tags=['training', 'validation', 'test'],
        keys=['image', 'label'],
        input_index=1,
        output_index=13,
        x_area=112,
        y_area=112,
        z_area=48,
        pred_path=None,
        scale=False,
        output=True,
        save=True,
        log_path='data/_out/img/get_roi.txt',
    )


def test_morphology_dilate_single():
    """Dilate five hand-picked extreme cases (max x/y/z and min x/y radii)."""
    src_root = '/home/yusongli/_dataset/shidaoai/img/_out/wangqifeng_spacial_cropped_96_224_224_fix_affine'
    dst_root = '/home/yusongli/_dataset/shidaoai/img/_temp/wangqifeng_spacial_cropped_96_224_224_fix_affine_dilated'

    # (province, patient, case-number) in order: max x, max y, max z, min x, min y.
    cases = [
        ('sichuan', 'liutong', '240124'),
        ('sichuan', 'liutong', '121661'),
        ('sichuan', 'liutong', '305703'),
        ('sichuan', 'yanxiang', '68390'),
        ('sichuan', 'fanxingcheng', '68924'),
    ]

    kernel_size = 7
    iterations = 2

    for where, who, number in cases:
        stem = f'{where}/{who}/{number}/{number}'
        it._morphology_dilate(
            f'{src_root}/{stem}_CT.nii.gz',
            f'{src_root}/{stem}_GTV-T_MASK.nii.gz',
            f'{dst_root}/{stem}_CT.nii.gz',
            f'{dst_root}/{stem}_GTV-T_MASK.nii.gz',
            kernel_size=kernel_size,
            iterations=iterations,
        )

def run_check_imgsize():
    """Print image-size check results for training images at path index 1."""
    json_path = '/home/yusongli/_project/shidaoai/data/_conf/img/train1332_val332_test264_convert.json'
    sizes = it.check_imgsize(json_path, ['training'], ['image'], [1])
    print(sizes)

def coarse_to_fine_data_prepare():
    """Prepare stage-2 (coarse-to-fine) data and report embed dices.

    training:   ROI is cut from the dilated GT mask, zoomed by 1.1.
    validation: ROI is cut from the coarse network prediction, zoomed by 1.1.

    Expected output layout::

        coarsetofine/
        |- training/
        |   |- sichuan_liutong_305703/
        |       |- CT
        |       |- MASK
        |- validation/
            |- sichuan_liutong_305703/
                |- CT
                |- MASK
    """
    # Embed dices accumulated across both splits for the final average.
    total_dices = []
    def get_training():
        training_dices = []
        # Only process the training split.
        training_savefolder = '/home/yusongli/_dataset/shidaoai/img/_out/wangqifeng_spacial_cropped_96_224_224_fix_affine_dilated_coarsetofine/training'
        # training_savefolder = '/home/yusongli/_dataset/shidaoai/img/_temp/wangqifeng_spacial_cropped_96_224_224_fix_affine_dilated_coarsetofine/training'
        convert_json_path = '/home/yusongli/_project/shidaoai/data/_conf/img/dataset_unetr_1332_332_264_convert.json'
        tags=['training']
        keys=['image', 'label']
        input_index = 12
        # NOTE(review): output_index = -1 presumably disables target-path
        # generation (target_paths is never used below) — confirm against
        # it._iter_convert_json.
        output_index = -1
        for orig_paths, target_paths in it._iter_convert_json(convert_json_path, tags, keys, input_index, output_index):
            img_path = orig_paths[0]
            img_path = pathlib.Path(img_path)
            # The pre-dilation mask sits next to the image, matched by pattern.
            mask_path = it._get_targets(img_path.parent.as_posix(), patterns=['*before_dilated*'])[0]
            mask_path = pathlib.Path(mask_path)
            pred_path = orig_paths[1]
            pred_path = pathlib.Path(pred_path)

            # Path layout is .../<where>/<who>/<number>/<file>.
            where = img_path.parents[2].name
            who = img_path.parents[1].name
            number = img_path.parents[0].name

            img_savepath = training_savefolder + os.sep + where + '_' + who + '_' + number + os.sep + where + '_' + who + '_' + number  + '_CT.nii.gz'
            mask_savepath = training_savefolder + os.sep + where + '_' + who + '_' + number + os.sep + where + '_' + who + '_' + pred_path.name

            # Debug-embed mode: nothing is saved, only the embed dice is returned.
            # result = it._get_roi(img_path, mask_path, img_savepath, mask_savepath, pred_path=pred_path, zoom=1.1, mode='even', translation=False, save=True, output=True)
            result = it._get_roi(img_path, mask_path, img_savepath, mask_savepath, pred_path=pred_path, zoom=1.1, mode='even', debug_embed_mode=True, translation=False, save=False, console=True)
            if isinstance(result, float):
                training_dices.append(result)
        if training_dices:
            print(f'Training set avg embed dice: {sum(training_dices) / len(training_dices)}')
            total_dices.extend(training_dices)

    def get_roi_from_unetr_or_myunetr_pred_result():
        validation_dices = []
        # Only process the validation split.
        validation_savefolder = '/home/yusongli/_dataset/shidaoai/img/_out/wangqifeng_spacial_cropped_96_224_224_fix_affine_dilated_coarsetofine/validation'
        # validation_savefolder = '/home/yusongli/_dataset/shidaoai/img/_temp/wangqifeng_spacial_cropped_96_224_224_fix_affine_dilated_coarsetofine/validation'
        rootfolder_path = '/home/yusongli/_dataset/shidaoai/img/_out/myunetr_output_spacial_cropped_96_224_224_fix_affine_dilated_gpu_1/56'
        maskfolder_path = '/home/yusongli/_dataset/shidaoai/img/_out/wangqifeng_spacial_cropped_96_224_224_fix_affine'

        rootfolder_path = pathlib.Path(rootfolder_path)
        maskfolder_path = pathlib.Path(maskfolder_path)
        validation_savefolder = pathlib.Path(validation_savefolder)

        for pred_path in rootfolder_path.rglob('*pred_0*.nii.gz'):
            # Prediction filenames encode <where>_<who>_<number>_... .
            where = pred_path.name.split('_')[0]
            who = pred_path.name.split('_')[1]
            number = pred_path.name.split('_')[2]

            img_path = pred_path.parent.as_posix() + os.sep + where + '_' + who + '_' + number + '_CT.nii.gz'
            mask_path = maskfolder_path.as_posix() + os.sep + where + os.sep + who + os.sep + number
            mask_path = it._get_targets(mask_path, patterns=['*MASK*'])[0]
            mask_name = pathlib.Path(mask_path).name

            img_savepath = validation_savefolder.as_posix() + os.sep + where + '_' + who + '_' + number + os.sep + where + '_' + who + '_' + number + '_CT.nii.gz'
            mask_savepath = validation_savefolder.as_posix() + os.sep + where + '_' + who + '_' + number + os.sep + where + '_' + who + '_' + mask_name

            # Debug-embed mode: nothing is saved, only the embed dice is returned.
            # result = it._get_roi(img_path, mask_path, img_savepath, mask_savepath, pred_path=pred_path, x_area=64, y_area=71, z_area=39, zoom=1.1, translation=True, save=True)
            # result = it._get_roi(img_path, mask_path, img_savepath, mask_savepath, pred_path=pred_path, zoom=1.1, mode='even', translation=False, debug_embed=False, save=True, output=True)
            result = it._get_roi(img_path, mask_path, img_savepath, mask_savepath, pred_path=pred_path, zoom=1.1, mode='even', translation=False, debug_embed_mode=True, save=False, console=True)
            if isinstance(result, float):
                validation_dices.append(result)
        if validation_dices:
            print(f'Validation set avg embed dice: {sum(validation_dices) / len(validation_dices)}')
            total_dices.extend(validation_dices)

    get_roi_from_unetr_or_myunetr_pred_result()
    get_training()

    if total_dices:
        print(f'Total set avg embed dice: {sum(total_dices) / len(total_dices)}')


def test_generate_new_unetr_dataset_json_from_convert_json():
    """Emit a UNETR-style dataset JSON keeping only path index 16."""
    it.generate_new_unetr_dataset_json_from_convert_json(
        'data/_conf/img/dataset_unetr_1332_332_264_convert.json',
        '/home/yusongli/_project/shidaoai/data/_conf/img/dataset_unetr_1332_332_264_coarse_to_fine.json',
        16,
    )


def run_embed1():
    """Compute embed dices over train+val using paths at index 16."""
    it.get_embed(
        convert_json_path='/home/yusongli/_project/shidaoai/data/_conf/img/dataset_unetr_1332_332_264_convert.json',
        tags=['training', 'validation'],
        keys=['image', 'label'],
        input_index=16,
        zoom=1.1,
        mode='even',
        translation=False,
        debug_embed=True,
        save=False,
        output=True,
    )


def run_embed2():
    """Drain it.yield_pathj over train+val image/label paths.

    NOTE(review): yielded items are discarded — presumably iterating the
    generator triggers its side effects; confirm in it.yield_pathj.
    """
    json_path = '/home/yusongli/_project/shidaoai/data/_conf/img/train1332_val332_test264_convert.json'
    for _ in it.yield_pathj(json_path, ['training', 'validation'], ['image', 'label'], []):
        pass


def run_yield_pathj():
    """Print the average dice between label paths at indices 1 and 2 for the
    validation split.
    """
    json_path = '/home/yusongli/_project/shidaoai/data/_conf/img/train1332_val332_test264_convert.json'
    dices = []
    for item in it.yield_pathj(json_path, \
            ['validation'], \
            ['label'], \
            [1, 2]):
        print(f'{item[0][0]} | {item[1][0]}')
        msk_arr1 = nib.load(item[0][0]).get_fdata()
        msk_arr2 = nib.load(item[1][0]).get_fdata()
        dices.append(it.dice(msk_arr1, msk_arr2))
    # Guard against an empty split (ZeroDivisionError in the old version).
    if dices:
        print(sum(dices) / len(dices))
    else:
        print('No label pairs found.')

def get_dataset_txt_for_unet():
    """Write train/val/all txt lists ("image|label" per line) for the 2D U-Net."""
    json_path = '/home/yusongli/_project/shidaoai/data/_conf/img/train1332_val332_test264_convert.json'
    traintxt_path = pathlib.Path('/home/yusongli/_project/shidaoai/task/01_seg/2D-Unet/_conf/train.txt')
    valtxt_path = pathlib.Path('/home/yusongli/_project/shidaoai/task/01_seg/2D-Unet/_conf/val.txt')
    alltxt_path = pathlib.Path('/home/yusongli/_project/shidaoai/task/01_seg/2D-Unet/_conf/all.txt')

    # BUGFIX: the old code tested `alltxt_path.exists()` (the file) before
    # creating `alltxt_folder` (the directory). mkdir with exist_ok=True is
    # idempotent, so no exists() pre-check is needed at all.
    for txt_path in (traintxt_path, valtxt_path, alltxt_path):
        txt_path.parent.mkdir(parents=True, exist_ok=True)
        txt_path.touch(exist_ok=True)

    with open(alltxt_path, 'w') as f_all:
        for txt, tag in [(traintxt_path, 'training'), (valtxt_path, 'validation')]:
            with open(txt, 'w') as f:
                for item in it.yield_pathj(json_path, \
                        [tag], \
                        ['image', 'label'], \
                        [1]):
                    # Same line goes to the per-split file and the combined file.
                    line = f'{item[0][0]}|{item[0][1]}\n'
                    f.write(line)
                    f_all.write(line)

def test_tensorflow():
    """Smoke-test that TensorFlow can probe for a GPU."""
    # Local import: tensorflow is heavy and only needed for this check.
    import tensorflow as tf
    # NOTE(review): the boolean result is discarded — this only exercises
    # the probe; print it if visibility is wanted.
    tf.test.is_gpu_available()


def run_cover_rate():
    """Print the average cover rate of predictions over GT labels for the
    validation split.

    With offline=True, precomputed crops (path indices 2 and 5) are compared
    via it.cover_rate; otherwise the embed is computed on the fly from
    indices 2 and 4 via it._get_roi.
    """
    json_path = '/home/yusongli/_project/shidaoai/data/_conf/img/train1332_val332_test264_convert.json'
    results = []
    offline = False
    if offline:
        # Cleanup: removed the unused enumerate() index from both loops.
        for item in it.yield_pathj(json_path, ['validation'], ['label'], [2, 5]):
            results.append(it.cover_rate(item[1][0], item[0][0]))
    else:
        for item in it.yield_pathj(json_path, ['validation'], ['label'], [2, 4]):
            results.append(it._get_roi(mask_path=item[0][0], pred_path=item[1][0], debug_embed_mode=2, mode='even', zoom=1.1, translation=False, console=False))
    # Guard against an empty split (ZeroDivisionError in the old version).
    if results:
        print(sum(results) / len(results))
    else:
        print('No samples found.')


if __name__ == '__main__':
    # Script entry point — uncomment the single routine you want to run.
    # fix_result()
    # run_set_pathj()
    # run_yield_pathj()
    # test_check_pixel_unetr_pred_total()
    run_cover_rate()
