import cv2
import nibabel as nib
import pathlib

from thesmuggler import smuggle

pp = smuggle('./pathparser.py')
import numpy as np
import math
import sys
from rich.pretty import pprint as print
import datetime
from tqdm.auto import tqdm
import inspect
from typing import Tuple, Optional, TypeVar, Union
from numpy.typing import ArrayLike
import concurrent.futures
import os
from collections import OrderedDict, defaultdict
import shutil
from numpy.distutils.misc_util import is_sequence
import pickle

T = TypeVar('T')
COMMON = '/home/yusongli/_dataset/shidaoai/img'
PATTERN_CT = '*CT*.nii.gz'
PATTERN_MASK = '*T[_1 ]*.nii.gz'
PATTERN_PRED = '*0.*.nii.gz'
TIMESTAMP = datetime.datetime.now().strftime('%Y%m%d')


def _factory() -> defaultdict:
    return defaultdict(_factory)


def _savenii(arr: ArrayLike, savepath: str, header: nib.nifti1.Nifti1Header, affine: ArrayLike,) -> None:
    """Save an array to a given path as a nifti file, creating parent dirs as needed.

    Args:
        arr (ArrayLike): image data to save.
        savepath (str): destination path; parent directories are created.
        header (nib.nifti1.Nifti1Header): nifti header to attach.
        affine (ArrayLike): nifti affine matrix to attach.
    """
    savepath = pathlib.Path(savepath)
    nii = nib.Nifti1Image(arr, affine, header)
    savepath.parent.mkdir(parents=True, exist_ok=True)
    nii.to_filename(savepath.as_posix())


def _dilate(mask_path: str, mask_savepath: str, kernel_size: int = 9, iterations: int = 1,) -> None:
    """Load a mask nifti, dilate it with a rectangular kernel, and save the result.

    Args:
        mask_path (str): path of the mask nifti to dilate.
        mask_savepath (str): path the dilated mask is written to.
        kernel_size (int): side length of the rectangular structuring element.
        iterations (int): number of dilation iterations.
    """
    print(mask_path)
    nii = nib.load(mask_path)
    maskarr, header, affine = nii.get_fdata().copy(), nii.header, nii.affine
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_size, kernel_size))
    # NOTE(review): the loaded array is 3-D while the kernel is 2-D; cv2.dilate
    # treats the third axis as channels, so dilation happens only within the
    # first two axes — confirm this per-slice behavior is intended.
    dilatedarr = cv2.dilate(maskarr, kernel, iterations=iterations)
    _savenii(dilatedarr, mask_savepath, header, affine)


def _scale_intensity(
    img_array: ArrayLike,
    a_min: Optional[int] = 0,
    a_max: Optional[int] = 1500,
    b_min: Optional[int] = 0,
    b_max: Optional[int] = 1,
    norm: Optional[bool] = True,
) -> ArrayLike:
    """Scale the intensity of the image.
    Args:
        img_array: The image array.
        a_min: The minimum value of the original intensity.
        a_max: The maximum value of the original intensity.
        b_min: The minimum value of the new intensity.
        b_max: The maximum value of the new intensity.
    """
    # Image intensity limitation.
    img_array[img_array > a_max] = a_max
    img_array[img_array < a_min] = a_min

    # Image intensity normalization.
    if norm:
        img_array = (img_array - a_min) / (a_max - a_min)
        img_array = img_array * (b_max - b_min) + b_min

    return img_array


def _roi(
    mask_path: str,
    img_path: Optional[str] = None,
    pred_path: Optional[str] = None,
    img_savepath: Optional[str] = None,
    mask_savepath: Optional[str] = None,
    area: Optional[Union[Tuple[int, int, int], int]] = None,
    scale: Optional[Tuple[float, float, float, float]] = None,
    zoom: Optional[Union[Tuple[float, float, float], float]] = None,
    translation: bool = True,
    mode: str = 'middle',
    echo: bool = True,
    warning: bool = True,
    save: bool = False,
) -> Tuple[Tuple, Tuple, Tuple, Tuple, float]:
    """Cut an ROI around the tumor from an image/mask, optionally guided by a prediction.

    Args:
        mask_path: The path of the mask.
        img_path: The path of the image. If None, the images will be
            ignored and only masks will be processed.
        pred_path: If not None (and the prediction is not all-zero), the ROI
            is located from the prediction instead of the mask.
        img_savepath: The path of the image after ROI. The anchor `{start_position}`
            was meant to be replaced here (that replacement is currently commented out).
        mask_savepath: The path of the mask after ROI.
        area: len(3) tuple or int, e.g. (x_area, y_area, z_area). The axis
            radius (not diameter) of the ROI. If None, the tumor contour is
            used. A falsy element keeps the tumor-derived radius for that axis.
        scale: len(4) tuple (a_min, a_max, b_min, b_max). If given, the img
            intensity is scaled (masks are never scaled).
        zoom: len(3) tuple or float. If not None, each ROI radius is directly
            multiplied by this value (>1 grows the ROI, <1 shrinks it).
        translation: If True, shift a bounding box that falls outside the
            image back inside; if False, simply clamp it at the image border
            (the crop may then be smaller than requested).
        mode: 'min', 'middle', 'max' for different cutting strategies.
            NOTE(review): any other value leaves the ROI bounds unset and
            raises NameError further down — no validation is performed.
        echo: If True, print the input paths to the console.
        warning: If True, print a warning when an all-zero prediction is ignored.
        save: If True, save the cropped arrays offline, else only compute.
    Return:
        (mask shape, tumor shape, roi shape, roi start position, cover rate)
    """

    # Central registry: input paths plus, later, loaded nib objects / arrays.
    d = {
        'img': {'path': img_path},
        'mask': {'path': mask_path},
        'pred': {'path': pred_path},
        'imgsv': {'path': img_savepath},
        'masksv': {'path': mask_savepath},
    }

    # Print to console
    if echo:
        echostring = f"{d['img']['path']} | {d['mask']['path']}"
        if d['pred']['path']:
            echostring += f" | {d['pred']['path']}"
        print(echostring)

    # Load every provided nifti; absent paths leave nib/arr as None.
    for key in ['img', 'mask', 'pred']:
        if d[key]['path']:
            d[key]['nib'] = nib.load(d[key]['path'])
            d[key]['arr'] = d[key]['nib'].get_fdata().copy()
        else:
            d[key]['nib'] = d[key]['arr'] = None

    # If pred path exists and not empty, use pred path, else, use mask path.
    if d['pred']['arr'] is None:
        target_array = d['mask']['arr']
    elif np.all(d['pred']['arr'] == 0):
        target_array = d['mask']['arr']
        if warning:
            print('Warning: pred array is totally zeros. Use mask array instead.')
    else:
        target_array = d['pred']['arr']

    # Bounding box of all nonzero voxels in the chosen target.
    nozero = np.nonzero(target_array)

    # The first dimension: x
    # The second dimension: y
    # The third dimension: z
    x_min = nozero[0].min()
    x_max = nozero[0].max()
    y_min = nozero[1].min()
    y_max = nozero[1].max()
    z_min = nozero[2].min()
    z_max = nozero[2].max()

    x_width = x_max - x_min + 1
    y_width = y_max - y_min + 1
    z_width = z_max - z_min + 1

    x_radius = math.ceil(x_width / 2)
    y_radius = math.ceil(y_width / 2)
    z_radius = math.ceil(z_width / 2)

    x_center = x_min + x_radius
    y_center = y_min + y_radius
    z_center = z_min + z_radius

    # If the size of the roi is artificially specified, the specified value is
    # used. Otherwise, cut along the tumor boundary.
    # Check if x_area is larger than the radius of the image. If larger, the
    # x_radius will be set to the image radius

    if area:
        # Normalize a scalar area into a per-axis triple; falsy entries keep
        # the tumor-derived radius.
        if not is_sequence(area):
            area = (area, area, area)
        x_radius = area[0] or x_radius
        y_radius = area[1] or y_radius
        z_radius = area[2] or z_radius

    if zoom:
        # Normalize a scalar zoom into a per-axis triple; falsy entries keep
        # the current radius.
        if not is_sequence(zoom):
            zoom = (zoom, zoom, zoom)
        x_radius = math.ceil((x_radius * zoom[0]) if zoom[0] else x_radius)
        y_radius = math.ceil((y_radius * zoom[1]) if zoom[1] else y_radius)
        z_radius = math.ceil((z_radius * zoom[2]) if zoom[2] else z_radius)

    max_radius = max(x_radius, y_radius, z_radius)

    # ! <<< Different cutting methods.
    # Smallest ROI, cut along edge in different axis (radii/zoom are ignored).
    if mode == 'min':
        if zoom is not None:
            print("Warning: Zoom detected and cannot take effect in 'min' mode.")
        x_roi_min_orig = x_min
        x_roi_max_orig = x_max
        y_roi_min_orig = y_min
        y_roi_max_orig = y_max
        z_roi_min_orig = z_min
        z_roi_max_orig = z_max
    # Middle roi, but left radius and right radius are same (keep larger one).
    elif mode == 'middle':
        x_roi_min_orig = x_center - x_radius
        x_roi_max_orig = x_center + x_radius
        y_roi_min_orig = y_center - y_radius
        y_roi_max_orig = y_center + y_radius
        z_roi_min_orig = z_center - z_radius
        z_roi_max_orig = z_center + z_radius
    # Common max roi: a cube sized by the largest per-axis radius (+1 margin).
    elif mode == 'max':
        x_roi_min_orig = x_center - (max_radius + 1)
        x_roi_max_orig = x_center + (max_radius + 1)
        y_roi_min_orig = y_center - (max_radius + 1)
        y_roi_max_orig = y_center + (max_radius + 1)
        z_roi_min_orig = z_center - (max_radius + 1)
        z_roi_max_orig = z_center + (max_radius + 1)
    # ! >>>

    # Note: If the voxal concatinates with the image boundary, there will be a
    # bug that the cropped array will not be a cube.
    # It need to be fixed in the future.
    # ? <<< Fix bug
    if not translation:
        # Clamp only: the crop shrinks when it would exceed the image bounds.
        x_roi_min_norm = max(x_roi_min_orig, 0)
        x_roi_max_norm = min(x_roi_max_orig, d['mask']['arr'].shape[0])
        y_roi_min_norm = max(y_roi_min_orig, 0)
        y_roi_max_norm = min(y_roi_max_orig, d['mask']['arr'].shape[1])
        z_roi_min_norm = max(z_roi_min_orig, 0)
        z_roi_max_norm = min(z_roi_max_orig, d['mask']['arr'].shape[2])
    else:
        def __fix_offset(roi_min_orig: int, roi_max_orig: int, img_shape: int) -> Tuple[int, int]:
            # Shift the window back inside the image, preserving its size when
            # possible; a final clamp handles windows larger than the image.
            if roi_max_orig > (img_shape - 1):
                roi_min_orig -= abs(roi_max_orig - (img_shape - 1))
                roi_max_orig = img_shape - 1
            if roi_min_orig < 0:
                roi_max_orig += abs(roi_min_orig)
                roi_min_orig = 0
            if roi_max_orig > (img_shape - 1):
                roi_max_orig = img_shape - 1
            return roi_min_orig, roi_max_orig

        x_roi_min_norm, x_roi_max_norm = __fix_offset(x_roi_min_orig, x_roi_max_orig, d['mask']['arr'].shape[0])
        y_roi_min_norm, y_roi_max_norm = __fix_offset(y_roi_min_orig, y_roi_max_orig, d['mask']['arr'].shape[1])
        z_roi_min_norm, z_roi_max_norm = __fix_offset(z_roi_min_orig, z_roi_max_orig, d['mask']['arr'].shape[2])
    # ? >>>

    # Regist img savearray and mask savearray
    for keysv, key in [('imgsv', 'img'), ('masksv', 'mask')]:
        if d[key]['arr'] is not None:
            d[keysv]['arr'] = d[key]['arr'][
                x_roi_min_norm:x_roi_max_norm, y_roi_min_norm:y_roi_max_norm, z_roi_min_norm:z_roi_max_norm,
            ]
        else:
            d[keysv]['arr'] = None

    start_position = (x_roi_min_norm, y_roi_min_norm, z_roi_min_norm)

    # Caculate cover rate: fraction of mask voxels kept by the crop (the crop
    # is re-embedded into a full-size zero array for the comparison).
    cover_rate: float = _cover_rate(
        d['mask']['arr'], _embed_clone(d['masksv']['arr'], d['mask']['arr'].shape, start_position)
    )

    # Scale intensity (image only).
    if scale is not None and d['img']['arr'] is not None:
        d['img']['arr'] = _scale_intensity(d['img']['arr'], scale[0], scale[1], scale[2], scale[3])

    # Save
    if save:
        for keysv, key in [('imgsv', 'img'), ('masksv', 'mask')]:
            if d[keysv]['arr'] is not None and d[keysv]['path'] is not None:
                # ? Record cut start position in a specific path.
                # d[keysv]['path'] = pp.replace(
                #     d[keysv]['path'],
                #     {'{start_position}': f'{start_position[0]}_{start_position[1]}_{start_position[2]}'},
                # )
                _savenii(
                    d[keysv]['arr'], d[keysv]['path'], d[key]['nib'].header, d[key]['nib'].affine,
                )

    # Return info
    # NOTE(review): tumor_shape uses max - min (one less than the voxel span
    # x_width computed above) — confirm whether the off-by-one is intended.
    tumor_shape = ((x_max - x_min), (y_max - y_min), (z_max - z_min))
    roi_shape = (
        (x_roi_max_norm - x_roi_min_norm),
        (y_roi_max_norm - y_roi_min_norm),
        (z_roi_max_norm - z_roi_min_norm),
    )
    return d['mask']['arr'].shape, tumor_shape, roi_shape, start_position, cover_rate


def _dice(im1: Union[ArrayLike, str], im2: Union[ArrayLike, str], empty_score: Optional[float] = 1.0) -> float:
    """
    Args
    im1 : array-like, bool
    im2 : array-like, bool

    Any other array of identical size. If not boolean, will be converted.

    Returns
    dice : float
    """
    if isinstance(im1, (str, pathlib.Path)):
        im1 = nib.load(im1).get_fdata()
    if isinstance(im2, (str, pathlib.Path)):
        im2 = nib.load(im2).get_fdata()
    im1 = np.asarray(im1).astype(bool)
    im2 = np.asarray(im2).astype(bool)

    if im1.shape != im2.shape:
        raise ValueError(
            f'Shape mismatch: im1 and im2 must have the same shape. \
        im1.shape: {im1.shape}, im2.shape: {im2.shape}.'
        )

    im_sum = im1.sum() + im2.sum()
    if im_sum == 0:
        return empty_score

    # Compute Dice coefficient
    intersection = np.logical_and(im1, im2)

    return 2.0 * intersection.sum() / im_sum


# def _jaccard_score(array_a: ArrayLike, array_b: ArrayLike, racine: int = 3, show_inter_union_img: bool = True) -> float:
#     intersection = np.minimum(array_a, array_b)
#     union = np.maximum(array_a, array_b)

#     # if(show_inter_union_img):
#     #     Image.fromarray(intersection).show()
#     #     Image.fromarray(union).show()

#     return intersection.sum()**(1. / racine) / union.sum()**(1. / racine)


# def _iou_coef(y_true: ArrayLike, y_pred: ArrayLike, smooth: int = 1) -> float:
#     intersection = np.logical_and(y_true, y_pred)
#     union = np.logical_or(y_true, y_pred)
#     return np.sum(intersection) / np.sum(union)


def _embed(small_array: ArrayLike, big_array: ArrayLike, big_index: Tuple) -> ArrayLike:
    """Overwrites values in big_array starting at big_index with those in small_array"""
    slices = [np.s_[i : i + j] for i, j in zip(big_index, small_array.shape)]
    big_array[tuple(slices)] = small_array
    return big_array


def _embed_clone(
    small_array: ArrayLike, big_array_shape: Tuple[int, int, int], big_index: Tuple[int, int, int]
) -> ArrayLike:
    """Like :func:`_embed`, but writes into a fresh zero array of *big_array_shape*."""
    canvas = np.zeros(big_array_shape)
    return _embed(small_array, canvas, big_index)


def _cover_rate(gt: ArrayLike, pred: ArrayLike) -> float:
    intersection = np.logical_and(gt, pred)
    return intersection.sum() / gt.sum()


class MyCounter(object):
    """Track the extrema of a stream of (value, sample) pairs.

    ``set(val, sample)`` feeds one observation; ``get_max``/``get_min`` return
    the largest/smallest value seen so far together with its sample.
    """

    def __init__(self) -> None:
        self._set_value()

    def _set_value(self) -> None:
        """Reset to sentinel values so the first real observation always wins."""
        # ~sys.maxsize == -sys.maxsize - 1, the most negative fixed-size int.
        self.max_val = ~sys.maxsize
        self.max_sample = None
        self.min_val = sys.maxsize
        self.min_sample = None

    def set_max(self, val: float, sample: T) -> None:
        """Record *sample* if *val* is a new maximum."""
        if val > self.max_val:
            self.max_val = val
            self.max_sample = sample

    def get_max(self) -> Tuple[float, T]:
        """Return (max value, its sample)."""
        return self.max_val, self.max_sample

    def set_min(self, val: float, sample: T) -> None:
        """Record *sample* if *val* is a new minimum."""
        if val < self.min_val:
            self.min_val = val
            self.min_sample = sample

    def get_min(self) -> Tuple[float, T]:
        # Fix: was annotated ``-> None`` although it returns a tuple.
        """Return (min value, its sample)."""
        return self.min_val, self.min_sample

    def set(self, val: float, sample: T) -> None:
        """Feed one observation into both extrema trackers."""
        self.set_max(val, sample)
        self.set_min(val, sample)

    def get(self) -> Tuple[Tuple[float, T], Tuple[float, T]]:
        # Fix: was annotated ``-> None`` although it returns a pair of tuples.
        """Return ((max value, max sample), (min value, min sample))."""
        return self.get_max(), self.get_min()

    def clear(self) -> None:
        """Reset both extrema to their sentinel values."""
        self._set_value()


def roi_myunetr_pred(yaml_metapath: str) -> None:
    '''
    Cut img,msk,dmsk by pdmsk (mode 'middle') with a zoom factor, and calculate
    the cover rate between dmsk and pdmsk.
    1. Don't save offline, just calculate the cover rate.
    2. If cover rate is ok, save offline (toggle via the `save` local).

    Results (counter extrema, average cover rate, sub-1.0 samples) are
    appended to ./log.txt for each zoom value.

    Args:
        yaml_metapath: Path of the yaml meta file describing the dataset splits.
    '''
    epoch = 56
    save = True
    # In normal instance, the training and test dataset should be 1.0
    # cover_rate.
    # But in practical instance, I found that the training is 0.999 but not 1.0.
    # so I created the debug list for saving those exist in training and test
    # but not 1.0 cover_rate samples.
    db = []  # For debug.
    rate_list = []
    counter = MyCounter()
    log = pathlib.Path('log.txt')
    log.touch(exist_ok=True)
    with open(log, 'r+') as f:
        f.seek(0)
        f.truncate()
        # Fix: `tqdm` is the class itself (imported via `from tqdm.auto import
        # tqdm`), so the previous `tqdm.tqdm(...)` raised AttributeError.
        # for zoom in tqdm(np.arange(1.0, 2.0, 0.2)):
        for zoom in tqdm([1.1]):
            for tag in ['training', 'validation', 'test']:
                generator = pp.YamlMetaGenerator(yaml_metapath, tags=[tag], keys=['image', 'label'])
                for item in tqdm(generator, total=len(generator)):
                    _tag, _key, _where, _who, _number, _imgname = item[0].split('+')
                    # if _who != 'fanxingcheng' or _number != '50557':
                    #     continue
                    _mskname = item[1].split('+')[-1]
                    img_folder = f'{COMMON}/_out/wangqifeng-spacial-precropped_96_224_224/{_where}/{_who}/{_number}'
                    msk_folder = img_folder
                    mskd_folder = (
                        f'{COMMON}'
                        '/_out/wangqifeng-spacial-precropped_96_224_224-dilated_k7_i2_maskonly'
                        f'/{_where}/{_who}/{_number}'
                    )
                    pred_folder = (
                        f'{COMMON}'
                        '/_out/wangqifeng-spacial-precropped_96_224_224-dilated_k7_i2-net_myunetr_val'
                        f'/{epoch}/{_where}/{_who}/{_number}'
                    )
                    imgs_folder = (
                        f'{COMMON}'
                        '/_out/wangqifeng-spacial-precropped_96_224_224-dilated_k7_i2-net_myunetr_val-roi_zoom'
                        f'/{TIMESTAMP}/{epoch}/zoom-{zoom}/{_where}/{_who}/{_number}/{{start_position}}'
                    )
                    msks_folder = imgs_folder

                    img = pp.filt_path(img_folder, patterns=PATTERN_CT)
                    msk = pp.filt_path(msk_folder, patterns=PATTERN_MASK)
                    mskd = pp.filt_path(mskd_folder, patterns=PATTERN_MASK)
                    pred = pp.filt_path(pred_folder, patterns=PATTERN_PRED, echo=False)
                    imgs = (pathlib.Path(imgs_folder) / _imgname).as_posix()
                    msks = (pathlib.Path(msks_folder) / _mskname).as_posix()

                    target = pred or mskd  # pred is None if tag == 'training'

                    result = _roi(
                        msk,
                        img_path=img,
                        pred_path=target,
                        img_savepath=imgs,
                        mask_savepath=msks,
                        zoom=zoom,
                        mode='middle',
                        save=save,
                        echo=False,
                    )

                    # Cover rates are tracked for the validation split only.
                    if tag == 'validation':
                        db.append((result[4], img))
                        rate_list.append(result[4])
                    # result[1] is the tumor shape; track its volume extrema.
                    volume = result[1][0] * result[1][1] * result[1][2]
                    counter.set(volume, msks)

            string = f'''{'-' * 50}
            zoom: {zoom}
            counter get:
            {counter.get()}
            avg cover rate: {np.mean(rate_list)}
            cover_rate < 1.0: {len(db)}
            {db}'''

            f.write(f'{inspect.cleandoc(string)}\n')
            f.flush()

            counter.clear()
            rate_list.clear()
            db.clear()


def dice_per_epoch(yaml_metapath: str) -> Tuple[float, float]:
    '''
    For myunetr/unetr big/small mask dice calculate.

    For every validation case, the small (cropped) prediction is re-embedded
    into the full-size volume at its recorded start position and compared
    against the full mask (big dice) and against the cropped mask (small dice).

    Returns:
        (mean big-mask dice, mean small-mask dice) over the validation split.
    '''
    dices_big = []
    dices_sml = []
    errors = []
    generator = pp.YamlMetaGenerator(yaml_metapath, ['validation'], ['label'])
    # Fix: `tqdm` is the class itself (imported via `from tqdm.auto import
    # tqdm`), so the previous `tqdm.tqdm(...)` raised AttributeError.
    for item in tqdm(generator, total=len(generator)):
        _where, _who, _number = item[0][2:5]
        mask_big_samplefolder = os.path.join(
            f'{COMMON}/_out/wangqifeng-spacial-precropped_96_224_224', f'{_where}/{_who}/{_number}'
        )
        mask_sml_samplefolder = os.path.join(
            f'{COMMON}/_out/wangqifeng-spacial-precropped_96_224_224-dilated_k7_i2-net_myunetr_val-roi_zoom',
            '20220407/56/zoom-1.1',
            f'{_where}/{_who}/{_number}',
        )
        pred_samplefolder = os.path.join(
            f'{COMMON}/_out/',
            'wangqifeng-spacial-precropped_96_224_224-dilated_k7_i2-net_myunetr_val-roi_zoom-net_2dunet_val/20220407',
        )
        pred_sml = pp.filt_path(pred_samplefolder, patterns=f'*{_number}*.nii.gz')
        mask_big = pp.filt_path(mask_big_samplefolder, patterns=PATTERN_MASK)
        mask_sml = pp.filt_path(mask_sml_samplefolder, patterns=PATTERN_MASK)

        # The crop's start position is encoded in the parent folder name as
        # "x_y_z".
        start_position: str = pathlib.Path(mask_sml).parent.name
        start_position: Tuple[int, int, int] = tuple(int(item) for item in start_position.split('_'))

        pred_sml_arr = nib.load(pred_sml).get_fdata()
        mask_big_arr = nib.load(mask_big).get_fdata()
        try:
            pred_big_arr = _embed(pred_sml_arr, np.zeros_like(mask_big_arr), start_position)
            dice_big = _dice(mask_big_arr, pred_big_arr)
            dices_big.append(dice_big)

            dice_sml = _dice(mask_sml, pred_sml)
            dices_sml.append(dice_sml)
        except Exception:
            # Best-effort: record the failing sample and continue.
            errors.append((pred_sml, mask_big, mask_sml))
    mean_dice_big = np.mean(dices_big)
    mean_dice_sml = np.mean(dices_sml)
    print(f'mean big dice: {mean_dice_big}')
    print(f'mean sml dice: {mean_dice_sml}')
    if errors:
        print(f'{len(errors)} samples failed: {errors}')
    # Fix: removed the leftover `ipdb.set_trace()` debug breakpoint and the
    # redundant second `np.mean` over already-scalar means; the annotation
    # claimed `-> None` although a tuple is returned.
    return mean_dice_big, mean_dice_sml


def bulkdice(yaml_metapath: str) -> None:
    """Run `dice_per_epoch` for every epoch in a process pool.

    NOTE(review): the original submitted ``dice_per_epoch(yaml_metapath, i)``,
    but ``dice_per_epoch`` accepts a single argument — every worker died with a
    TypeError that was silently discarded because the futures were never
    inspected. Submit the valid call and surface worker exceptions instead.

    Args:
        yaml_metapath: Path of the yaml meta file describing the dataset splits.
    """
    EPOCH = 99

    with concurrent.futures.ProcessPoolExecutor() as executor:  # tmux:0
        futures = [executor.submit(dice_per_epoch, yaml_metapath) for _ in range(EPOCH + 1)]
        for future in concurrent.futures.as_completed(futures):
            future.result()  # re-raise any exception from the worker


def unet_txt(yaml_metapath: str, traintxt: str, valtxt: str, testtxt: str) -> None:
    """Write `image|mask` path pairs for the 2D-UNet into per-split txt files."""
    split_files = [(traintxt, 'training'), (valtxt, 'validation'), (testtxt, 'test')]
    for txt_path, split_tag in split_files:
        with open(txt_path, 'r+') as handle:
            # Empty the existing file before rewriting it.
            handle.seek(0)
            handle.truncate()
            meta = pp.YamlMetaGenerator(yaml_metapath, tags=[split_tag], keys=['image', 'label'])
            for entry in meta:
                _tag, _key, _where, _who, _number, _imgname = entry[0].split('+')
                folder = os.path.join(
                    f'{COMMON}',
                    '_out/wangqifeng-spacial-precropped_96_224_224-dilated_k7_i2-net_myunetr_val-roi_zoom',
                    '20220407/56/zoom-1.1',
                )
                entry[0] = pp.exists(pp.filt_path(folder, patterns=f'*{_where}/{_who}/{_number}/**/*CT*.nii.gz'))
                entry[1] = pp.exists(pp.filt_path(folder, patterns=f'*{_where}/{_who}/{_number}/**/*MASK*.nii.gz'))
                handle.write(f'{entry[0]}|{entry[1]}\n')


def _JDICT():
    return OrderedDict(
        [
            ('description', 'shidaoai segmentation'),
            ('labels', {'0': 'background', '1': 'shidaoai'}),
            ('licence', 'GPL-3.0'),
            ('modality', {'0': 'CT'}),
            ('name', 'shidaoai'),
            ('numTest', None),
            ('numTraining', None),
            ('reference', 'no'),
            ('release', '0.0'),
            ('tensorImageSize', '3D'),
            ('test', []),
            ('training', []),
        ]
    )


def nn_dataset(yaml_metapath: str) -> None:
    '''
    Generate nn series dataset from wangqifeng dataset format.

    Copies each image/mask pair into the nnU-Net raw layout
    (imagesTr/labelsTr for training+validation, imagesTs/labelsTs for test,
    numbered S_XXXX), fills in dataset.json, and writes bidirectional
    name-mapping json files (s_to_n.json / n_to_s.json).
    '''
    shidaoaibase = '/home/yusongli/_dataset/shidaoai/img/_out/wangqifeng-spacial'
    nnbase = '/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_raw_data_base/nnUNet_raw_data/Task602_Z2'
    jdict = _JDICT()

    # Autovivifying lookup tables between shidaoai names and nn names.
    s_to_n = defaultdict(_factory)
    n_to_s = defaultdict(_factory)
    tri = 0  # running index for training (incl. validation) cases
    tsi = 0  # running index for test cases
    generator = pp.YamlMetaGenerator(yaml_metapath, tags=['training', 'validation', 'test'], keys=['image', 'label'])
    for item in tqdm(generator, total=len(generator), leave=False, dynamic_ncols=True):
        # NOTE(review): items are indexed as sequences here (item[0][:6]),
        # while other helpers split item[0] on '+' — assumes the generator can
        # yield pre-split metadata; confirm against pathparser.py.
        _tag, _imgkey, _where, _who, _number, _imgname = item[0][:6]
        _mskkey, _mskname = item[1][1], item[1][5]

        origfolder = f'{shidaoaibase}/{_where}/{_who}/{_number}'
        origimg = pp.exists(f'{origfolder}/{_imgname}')
        origmsk = pp.exists(f'{origfolder}/{_mskname}')

        # Test cases go to imagesTs/labelsTs; everything else to imagesTr/labelsTr.
        if _tag == 'test':
            newimgtag = f'{_imgkey}sTs'
            newmsktag = f'{_mskkey}sTs'
            newimgfolder = f'{nnbase}/{newimgtag}'
            newmskfolder = f'{nnbase}/{newmsktag}'
            newimg = pathlib.Path(f'{newimgfolder}/S_{tsi:04d}_0000.nii.gz')
            newmsk = pathlib.Path(f'{newmskfolder}/S_{tsi:04d}.nii.gz')
            jdict['test'].append(newimg.relative_to(nnbase).as_posix())
        else:
            newimgtag = f'{_imgkey}sTr'
            newmsktag = f'{_mskkey}sTr'
            newimgfolder = f'{nnbase}/{newimgtag}'
            newmskfolder = f'{nnbase}/{newmsktag}'
            newimg = pathlib.Path(f'{newimgfolder}/S_{tri:04d}_0000.nii.gz')
            newmsk = pathlib.Path(f'{newmskfolder}/S_{tri:04d}.nii.gz')
            jdict['training'].append(
                {
                    'image': newimg.relative_to(nnbase).as_posix(),
                    'label': newmsk.relative_to(nnbase).as_posix(),
                }
            )
        # Record the mapping in both directions before bumping the index.
        recorder = {
            'shidaoai': [_tag, _where, _who, _number, [_imgname, _mskname]],
            'nn': [newimg.name, newmsk.name],
        }
        s_to_n[_tag][_where][_who][_number] = recorder
        n_to_s[f'{_tag}'][f"{(tsi if _tag == 'test' else tri):04d}"] = recorder

        if _tag == 'test':
            tsi += 1
        else:
            tri += 1

        newimg.parent.mkdir(parents=True, exist_ok=True)
        newmsk.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy(origimg, newimg.as_posix())
        shutil.copy(origmsk, newmsk.as_posix())

    jdict['numTest'] = tsi
    jdict['numTraining'] = tri

    pp.savejson(jdict, f'{nnbase}/dataset.json')
    pp.savejson(s_to_n, f'{nnbase}/s_to_n.json')
    pp.savejson(n_to_s, f'{nnbase}/n_to_s.json')


def nn_roi() -> None:
    """Crop nnU-Net train/val cases to a zoomed ROI around the coarse prediction.

    For every image under the Task606 imagesTr folder, the matching label and
    coarse prediction are located by case number, `_roi` cuts image and mask to
    a zoomed bounding box, the crops are written under `savepath`, and each
    case's cut start position plus the mean cover rate are logged.
    """
    zoom = 2.0
    save = True

    # savepath = f'/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_raw_data_base/nnUNet_raw_data/Task607_CZ2/zoom_{zoom}/{TIMESTAMP}'
    savepath = '/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_raw_data_base/nnUNet_raw_data/Task607_CZ2_empty_pred'

    train_val = [
        # [img, msk, pred]
        [
            pathlib.Path('/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_raw_data_base/nnUNet_raw_data/Task606_C/imagesTr'),  # -> 1664
            pathlib.Path('/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_raw_data_base/nnUNet_raw_data/Task606_C/labelsTr'),  # -> 1664
            pathlib.Path('/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_cropped_data/nnUNet/3d_fullres/Task606_C/new_coarse__nnUNetPlansv2.1/fold_0/validation_raw_postprocessed'),  # -> 1664
        ],
        1665,  # total
        'Train',  # description
        f'{savepath}',  # save path
    ]

    # test = [
    #     # [img, msk, pred]
    #     [
    #         pathlib.Path('/data/caibaoshuo/imagesTs'),  # -> 264
    #         pathlib.Path('/data/caibaoshuo/labelsTs'),  # -> 264
    #         pathlib.Path('/data/caibaoshuo/quick_data/out2'),  # -> 264
    #     ],
    #     264,  # total
    #     'Test',  # description
    #     f'{savepath}/out2',  # save path
    # ]

    # ! <<< open debug yusongli
    # l = [
    #     'zc_0017',
    #     'fxc_0077',
    #     'lt_0718',
    #     'zc_0195',
    #     'lt_0830',
    #     'zc_0090',
    #     'yx_0112',
    #     'lt_0544',
    #     'fxc_0041',
    #     'ly_0123',
    #     'lt_0827',
    #     'lt_0747'
    # ]
    # ! >>> clos debug

    # for things in tqdm([train_val, test], leave=False, dynamic_ncols=True, desc='Tag'):
    for things in tqdm([train_val], leave=False, dynamic_ncols=True, desc='Tag'):
        mylist, total, desc, tagsavepath = things
        cover_rates = []
        pathlib.Path(tagsavepath).mkdir(parents=True, exist_ok=True)
        # One "numstr|x+y+z" line per case records where each crop starts.
        with open(f'{tagsavepath}/start_positions.txt', 'w') as f:
            f.seek(0)
            f.truncate()
            for img in (
                pbar := tqdm(mylist[0].rglob('*nii*'), total=total, leave=False, dynamic_ncols=True, desc=desc)
            ):
                # Case id: filename without extension and without the trailing
                # modality suffix (e.g. "_0000").
                numstr = '_'.join(pathlib.Path(img).name.split('.')[0].split('_')[:-1])
                msk = pp.filt_path(mylist[1], patterns=f'*{numstr}*', echo=False)
                pred = pp.filt_path(mylist[2], patterns=f'*{numstr}*', echo=False)
                img_sv = f'{tagsavepath}/imagesTr/{pathlib.Path(img).name}'
                msk_sv = f'{tagsavepath}/labelsTr/{pathlib.Path(msk).name}'
                results = _roi(
                    img_path=img,
                    mask_path=msk,
                    pred_path=pred,
                    img_savepath=img_sv,
                    mask_savepath=msk_sv,
                    zoom=zoom,
                    echo=False,
                    warning=False,
                    save=save,
                )
                start_position, cover_rate = results[-2:]
                pbar.set_description(f'{desc} cover_rate => {cover_rate:.4f}')
                cover_rates.append(cover_rate)
                f.write(f'{numstr}|{start_position[0]}+{start_position[1]}+{start_position[2]}\n')
        # Append the mean cover rate to this tag's spec for the summary below.
        things.append(np.mean(cover_rates))

    # for things in [train_val, test]:
    for things in [train_val]:
        print(f'{things[2]} avg cover_rate: {things[-1]}')


def nn_json() -> None:
    '''
    Generate dataset.json from nn series dataset format.
    '''
    tasknum = 607
    taskname = 'CZ2'

    task_dir = pathlib.Path(f'/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_raw_data_base/nnUNet_raw_data/Task{tasknum}_{taskname}')
    jdict = _JDICT()

    # Every case under labelsTr goes into 'training'; the train/val split
    # itself is produced elsewhere, so only the combined list is used here.
    _train, _val, every_label = nn_split((task_dir / 'labelsTr'))
    for label_file in every_label:
        jdict['training'].append(
            {
                'image': f'imagesTr/{label_file.name.split(".")[0]}_0000.nii.gz',
                'label': f'labelsTr/{label_file.name}'
            }
        )
    jdict['test'] = []
    jdict['numTraining'] = len(jdict['training'])
    jdict['numTest'] = len(jdict['test'])
    pp.savejson(jdict, f'{task_dir}/dataset.json')


def nn_split(folder: str) -> Tuple[list, list, list]:
    """Split the nifti files under *folder* into train/val sets by patient.

    Files are grouped by patient id (the filename part before the first
    underscore); 20% of each patient's files (rounded down) are sampled into
    the validation set with a fixed seed, so the split is reproducible.

    Fixes over the original: the return annotation said ``-> None`` although a
    3-tuple is returned, and a local shadowed the ``all`` builtin.

    Args:
        folder: Directory searched recursively for ``*nii.gz`` files.

    Returns:
        (train, val, all_files) lists of pathlib.Path objects.
    """
    import random
    random.seed(0)  # fixed seed: identical split on every run
    folder = pathlib.Path(folder)
    all_files = list(folder.rglob('*nii.gz'))
    # Group files by patient so the 20% sampling is per-patient.
    by_patient = defaultdict(list)
    for item in all_files:
        by_patient[item.name.split('_')[0]].append(item)
    val = []
    for files in by_patient.values():
        val.extend(random.sample(files, int(len(files) * 0.2)))
    train = [item for item in all_files if item not in val]
    return train, val, all_files


def nn_yaml(splits_final_txt, start_positions, savepath):
    """Convert an nnU-Net splits_final.pkl into a training/validation yaml.

    Args:
        splits_final_txt: Path to the pickled splits file; fold 0 is used.
        start_positions: Currently unused (kept for interface compatibility).
        savepath: Destination path for the generated yaml.
    """
    # NOTE: pickle.load can execute arbitrary code on untrusted input; the
    # splits file here is produced locally by nnU-Net preprocessing.
    with open(splits_final_txt, 'rb') as handle:
        splits = pickle.load(handle)

    result = {'training': [], 'validation': []}

    for src_tag, dst_tag in (('train', 'training'), ('val', 'validation')):
        entries = [
            {
                'image': ['image', case],
                'label': ['label', case],
            }
            for case in splits[0][src_tag]
        ]
        result[dst_tag].extend(entries)

    pp.saveyaml(result, savepath)


if __name__ == '__main__':
    # Entry point: individual pipeline steps are toggled by (un)commenting.
    yaml_metapath = '/home/yusongli/Documents/shidaoai_new_project/data/meta_data.yaml'
    yaml_metapath2 = '/home/yusongli/Documents/shidaoai_new_project/data/meta_data2.yaml'
    # roi_myunetr_pred(yaml_metapath)

    # bulkdice(yaml_metapath)

    # dice_per_epoch(yaml_metapath2)

    # unettxtfolder = '/home/yusongli/Documents/shidaoai_new_project/networks/2D-Unet/_conf'
    # traintxt = f'{unettxtfolder}/train.txt'
    # valtxt = f'{unettxtfolder}/val.txt'
    # testtxt = f'{unettxtfolder}/test.txt'
    # unet_txt(yaml_metapath, traintxt, valtxt, testtxt)

    # NOTE(review): `nnunet_dataset` does not exist in this file — presumably
    # this meant `nn_dataset`.
    # nnunet_dataset(yaml_metapath2)
    # nn_json()
    # nn_roi()
    splits_final_txt = '/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_preprocessed/Task607_CZ2/splits_final.pkl'
    start_positions = '/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_raw_data_base/nnUNet_raw_data/Task607_CZ2/start_positions.txt'
    savepath = '/home/yusongli/Documents/shidaoai_new_project/data/Task607_CZ2.yaml'
    nn_yaml(splits_final_txt, start_positions, savepath)
