# ===============
# === Brats21 ===
# ===============
from nnunet.network_architecture.yunet import YUNet, Change
from nnunet.network_architecture.initialization import InitWeights_He
from thesmuggler import smuggle
import torch
from torch import nn
import os
from rich.pretty import pprint as print
import math
import itertools
from nnunet.network_architecture.generic_UNet import (
    ConvDropoutNormNonlin,
    StackedConvLayers,
)
from nnunet.network_architecture.yunet import StackedTranspConvLayers
import pathlib
from tqdm.auto import tqdm
from batchgenerators.utilities.file_and_folder_operations import load_pickle, save_pickle
import random


# Sibling project modules loaded by file path (outside the package tree) via thesmuggler.
nnunet_yusongli = smuggle('../nnunetv1/yusongli.py')  # dice/evaluation helpers
pp = smuggle('../unetr/src/data/pathparser.py')  # path filtering / templating utilities
op = smuggle('../unetr/src/data/operator.py')  # array-level operators (e.g. _dice)


# On-disk layout of the nnU-Net-style dataset roots used throughout this script.
_MYROOT = '/home/yusongli/_dataset/shidaoai/img/_out/nn'
_MYDATA = f'{_MYROOT}/DATASET'
_MYMODEL = 'nnUNet'

BASE = f'{_MYDATA}/{_MYMODEL}_raw_data_base'
PREPROCESSING_OUTPUT_DIR = f'{_MYDATA}/{_MYMODEL}_preprocessed'
# NOTE(review): the name says "training output" but the path points at *_cropped_data —
# confirm this mismatch is intentional.
NETWORK_TRAINING_OUTPUT_DIR_BASE = f'{_MYDATA}/{_MYMODEL}_cropped_data'


def test_model():
    """Smoke-test a single YUNet forward pass on one random 3D volume."""
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = '2'

    dummy = torch.randn([1, 1, 24, 64, 80])

    # Full constructor configuration collected in one place so it can be
    # inspected or tweaked as a unit before instantiation.
    model_kwargs = {
        'input_channels': 1,
        'base_num_features': 32,
        'num_classes': 2,
        'num_pool': 4,
        'num_conv_per_stage': 2,
        'feat_map_mul_on_downscale': 2,
        'conv_op': nn.Conv3d,
        'norm_op': nn.BatchNorm3d,
        'norm_op_kwargs': {'eps': 1e-05, 'affine': True},
        'dropout_op': nn.Dropout3d,
        'dropout_op_kwargs': {'p': 0, 'inplace': True},
        'nonlin': nn.LeakyReLU,
        'nonlin_kwargs': {'negative_slope': 0.01, 'inplace': True},
        'deep_supervision': True,
        'dropout_in_localization': False,
        'final_nonlin': lambda t: t,  # identity: raw logits are returned
        'weightInitializer': InitWeights_He(1e-2),
        'pool_op_kernel_sizes': [[1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2]],
        'conv_kernel_sizes': [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]],
        'upscale_logits': False,
        'convolutional_pooling': True,
        'convolutional_upsampling': True,
        'max_num_features': 320,
        'basic_block': ConvDropoutNormNonlin,
        'seg_output_use_bias': False,
        'encoder_scale': 1,
        'axial_attention': True,
        'heads': 1,
        'dim_heads': 4,
        'volume_shape': (24, 64, 80),
        'no_attention': [0],
        'axial_bn': True,
        'sum_axial_out': True,
        'residual_attention': True,
    }
    model = YUNet(**model_kwargs)
    # summary(model, input_size=dummy.shape, device='cpu', depth=8)
    _ = model(dummy)


def dice():
    """Evaluate fine-stage (Task607_CZ2) validation predictions against the
    full-size Task606_C ground-truth labels.

    The predictions are crops of the big volumes; ``start_positions`` records
    where each crop sits inside its full volume, and ``val_list`` restricts the
    evaluation to fold-0 validation cases. The actual metric computation is
    delegated to the smuggled ``nnunet_yusongli.dice`` helper, once per
    trainer output folder.
    """
    # Post-processed validation outputs of the three fine-stage trainers.
    folders = [
        '/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_cropped_data/nnUNet/3d_fullres/Task607_CZ2/new_fine_yunet__nnUNetPlansv2.1/fold_0/validation_raw_postprocessed',
        '/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_cropped_data/nnUNet/3d_fullres/Task607_CZ2/new_fine_brats__nnUNetPlansv2.1/fold_0/validation_raw_postprocessed',
        '/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_cropped_data/nnUNet/3d_fullres/Task607_CZ2/new_fine_nnunet__nnUNetPlansv2.1/fold_0/validation_raw_postprocessed',
    ]
    # Full-resolution ground-truth labels of the coarse task.
    big_gt_folder = '/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_raw_data_base/nnUNet_raw_data/Task606_C/labelsTr'
    # Crop offsets of each fine-task case inside its full volume.
    start_positions = '/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_raw_data_base/nnUNet_raw_data/Task607_CZ2/start_positions.txt'
    # Fold-0 validation case ids of the fine-task split.
    val_list = load_pickle('/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_preprocessed/Task607_CZ2/splits_final.pkl')[0]['val']
    for folder in folders:
        nnunet_yusongli.dice(folder, big_gt_folder, start_positions, val_list)



def dice_big():
    """Print the mean dice of each coarse-stage prediction folder vs. ground truth.

    For every prediction file, the matching ground-truth file is located by
    name; cases with no GT match or a dice of exactly 0.0 are skipped.
    One mean is printed per prediction folder.
    """
    pred_paths = [
        pathlib.Path('/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_cropped_data/nnUNet/3d_fullres/Task606_C/new_coarse_01__nnUNetPlansv2.1/fold_0/validation_raw_postprocessed'),
        pathlib.Path('/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_cropped_data/nnUNet/3d_fullres/Task606_C/new_coarse__nnUNetPlansv2.1/fold_0/validation_raw_postprocessed')
    ]
    gt_path = pathlib.Path('/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_raw_data_base/nnUNet_raw_data/Task606_C/labelsTr')
    for pred_path in pred_paths:
        # BUGFIX: reset per folder — previously the list was shared across folders,
        # so the second folder's "Mean" included the first folder's scores.
        dices = []
        for pred in tqdm(pred_path.iterdir(), total=334):
            gt = pp.filt_path(gt_path, patterns=f'*{pred.name}*', echo=False)
            if gt is None:
                continue
            # Renamed from `dice` to avoid shadowing the module-level dice() function.
            score = op._dice(pred, gt)
            if score == 0.0:
                continue
            dices.append(score)
        print('=' * 50)
        if dices:
            print(f'Mean: {sum(dices) / len(dices)}')
        else:
            # Previously this raised ZeroDivisionError when no case survived the filters.
            print('Mean: n/a (no valid cases)')


def split_coarse():
    """Build one train/val split for Task606_C and save it as a pickle.

    Cases are grouped by patient prefix (the text before the first '_' in the
    file name); 20% of each patient's cases are sampled into the validation
    set (seeded, reproducible) and the remainder form the training set.
    """
    random.seed(0)
    labels = pathlib.Path('/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_raw_data_base/nnUNet_raw_data/Task606_C/labelsTr')

    # Single pass over the label files: remember every case id (file stem) and
    # group the ids per patient prefix for stratified sampling.
    groups = {}
    case_ids = []
    for item in labels.iterdir():
        case_id = item.name.split('.')[0]
        case_ids.append(case_id)
        groups.setdefault(item.name.split('_')[0], []).append(case_id)

    val = []
    for ids in groups.values():
        val.extend(random.sample(ids, int(len(ids) * 0.2)))

    # Set membership for O(1) lookups instead of scanning the val list per case.
    val_set = set(val)
    splits = [
        {
            'train': [cid for cid in case_ids if cid not in val_set],
            'val': val,
        }
    ]
    savepath = '/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_preprocessed/Task606_C/splits_final_yusongli.pkl'
    # BUGFIX: removed a leftover `import ipdb; ipdb.set_trace()` that blocked
    # every non-interactive run right before the pickle was written.
    save_pickle(splits, savepath)


def split_fine() -> None:
    """Interactively inspect/edit split pickles from the coarse and fine tasks.

    Loads three split files and intentionally drops into an ipdb session for
    manual editing; the commented-out lines below record one such previous
    manual session (cases removed from the validation set by hand).
    """
    # C: coarse-task (Task606_C) split; CZ2: fine-task (Task607_CZ2) split;
    # B: an earlier first-round split kept in the home directory.
    C = load_pickle('/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_preprocessed/Task606_C/splits_final.pkl')
    CZ2 = load_pickle('/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_preprocessed/Task607_CZ2/splits_final.pkl')
    B = load_pickle('/home/yusongli/splits_final_first.pkl')
    import ipdb; ipdb.set_trace()  # ! debug yusongli
    # splits[0]['val'].remove('zc_0017')
    # splits[0]['val'].remove('fxc_0077')
    # splits[0]['val'].remove('lt_0718')
    # splits[0]['val'].remove('zc_0195')
    # splits[0]['val'].remove('lt_0830')
    # splits[0]['val'].remove('zc_0090')
    # splits[0]['val'].remove('yx_0112')
    # splits[0]['val'].remove('lt_0544')
    # splits[0]['val'].remove('fxc_0041')
    # splits[0]['val'].remove('ly_0123')
    # splits[0]['val'].remove('lt_0827')
    # splits[0]['val'].remove('lt_0747')
    # savepath = '/home/yusongli/_dataset/shidaoai/img/_out/nn/DATASET/nnUNet_preprocessed/Task606_C/splits_final_second.pkl'
    # save_pickle(splits, savepath)


def test_conv() -> None:
    """
    Given the input and output tensor shapes, find the parameters of the convolutional layers.

    Brute-forces (kernel_size, stride, padding) triples — each a scalar applied
    to all three spatial dims — and prints the candidate parameter sets whose
    resulting output shape matches ``target`` (see NOTE on ``_falsefilter``).
    """
    # =========================================================================================
    device = 'cpu'

    # The original tensor. x can be 2D image (4 dimensions total) or 3D image (5 dimensions total).
    # x = torch.randn(1, 32, 24, 64, 80).to(device)
    x = torch.randn(1, 32, 32, 80, 96).to(device)
    # x = torch.randn(1, 128, 24, 16, 20).to(device)
    # x = torch.randn(1, 256, 12, 8, 10).to(device)

    # if upsample, transp should be True, else False
    transp = False

    # The target shape, can be len(3) list or len(2) list.
    target = [32, 80, 96]  # (10, 32, 24, 64, 80)
    # target = [24, 32, 40]  # (10, 64, 24, 32, 40)
    # target = [24, 16, 20]  # (10, 128, 24, 16, 20)
    # target = [12, 8, 10]  # (10, 256, 12, 8, 10)

    # the search counter: upper bound (exclusive) of the kernel-size sweep
    counter = 100

    # =========================================================================================

    def _ensure_tuple(k):
        # Broadcast a scalar hyper-parameter to all three spatial dims.
        if not isinstance(k, (list, tuple)):
            return [k, k, k]
        return k

    def _falsefilter(x):
        # Keep (return False) an entry as soon as ANY single dimension matches
        # the target.
        # NOTE(review): this reports partial matches too; if an exact shape
        # match is wanted, all three dims should be required to match — confirm.
        for i in range(3):
            if x[0][i] == target[i]:
                return False
        return True

    # Candidate (kernel, stride, padding) triples: odd kernels in [1, counter),
    # strides/paddings in [0, 5]; stride 0 is filtered out (division by zero).
    paras = list(itertools.filterfalse(lambda x: x[1] == 0, itertools.product([i for i in range(counter) if i & 1 == 1], [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5])))

    hashtable = {}

    for para in paras:
        para = list(map(_ensure_tuple, para))
        newsize = [
            math.floor(((size - para[0][d] + 2 * para[2][d]) / para[1][d] + 1)) for d, size in enumerate(x.shape[2:])
        ] if not transp else [
            # https://blog.csdn.net/qq_34914551/article/details/89361957
            (size - 1) * para[1][d] - 2 * para[2][d] + para[0][d] for d, size in enumerate(x.shape[2:])
        ]
        key = '_'.join(map(str, newsize))
        # Bucket parameter triples by the output shape they produce; each
        # bucket's first element is the shape itself, the rest are candidates.
        hashtable.setdefault(key, [newsize]).append(para)
    conv = list(itertools.filterfalse(_falsefilter, hashtable.values()))
    print(conv)


def test_conv2():
    """Trace tensor shapes through a chain of transposed convolutions.

    Starts from the deepest feature map and prints the shape after every
    (non-None) upsampling layer.
    """
    feats = [
        torch.randn(10, 32, 24, 64, 80),
        torch.randn(10, 64, 24, 32, 40),
        torch.randn(10, 128, 24, 16, 20),
        torch.randn(10, 256, 12, 8, 10),
    ]
    # conv[0]: downsampling path, conv[1]: upsampling path (trailing None = no-op).
    conv = nn.ModuleList([
        nn.ModuleList([
            nn.Conv3d(32, 64, kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)),
            nn.Conv3d(64, 128, kernel_size=(3, 3, 3), stride=(1, 2, 2), padding=(1, 1, 1)),
            nn.Conv3d(128, 256, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1)),
        ]),
        nn.ModuleList([
            nn.ConvTranspose3d(256, 128, kernel_size=(2, 2, 2), stride=(2, 2, 2)),
            nn.ConvTranspose3d(128, 64, kernel_size=(1, 2, 2), stride=(1, 2, 2)),
            nn.ConvTranspose3d(64, 32, kernel_size=(1, 2, 2), stride=(1, 2, 2)),
            None
        ])
    ])

    current = feats[3]
    for layer in conv[1]:
        if layer is None:
            continue
        current = layer(current)
        print(current.shape)

def test_conv3():
    """Smoke-test StackedTranspConvLayers on a random 3D feature map and
    print the output shape.
    """
    # Removed an unused `x1` tensor that was allocated but never consumed.
    x2 = torch.randn(1, 64, 24, 64, 80)

    # NOTE(review): padding (1, 1, 1) pads the depth dim although the kernel
    # (1, 3, 3) does not span it — confirm this is intended.
    conv_kwargs = {'kernel_size': (1, 3, 3), 'stride': (1, 2, 2), 'padding': (1, 1, 1)}
    norm_op_kwargs = {'eps': 1e-05, 'affine': True}
    dropout_op_kwargs = {'p': 0, 'inplace': True}
    nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
    layer = StackedTranspConvLayers(
        input_feature_channels=64,
        output_feature_channels=64,
        num_convs=2,
        conv_op=nn.Conv3d,
        conv_kwargs=conv_kwargs,
        norm_op=nn.BatchNorm3d,
        norm_op_kwargs=norm_op_kwargs,
        dropout_op=nn.Dropout3d,
        dropout_op_kwargs=dropout_op_kwargs,
        nonlin_kwargs=nonlin_kwargs,
    )
    print(layer(x2).shape)


def test_spiders():
    """Smoke-test the Change block on a pair of feature maps at two scales."""
    up_feat = torch.randn(1, 64, 24, 32, 40)
    down_feat = torch.randn(1, 128, 24, 16, 20)
    change = Change(
        conv_kwargs={'in_channels': 64, 'out_channels': 128, 'kernel_size': (1, 3, 3), 'stride': (1, 2, 2), 'padding': (0, 1, 1)},
        transp_kwargs={'in_channels': 128, 'out_channels': 64, 'kernel_size': (1, 2, 2), 'stride': (1, 2, 2)},
    )
    up_out, down_out = change(up_feat, down_feat)


if __name__ == '__main__':
    # Scratch entry points — uncomment exactly one at a time.
    # test_model()
    test_conv()
    # dice()
    # test_conv2()
    # test_spiders()
    # dice_big()
    # split_coarse()
    # split_fine()
