import glob
from argparse import ArgumentParser
from pathlib import Path

import nibabel as nib
import numpy as np
from sklearn.preprocessing import MinMaxScaler

parser = ArgumentParser()
parser.add_argument(
    '-t', '--train_data_path', type=Path,
    default='datasets/BraTS2020/origin/MICCAI_BraTS2020_TrainingData',
)
parser.add_argument(
    '-o', '--out_dir', type=Path,
    default='datasets/BraTS2020',
)
args = parser.parse_args()


# One shared min-max scaler, re-fitted on every volume below.
scaler = MinMaxScaler()


def _sorted_paths(suffix):
    """Return sorted paths of all '*/*<suffix>' files under the training dir."""
    return sorted(glob.glob(str(args.train_data_path / ('*/*' + suffix))))


# The four lists are sorted the same way, so equal indices refer to the
# same subject across modalities and mask.
t2_list = _sorted_paths('t2.nii.gz')
t1ce_list = _sorted_paths('t1ce.nii.gz')
flair_list = _sorted_paths('flair.nii.gz')
mask_list = _sorted_paths('seg.nii.gz')

# Each 240x240x155 volume is normalized, cropped to 128x128x128
# (divisible by 64), and saved as 128 per-slice .npy files:
# a 128x128x3 image (FLAIR, T1ce, T2) and a 128x128 binary mask.

# np.save does not create parent directories, so make them up front.
img_dir = args.out_dir / 'img'
gt_dir = args.out_dir / 'gt'
img_dir.mkdir(parents=True, exist_ok=True)
gt_dir.mkdir(parents=True, exist_ok=True)


def _load_normalized(path):
    """Load a NIfTI volume and min-max scale it.

    The volume is flattened to (H*W, D) so MinMaxScaler normalizes each
    column independently, then restored to its original shape.
    """
    data = nib.load(path).get_fdata()
    return scaler.fit_transform(data.reshape(-1, data.shape[-1])).reshape(data.shape)


# All four lists are the same length and sorted identically, so the i-th
# entries of each belong to the same subject.
for i, (t2_path, t1ce_path, flair_path, mask_path) in enumerate(
        zip(t2_list, t1ce_list, flair_list, mask_list)):
    print("Now preparing image and masks number: ", i)

    temp_image_t2 = _load_normalized(t2_path)
    temp_image_t1ce = _load_normalized(t1ce_path)
    temp_image_flair = _load_normalized(flair_path)

    temp_mask = nib.load(mask_path).get_fdata().astype(np.uint8)
    # Collapse the BraTS labels (2 = edema, 4 = enhancing tumour) onto
    # label 1, producing a single binary foreground class.
    temp_mask[temp_mask == 4] = 1
    temp_mask[temp_mask == 2] = 1

    # Channel order: FLAIR, T1ce, T2.
    temp_combined_images = np.stack([temp_image_flair, temp_image_t1ce, temp_image_t2], axis=3)

    # Centre-crop x, y, and z to 128 voxels each.
    temp_combined_images = temp_combined_images[56:184, 56:184, 13:141]
    temp_mask = temp_mask[56:184, 56:184, 13:141]

    # Save one (image, mask) pair per axial slice.
    for n_slice in range(temp_combined_images.shape[2]):
        np.save(img_dir / f'image_{i}_{n_slice}.npy',
                temp_combined_images[:, :, n_slice])
        np.save(gt_dir / f'mask_{i}_{n_slice}.npy',
                temp_mask[:, :, n_slice])
