
import multiprocessing
import shutil
from multiprocessing import Pool

from batchgenerators.utilities.file_and_folder_operations import *

from nnunetv2.dataset_conversion.generate_dataset_json import generate_dataset_json
from nnunetv2.paths import nnUNet_raw
from skimage import io
from acvl_utils.morphology.morphology_helper import generic_filter_components
from scipy.ndimage import binary_fill_holes
from pathlib import Path
from tqdm import tqdm
import pandas as pd
from PIL import Image

# Target label scheme for nnU-Net. The original Oxford-IIIT Pet trimaps use
# 1: foreground, 2: background, 3: not classified; load_and_convert_case
# remaps them to the 0-based values below (nnU-Net requires background == 0).
labels = {
    'background': 0, 'foreground': 1, 'not classified': 2
}


def load_and_convert_case(input_image: str, input_seg: str, output_image: str, output_seg: str):
    """Convert one Oxford-IIIT Pet case to nnU-Net raw format.

    The trimap annotation uses 1: foreground, 2: background, 3: not classified.
    nnU-Net requires label 0 to be background, so values are remapped to
    1 -> 1 (foreground), 2 -> 0 (background), 3 -> 2 (not classified).
    The input JPEG is re-encoded as an RGB PNG (some source images carry an
    alpha channel or are grayscale, so a plain file copy is not sufficient).

    :param input_image: path to the source .jpg photograph
    :param input_seg: path to the source trimap .png
    :param output_image: destination path for the RGB .png image
    :param output_seg: destination path for the remapped label .png
    """
    seg = io.imread(input_seg)
    # Order matters: map 2 -> 0 before 3 -> 2, otherwise the original
    # "not classified" pixels (3) would be collapsed into background.
    seg[seg == 2] = 0
    seg[seg == 3] = 2

    # check_contrast=False: label maps legitimately have low contrast.
    io.imsave(output_seg, seg, check_contrast=False)
    # Force 3-channel RGB so every exported image has a uniform layout.
    Image.open(input_image).convert('RGB').save(output_image)

if __name__ == "__main__":
    # Location of the raw Oxford-IIIT Pet download (contains images/ and annotations/).
    source = Path('/media/nas/yjiang/dataset/TheOxford-IIITPetDataset')

    dataset_name = 'Dataset994_Oxford_IIIT_Pet'

    # nnU-Net raw layout: imagesTr/labelsTr for training, imagesTs/labelsTs for test.
    dataset_root = Path(nnUNet_raw) / dataset_name
    imagestr = dataset_root / 'imagesTr'
    imagests = dataset_root / 'imagesTs'
    labelstr = dataset_root / 'labelsTr'
    labelsts = dataset_root / 'labelsTs'
    for folder in (imagestr, imagests, labelstr, labelsts):
        maybe_mkdir_p(folder)

    # The official split files list one case id per line (first column).
    annotations_dir = source / 'annotations'
    train_cases = pd.read_csv(annotations_dir / 'trainval.txt', sep=' ', header=None)[0].to_list()
    test_cases = pd.read_csv(annotations_dir / 'test.txt', sep=' ', header=None)[0].to_list()

    num_training_cases = len(train_cases)

    pending = []
    with multiprocessing.get_context("spawn").Pool(multiprocessing.cpu_count()) as pool:
        # Train and test cases go through the same worker; only the
        # destination directories differ between the two splits.
        for case_ids, image_dir, label_dir in (
            (train_cases, imagestr, labelstr),
            (test_cases, imagests, labelsts),
        ):
            for case in case_ids:
                task_args = (
                    source / 'images' / '{}.jpg'.format(case),
                    annotations_dir / 'trimaps' / '{}.png'.format(case),
                    image_dir / '{}_0000.png'.format(case),
                    label_dir / '{}.png'.format(case),
                )
                pending.append(pool.starmap_async(load_and_convert_case, (task_args,)))

        # Block until every scheduled conversion has finished (re-raises
        # any worker exception).
        for handle in tqdm(pending):
            handle.get()

    generate_dataset_json(join(nnUNet_raw, dataset_name), {0: 'R', 1: 'G', 2: 'B'}, labels,
                          num_training_cases, '.png', dataset_name=dataset_name)