import os
import random
import copy
import numpy as np

# The HTM and HCN classes are not balanced, so the HCN dataset may be duplicated to compensate;
# e.g. a ratio of [1, 2] means samples with label 0 are added once and samples with label 1 twice.
def kfolder_resnet_single(num_folder=5,image_dir='../data/dataset_resnet_single',seed=2021,need_val=False, shuffle = False):
    """Split a class-labelled image directory into k cross-validation folds.

    Expects the layout ``image_dir/<label>/<image_file>``.  The file list of
    each label is split separately, so every fold keeps roughly the original
    class distribution.

    Args:
        num_folder: number of folds (k).
        image_dir: root directory containing one sub-directory per label.
        seed: RNG seed used for every shuffle (reproducible splits).
        need_val: if True, carve ~25% of each fold's training set off as a
            validation set.
        shuffle: if True, shuffle each label's file list before splitting.

    Returns:
        ``(train_lists, test_lists)`` or, when ``need_val`` is True,
        ``(train_lists, val_lists, test_lists)``; each element holds one
        fold's list of image paths.
    """
    # Collect a sorted path list per label so the split is deterministic.
    image_path_dict = {}
    for label_dir_name in os.listdir(image_dir):
        label_dir = os.path.join(image_dir, label_dir_name)
        image_path_list = [os.path.join(label_dir, image_name) for image_name in os.listdir(label_dir)]
        image_path_list.sort()
        image_path_dict[label_dir_name] = image_path_list

    # Optional per-label shuffle; reseeding each time keeps it reproducible.
    if shuffle:
        for label in image_path_dict:
            image_path_list = list(image_path_dict[label])
            random.seed(seed)
            random.shuffle(image_path_list)
            image_path_dict[label] = image_path_list

    train_image_path_lists = []
    val_image_path_lists = []
    test_image_path_lists = []

    for folder_index in range(num_folder):
        train_image_path_list = []
        test_image_path_list = []

        for split_index in range(num_folder):
            for label in image_path_dict:
                image_path_list = image_path_dict[label]

                # Spread the remainder over the first folds so fold sizes
                # differ by at most one sample per label.
                num_perfolder = len(image_path_list) // num_folder
                index_add_one = len(image_path_list) % num_folder
                num_list = [num_perfolder + 1 if num < index_add_one else num_perfolder
                            for num in range(num_folder)]

                start_index = int(np.sum(num_list[:split_index]))
                end_index = int(np.sum(num_list[:split_index + 1]))
                split_list = image_path_list[start_index:end_index]

                # The fold matching folder_index is held out as the test set.
                if split_index == folder_index:
                    test_image_path_list += split_list
                else:
                    train_image_path_list += split_list

        if not need_val:
            train_image_path_lists.append(copy.deepcopy(train_image_path_list))
            test_image_path_lists.append(copy.deepcopy(test_image_path_list))
        else:
            # Shuffle (seeded) and carve the last quarter off as validation.
            # BUGFIX: the previous slices [0:cut] / [cut+1:len-1] silently
            # dropped the sample at index `cut` and the last sample of every
            # fold; the gapless slices below cover the whole list.
            len_train = len(train_image_path_list)
            random.seed(seed)
            random.shuffle(train_image_path_list)
            cut = len_train - len_train // 4
            train_image_path_lists.append(copy.deepcopy(train_image_path_list[:cut]))
            val_image_path_lists.append(copy.deepcopy(train_image_path_list[cut:]))
            test_image_path_lists.append(copy.deepcopy(test_image_path_list))

    if not need_val:
        return train_image_path_lists, test_image_path_lists
    else:
        return train_image_path_lists, val_image_path_lists, test_image_path_lists



# The HTM and HCN classes are not balanced, so the HCN dataset may be duplicated to compensate;
# e.g. a ratio of [1, 2] means samples with label 0 are added once and samples with label 1 twice.
def kfolder_resnet_double(num_folder=5,image_dir='../data',seed=2021,need_val=False,shuffle=False):
    """Split an (image, mask) dataset into k cross-validation folds.

    Expects the layout ``image_dir/<label>/image/<name>`` with a matching
    mask at ``image_dir/<label>/mask/<name>``.  Each label is split
    separately so folds keep the class distribution.

    Args:
        num_folder: number of folds (k).
        image_dir: root directory containing one sub-directory per label.
        seed: RNG seed used for the optional shuffle.
        need_val: accepted for signature parity with the single variant;
            not used by this function.
        shuffle: if True, shuffle each label's pair list before splitting.

    Returns:
        ``(train_lists, test_lists)``; each fold entry is a list of
        ``[image_path, mask_path]`` pairs.
    """
    # Build a deterministic, per-label list of [image, mask] path pairs.
    image_path_dict = {}
    for label_name in os.listdir(image_dir):
        label_dir = os.path.join(image_dir, label_name)
        names = sorted(os.listdir(os.path.join(label_dir, 'image')))
        image_path_dict[label_name] = [
            [os.path.join(label_dir, 'image', name), os.path.join(label_dir, 'mask', name)]
            for name in names
        ]

    # Optional per-label shuffle; reseeding keeps it reproducible.
    if shuffle:
        for label in image_path_dict:
            shuffled_pairs = copy.deepcopy(image_path_dict[label])
            random.seed(seed)
            random.shuffle(shuffled_pairs)
            image_path_dict[label] = shuffled_pairs

    train_image_path_lists = []
    test_image_path_lists = []

    for fold in range(num_folder):
        train_fold = []
        test_fold = []

        for part in range(num_folder):
            for pairs in image_path_dict.values():
                # First `extra` chunks get one extra pair so chunk sizes
                # differ by at most one.
                base, extra = divmod(len(pairs), num_folder)
                sizes = [base + 1 if i < extra else base for i in range(num_folder)]

                lo = int(np.sum(sizes[:part]))
                hi = int(np.sum(sizes[:part + 1]))
                chunk = pairs[lo:hi]

                # The chunk matching this fold index is held out for testing.
                if part == fold:
                    test_fold += chunk
                else:
                    train_fold += chunk

        train_image_path_lists.append(copy.deepcopy(train_fold))
        test_image_path_lists.append(copy.deepcopy(test_fold))
    return train_image_path_lists,test_image_path_lists

# print (kfolder_resnet_double(image_dir=r'data/BiYanAi_Ori'))