from typing import Dict,Tuple,List,Callable
import os,random
import numpy as np
# from keras.preprocessing.image import load_img, img_to_array
import tensorflow as tf


def load_img(img_path):
    """Load the image file at *img_path* via Keras (thin convenience wrapper)."""
    keras_loader = tf.keras.preprocessing.image.load_img
    return keras_loader(img_path)

def img_to_array(img):
    """Convert a PIL-style image object to a NumPy array via Keras."""
    to_array = tf.keras.preprocessing.image.img_to_array
    return to_array(img)


class myDataSet:
    """Directory-based image dataset driven by plain-text index files.

    Expected on-disk layout: one subdirectory of ``data_path`` per class,
    named after the class. Index files hold one entry per line in the form
    ``<class_name>/<file_name> <class_id>`` (path relative to ``data_path``),
    e.g. ``cardboard/cardboard1.jpg 0``.
    """

    def __init__(self,classification_dict:Dict=None,data_path:str=None,labeled_index_path:str=None,
                 trainSet_index_path:str=None,testSet_index_path:str=None,valSet_index_path:str=None,
                 preprocess_func:Callable=None,new_index_file:bool=True):
        """
        @param classification_dict: a dictionary that maps class names to their corresponding IDs
        @param data_path: the path to the dataset
        @param labeled_index_path: the path to the labeled index file
        @param trainSet_index_path: the path to the train set index file
        @param testSet_index_path: the path to the test set index file
        @param valSet_index_path: the path to the validation set index file
        @param preprocess_func: a function that takes an image and returns an image
        @param new_index_file: whether to generate new index files or not,
         if False, the index files must exist
        """
        self.Class_Names_2_IDs = classification_dict
        self.Data_path = data_path
        self.Labeled_Index_path = labeled_index_path
        self.Train_Set_Index_path = trainSet_index_path
        self.Test_Set_Index_path = testSet_index_path
        self.Val_Set_Index_path = valSet_index_path
        # Regenerate the index files only when asked to AND the paths needed
        # for generation were supplied; otherwise rely on pre-existing files.
        if new_index_file and self.Data_path is not None and self.Labeled_Index_path is not None:
            self.__Get_Labeled_Index()
            self.__Split_Labeled_Index()
        else:
            print("dataset: Using existing index files")
        self.Preprocess_Func = preprocess_func

    def __Get_Labeled_Index(self):
        """Scan the per-class subdirectories of Data_path and write the labeled index file.

        Each line is ``<class_name>/<file_name> <class_id>`` — the first field
        is the image's path relative to Data_path.
        """
        with open(self.Labeled_Index_path, 'w') as fd:
            for class_name in self.Class_Names_2_IDs:
                # Every file in the subdirectory named after the class is
                # assumed to be an image of that class. os.path.join tolerates
                # Data_path with or without a trailing separator (plain string
                # concatenation required the trailing separator).
                for image_name in os.listdir(os.path.join(self.Data_path, class_name)):
                    fd.write('{}/{} {}\n'
                             .format(class_name, image_name, self.Class_Names_2_IDs[class_name]))
        print("dataset: Labeled index file generated")

    def __Split_Labeled_Index(self):
        """Randomly split the labeled index into train/val index files.

        Incomplete: only the train and validation splits are produced;
        the test-set index file is never created.
        """
        _NUM_VALIDATION = 505
        _RANDOM_SEED = 0
        with open(self.Labeled_Index_path) as fd:
            lines = fd.readlines()
        # Fixed seed so the split is reproducible across runs.
        random.seed(_RANDOM_SEED)
        random.shuffle(lines)
        # Everything past the first _NUM_VALIDATION shuffled lines is train.
        with open(self.Train_Set_Index_path, 'w') as fd:
            fd.writelines(lines[_NUM_VALIDATION:])
        print("dataset: Train set index file generated")
        with open(self.Val_Set_Index_path, 'w') as fd:
            fd.writelines(lines[:_NUM_VALIDATION])
        print("dataset: Val set index file generated")
        # Test split not implemented yet.
        print("dataset: Test set index file not generated")

    def __Load_Index_Lable(self,dataset_type:str,verbose:bool=False)->Tuple[List,List]:
        """Read one split's index file.

        @param dataset_type: "train", "test", or "val"
        @param verbose: if True, echo every index line while reading
        @return: (image paths relative to Data_path, int labels)
        @raise ValueError: on an unknown dataset_type
        """
        index_paths = {
            "train": self.Train_Set_Index_path,
            "test": self.Test_Set_Index_path,
            "val": self.Val_Set_Index_path,
        }
        if dataset_type not in index_paths:
            raise ValueError("dataset_type must be 'train', 'test', or 'val'")
        indexFile_path = index_paths[dataset_type]
        path_x = []
        y = []
        with open(indexFile_path) as fd:
            for line in fd:
                l = line.strip()
                if not l:
                    # Tolerate stray blank lines in hand-edited index files.
                    continue
                # Split on the LAST space: supports multi-digit class IDs
                # (the previous l[:-2] / int(l[-1]) slicing silently corrupted
                # any ID >= 10) and file names containing spaces.
                rel_path, _, label = l.rpartition(' ')
                path_x.append(rel_path)
                y.append(int(label))
                if verbose:
                    print(l)
        print("dataset: from {} load labeled index".format(indexFile_path))
        return path_x, y

    def __Read_Preprocess_Image(self,img_path_list:List)->List[np.ndarray]:
        """Load every image named in img_path_list and run it through Preprocess_Func.

        @param img_path_list: image paths relative to Data_path
        @return: list of preprocessed image arrays
        @raise ValueError: if no preprocess function was given at init time
        """
        # Fail fast, before any (possibly slow) image loading happens.
        if self.Preprocess_Func is None:
            raise ValueError("Preprocess_Func is not specified, please specify it when init")
        images = []
        for image_path in img_path_list:
            img_load = load_img(os.path.join(self.Data_path, image_path))
            images.append(self.Preprocess_Func(img_to_array(img_load)))
        print("dataset: load images according to img_path_list and preprocess")
        return images

    def Get_Preprocess_Img(self,img_path_list:List):
        """Load and preprocess the images at the given paths (relative to Data_path).

        @param img_path_list: list of image paths
        @return: list of preprocessed image arrays
        """
        return self.__Read_Preprocess_Image(img_path_list)

    def Load(self,dataset_type:str,verbose:bool=False)->Tuple[List,List]:
        """Load an entire split from its index file.

        @param dataset_type: one of "train", "test", "val"
        @param verbose: if True, echo every index line while reading
        @return: x,y — preprocessed image list and label list
        """
        path_x, y = self.__Load_Index_Lable(dataset_type,verbose)
        x = self.__Read_Preprocess_Image(path_x)
        return x, y

def test0():
    """Smoke test: write a short string to ``file0.txt`` in the current directory."""
    with open("file0.txt", 'w') as out_file:
        out_file.write("hello world")

def main():
    """Script entry point: run the file-writing smoke test."""
    test0()

if __name__ == "__main__":
    main()
