"""
Brief:
  获取训练集数据和交叉验证集数据
Author:GL
QQ:1748462079
Date:2021/01
"""
import numpy as np 
import glob
import os
from sklearn.model_selection import train_test_split

def parse_class_name(path: str):
    '''
    Brief:
      Parse a .name label file into a {class_name: code} mapping.
    Args:
      path(str): path to the .name file, e.g. "cnn.name". Each line holds
        "<class_name> <integer_code>" separated by a single space.
    Return:
      label_dict(dict): keys are class names (str), values are the integer
        codes; None when the file suffix is not '.name' (original contract
        kept for existing callers).
    '''
    if not path.endswith('.name'):
        # Wrong suffix: warn and keep the original "return None" contract.
        print('文件后缀需要为.name')
        return None

    # ndmin=2 forces a 2-D result even when the file holds a single class;
    # without it np.loadtxt returns a 1-D array and the loop below would
    # iterate over the two fields of one record instead of over records.
    label_array = np.loadtxt(path, dtype=str, delimiter=' ', ndmin=2)
    return {record[0]: int(record[1]) for record in label_array}

def get_src_dataset_list(root_dir: str, suffix_list=('.jpg', '.png', '.jpeg')):
    '''
    Brief:
      Collect the source dataset: read the single .name file under root_dir,
      then gather every image inside the per-class sub-directories it names.
    Args:
      root_dir(str): dataset root directory, organised as:
        |--/root_dir
        |----/1
        |----/2
        |----/3
        |----/4
        |----/5
        |----class.name -> label file
      suffix_list: supported image suffixes (a tuple by default to avoid the
        mutable-default-argument pitfall; any iterable of str works).
    Return:
      dataset(np.ndarray): dtype str, shape [nums, 2]; each row is
        [filename, class_code].
    Raises:
      FileNotFoundError: when root_dir holds zero or multiple .name files.
    '''
    label_path = glob.glob(os.path.join(root_dir, '*.name'))
    print(label_path)
    if len(label_path) != 1:
        print("当前根目录下无标签文件或存在多个标签文件")
        # Include the offending directory so the error is actionable.
        raise FileNotFoundError(root_dir)

    # Parse the .name file into a {class_name: code} lookup table.
    label_dict = parse_class_name(label_path[0])
    dataset = []
    for label, label_code in label_dict.items():
        class_dir = os.path.join(root_dir, label)
        for suffix in suffix_list:
            # glob expands the wildcard to every file with this suffix.
            for filename in glob.glob(os.path.join(class_dir, '*' + suffix)):
                dataset.append([filename, str(label_code)])

    # reshape(-1, 2) keeps the documented [nums, 2] shape even when no
    # image was found (np.array([]) alone would be shape (0,)).
    return np.array(dataset, dtype=str).reshape(-1, 2)

def spilt_dataset(dataset, prop=0.1, output_path=None):
    '''
    Brief:
      Shuffle and split the dataset into a training set and a cross-validation
      set, saving them as "train.txt" and "valid.txt".
      NOTE(review): the historical "spilt" spelling is kept on purpose so
      existing callers keep working.
    Args:
      dataset(np.ndarray): dtype str, shape [nums, 2]; column 0 is the
        filename, column 1 is the class code.
      prop(float): 0-1, fraction of samples assigned to the validation set.
      output_path(str): output directory; None writes to the current dir.
    Return:
      (train, valid): the two split arrays, each of shape [n, 2]
        (also written to disk as a side effect).
    '''
    data = dataset[:, 0]
    label = dataset[:, 1]
    # Fixed random_state so repeated runs reproduce the same split.
    x_train, x_test, y_train, y_test = train_test_split(
        data, label, test_size=prop, random_state=7)
    # Re-attach labels to filenames as [n, 2] arrays.
    train = np.stack((x_train, y_train), axis=1)
    valid = np.stack((x_test, y_test), axis=1)

    out_dir = '.' if output_path is None else output_path
    np.savetxt(os.path.join(out_dir, 'train.txt'), train, fmt="%s")
    np.savetxt(os.path.join(out_dir, 'valid.txt'), valid, fmt="%s")
    return train, valid
    
                


if __name__ == '__main__':
    import argparse

    # CLI entry point: build the raw dataset list, then split and save it.
    arg_parser = argparse.ArgumentParser(description="gen_dataset_list")
    arg_parser.add_argument('--root-dir', type=str,
                            metavar=':root dir of dataset',
                            default="/home/lin/桌面/HITCRT/CNN_picture/train")
    arg_parser.add_argument('--output-dir', type=str,
                            metavar=':txt file output dir',
                            default="/home/lin/桌面/HITCRT/CNNClassifier/scripts_gray")
    cli_args = arg_parser.parse_args()

    src_dataset = get_src_dataset_list(cli_args.root_dir)
    spilt_dataset(src_dataset, output_path=cli_args.output_dir)
