import sys
sys.path.append('.')
sys.path.append('..')
import os
import pandas as pd
import numpy as np
import json
import argparse
from tqdm import tqdm
from utils.helper import get_subdirs
from PIL import Image
from sklearn.model_selection import train_test_split
from utils.helper import read_xml, find_nodes

def parse_args(argv=None):
    """Parse command-line arguments for the segmentation data-split script.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``,
            which makes argparse fall back to ``sys.argv[1:]`` — existing
            callers are unaffected, and tests can pass an explicit list.

    Returns:
        argparse.Namespace with ``val_ratio``, ``json_path`` and ``save_dir``.
    """
    parser = argparse.ArgumentParser(description='Split data for training Seg model.')
    parser.add_argument("--val_ratio", type=float, default=0.1,
                        help="Define the ratio of validation set.")
    # Help text fixed: it previously repeated the --val_ratio description.
    parser.add_argument("--json_path", type=str, default='./path.json',
                        help="Path to the JSON file mapping product/site/version to data dirs.")
    parser.add_argument("--save_dir", type=str, default='./csv',
                        help="Define the dir to store csv files.")

    args = parser.parse_args(argv)

    return args

def _collect_samples(img_dir, mask_dir, pro_name, dict_image_opened):
    """Walk the per-code annotation dirs and build the sample table.

    For every ``*.xml`` annotation found under ``mask_dir/<code>``, read the
    raw-image path out of the XML, verify that image is loadable (each
    distinct path is opened at most once, tracked in ``dict_image_opened``),
    and collect one row per usable sample.

    Args:
        img_dir: Directory whose subdirectories are the defect-code names.
        mask_dir: Directory holding the matching per-code XML annotations.
        pro_name: Product name recorded in the ``product`` column.
        dict_image_opened: Shared cache mapping raw image path -> True once
            the image has been verified readable (mutated in place).

    Returns:
        pandas.DataFrame with columns ``image``, ``code``, ``box``, ``product``.
    """
    records = []
    for code_name in get_subdirs(img_dir):
        mask_code_dir = os.path.join(mask_dir, code_name)
        for dirpath, _, filenames in os.walk(mask_code_dir):
            for filename in tqdm(filenames):
                if filename.split('.')[-1] != 'xml':
                    continue
                # Compute xml_path before the try so the error message can
                # always reference it (raw_image_path may never get bound).
                xml_path = os.path.join(dirpath, filename)
                try:
                    box_tree = read_xml(xml_path)
                    raw_image_path = find_nodes(box_tree, "path")[0].text

                    # Verify each raw image is readable only once; .load()
                    # forces a full decode, not just a header read.
                    if not dict_image_opened.get(raw_image_path, False):
                        Image.open(raw_image_path).load()
                        dict_image_opened[raw_image_path] = True

                    records.append({
                        'image': raw_image_path,
                        'code': code_name,
                        'box': xml_path,
                        'product': pro_name,
                    })
                except Exception as err:
                    # Best-effort skip of unreadable samples, but no longer a
                    # bare except (it used to swallow KeyboardInterrupt and
                    # could NameError on an unbound raw_image_path).
                    print(f'Sample {xml_path} has something wrong ({err}), will not be used for train.')
    # Single DataFrame construction instead of per-row pd.concat (O(n) vs O(n^2)).
    return pd.DataFrame(records, columns=['image', 'code', 'box', 'product'])


def main():
    """Build train/valid CSV splits for every product/site/version.

    Reads the directory layout from ``--json_path`` (product -> site ->
    version -> {img_dir, anno_dir}), collects readable samples, splits them
    with a fixed random seed, and writes per-version, per-site and global
    train/valid CSVs into ``--save_dir``. Per-version CSVs already on disk
    are reused instead of being rebuilt.
    """
    args = parse_args()
    os.makedirs(args.save_dir, exist_ok=True)
    # Context manager so the JSON file handle is not leaked.
    with open(args.json_path, 'r') as fp:
        path_dict = json.load(fp)

    all_train_parts = []
    all_valid_parts = []
    # Shared across all products/sites so each raw image is opened only once.
    dict_image_opened = {}
    for pro_name, idct_per_pro in path_dict.items():
        for site_name, idct_per_pro_site in idct_per_pro.items():
            train_parts = []
            valid_parts = []
            for version_name, dict_cache in idct_per_pro_site.items():
                print(f'Working on {pro_name}-{site_name}-{version_name}.')
                train_csv = os.path.join(args.save_dir, f'{pro_name}_{site_name}_{version_name}_train.csv')
                valid_csv = os.path.join(args.save_dir, f'{pro_name}_{site_name}_{version_name}_valid.csv')
                if os.path.exists(train_csv) and os.path.exists(valid_csv):
                    # Reuse previously generated splits for this version.
                    df_train = pd.read_csv(train_csv)
                    df_valid = pd.read_csv(valid_csv)
                    print(f'Csv files for {pro_name}_{site_name}_{version_name} exist.')
                else:
                    img_dir = dict_cache['img_dir']
                    print(f'img_dir: {img_dir}')
                    mask_dir = dict_cache['anno_dir']
                    df_total = _collect_samples(img_dir, mask_dir, pro_name, dict_image_opened)
                    # Fixed seed keeps the split reproducible across runs.
                    df_train, df_valid = train_test_split(df_total, test_size=args.val_ratio, random_state=752)
                    df_train.to_csv(train_csv, index=False)
                    df_valid.to_csv(valid_csv, index=False)
                train_parts.append(df_train)
                valid_parts.append(df_valid)
            train_total = pd.concat(train_parts) if train_parts else pd.DataFrame()
            valid_total = pd.concat(valid_parts) if valid_parts else pd.DataFrame()
            print(train_total.shape)
            train_total.to_csv(os.path.join(args.save_dir, f'{pro_name}_{site_name}_train_total.csv'), index=False)
            all_train_parts.append(train_total)
            valid_total.to_csv(os.path.join(args.save_dir, f'{pro_name}_{site_name}_valid_total.csv'), index=False)
            all_valid_parts.append(valid_total)

    all_train_total = pd.concat(all_train_parts) if all_train_parts else pd.DataFrame()
    all_valid_total = pd.concat(all_valid_parts) if all_valid_parts else pd.DataFrame()
    all_train_total.to_csv(os.path.join(args.save_dir, 'all_train.csv'), index=False)
    all_valid_total.to_csv(os.path.join(args.save_dir, 'all_valid.csv'), index=False)
    
    

# Script entry point: run the split only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()

