import sys
sys.path.append('..')
import os
import glob
import pandas as pd
import numpy as np
import json
import argparse
from tqdm import tqdm
from utils.helper import get_subdirs
from pathlib import Path
from PIL import Image
from sklearn.model_selection import train_test_split
import random
from copy import deepcopy

def parse_args(argv=None):
    """Parse command-line arguments for the seg-model data split.

    Args:
        argv: Optional list of argument strings; defaults to ``sys.argv[1:]``
            (the normal CLI behavior). Passing a list makes the parser testable.

    Returns:
        argparse.Namespace with val_ratio, json_path, save_dir,
        no_defect_code and img_ext.
    """
    parser = argparse.ArgumentParser(description='Split data for training Seg model.')
    parser.add_argument("--val_ratio", type=float, default=0.1,
                        help="Ratio of samples held out for the validation set.")
    # Fixed copy-pasted help strings: these two previously repeated the
    # descriptions of --val_ratio / --save_dir.
    parser.add_argument("--json_path", type=str, default='./my_path.json',
                        help="Path to the JSON config mapping product/site/version to data dirs.")
    parser.add_argument("--save_dir", type=str, default='./csv/650E9_T560/',
                        help="Define the dir to store csv files.")
    parser.add_argument("--no_defect_code", nargs='+',
                        default=['TEAS', 'TSFAS', 'TSDFS', 'NOCOD', 'NOCOD2', 'TFOL0', 'TSFIX', 'FCFSX'],
                        help="Codes treated as defect-free; their images are kept without a mask label.")
    parser.add_argument("--img_ext", nargs='+', default=['jpg', 'JPG'],
                        help="img format for training samples.")

    args = parser.parse_args(argv)

    return args

def get_extensions(path):
    """Return the most frequent file extension found under *path* (recursive).

    Ties are broken by lexicographic order (np.unique sorts the extensions).
    Note: a file without a dot contributes its whole name as its "extension".
    """
    all_exts = [
        file_name.split(".")[-1]
        for _, _, file_names in os.walk(path)
        for file_name in file_names
    ]
    uniq_exts, ext_counts = np.unique(all_exts, return_counts=True)
    return uniq_exts[np.argmax(ext_counts)]

def check_json(json_path):
    """Validate a labelme-style annotation file.

    Args:
        json_path: Path to a JSON file with a top-level ``"shapes"`` list,
            each entry carrying a ``"points"`` list of polygon vertices.

    Returns:
        True iff every shape has at least 3 points (a valid polygon);
        True for an empty ``shapes`` list.
    """
    # Bug fix: open read-only ('r', not 'r+') — the old mode required write
    # permission just to validate the file.
    with open(json_path, 'r', encoding="utf-8") as f:
        data = json.load(f)
    return all(len(shape['points']) >= 3 for shape in data['shapes'])

def my_train_test_split(data, test_size=0.1, by='code', random_state=752):
    """Stratified train/validation split of *data* on column *by*.

    Each group is shuffled deterministically (via ``random_state``) and the
    first ``int(len * (1 - test_size))`` rows go to train. A group too small
    to yield any train rows is placed entirely into train instead.

    Returns:
        (train_df, val_df) — either may be None when no rows landed in it.
    """
    train_parts = []
    val_parts = []
    for group_value in np.unique(data[by]):
        group = data[data[by] == group_value].reset_index(drop=True)
        group = group.sample(frac=1, random_state=random_state).reset_index(drop=True)
        n_train = int(len(group) * (1 - test_size))
        if n_train == 0:
            # Too few samples for a split: keep the whole group in train.
            train_parts.append(group)
        else:
            train_parts.append(group[:n_train])
            val_parts.append(group[n_train:])
    # pd.concat of the original's running accumulation silently dropped the
    # initial None, so concatenating the collected parts is equivalent.
    train_df = pd.concat(train_parts) if train_parts else None
    val_df = pd.concat(val_parts) if val_parts else None
    return train_df, val_df

def _make_row(image, label, product, code):
    """One-row DataFrame describing a single training sample."""
    return pd.DataFrame({'image': [image], 'label': [label],
                         'product': [product], 'code': [code]})

def main():
    """Collect labelled images and write train/valid csv splits.

    Walks the product -> site -> version hierarchy described by the
    ``--json_path`` config, gathers every image that has a usable label
    (labelme JSON polygon, PNG mask, or a known defect-free code), then
    stratified-splits each version by defect code and writes per-version,
    per-site and global csv files under ``--save_dir``.
    """
    args = parse_args()
    os.makedirs(args.save_dir, exist_ok=True)
    # Normalise the no-defect codes to strings so the membership test below
    # also works for numeric-looking values passed on the command line.
    args.no_defect_code = [str(x) for x in args.no_defect_code]
    # Bug fix: close the config file after reading (was json.load(open(...))).
    with open(args.json_path, 'r') as f:
        path_dict = json.load(f)
    all_train_total = pd.DataFrame()
    all_valid_total = pd.DataFrame()
    # Bug fix: the original did `random.seed = 752`, which *replaced* the
    # random.seed function with an int instead of seeding the RNG.
    random.seed(752)
    for pro_name, idct_per_pro in path_dict.items():
        for site_name, idct_per_pro_site in idct_per_pro.items():
            train_total = pd.DataFrame()
            valid_total = pd.DataFrame()
            for version_name in idct_per_pro_site:
                print(f'Working on {pro_name}-{site_name}-{version_name}.')
                train_csv = os.path.join(args.save_dir, f'{pro_name}_{site_name}_{version_name}_train.csv')
                valid_csv = os.path.join(args.save_dir, f'{pro_name}_{site_name}_{version_name}_valid.csv')
                if os.path.exists(train_csv) and os.path.exists(valid_csv):
                    # Reuse previously generated splits for this version.
                    df_train = pd.read_csv(train_csv)
                    df_valid = pd.read_csv(valid_csv)
                    print(f'Csv files for {pro_name}_{site_name}_{version_name} exist.')
                else:
                    dict_cache = idct_per_pro_site[version_name]
                    img_dir = dict_cache['img_dir']
                    mask_dir = dict_cache['mask_dir']
                    df_total = pd.DataFrame()
                    # Bug fix: count unlabelled images across *all* codes. The
                    # original reset this inside the code loop but printed it
                    # only once afterwards (last code's count only, and a
                    # NameError when there were no code subdirs at all).
                    nolabel_count = 0
                    for code_name in get_subdirs(img_dir):
                        data_dir = os.path.join(img_dir, code_name)
                        code_mask_dir = os.path.join(mask_dir, code_name)
                        img_list = []
                        for img_ext in args.img_ext:
                            img_list.extend(sorted(glob.glob(os.path.join(data_dir, '*.' + str(img_ext)))))
                        for img_cache in tqdm(img_list, f'{code_name}'):
                            try:
                                base_name = Path(img_cache).stem
                                json_label = os.path.join(code_mask_dir, base_name + '.json')
                                png_label = os.path.join(code_mask_dir, base_name + '.png')
                                if os.path.exists(json_label):
                                    # Keep only annotations whose polygons all
                                    # have >= 3 points.
                                    if check_json(json_label):
                                        df_total = pd.concat([df_total, _make_row(img_cache, json_label, pro_name, code_name)])
                                elif os.path.exists(png_label):
                                    df_total = pd.concat([df_total, _make_row(img_cache, png_label, pro_name, code_name)])
                                elif code_name in args.no_defect_code:
                                    # Defect-free images get the sentinel label
                                    # '0' and are oversampled 2x.
                                    for _ in range(2):
                                        df_total = pd.concat([df_total, _make_row(img_cache, str(0), pro_name, code_name)])
                                else:
                                    nolabel_count += 1
                            except IOError:
                                print('Image file %s has something wrong, will not used for train.' % img_cache)
                    print('nolabel_count: ', nolabel_count)
                    print('df_total shape: ', df_total.shape)
                    print('df_total["code"] value_counts: ', df_total['code'].value_counts())
                    df_train, df_valid = my_train_test_split(data=df_total, test_size=args.val_ratio, by='code', random_state=752)
                    df_train.to_csv(train_csv, index=False)
                    df_valid.to_csv(valid_csv, index=False)
                train_total = pd.concat([train_total, df_train])
                valid_total = pd.concat([valid_total, df_valid])

            train_total.to_csv(os.path.join(args.save_dir, f'{pro_name}_{site_name}_train_total.csv'), index=False)
            all_train_total = pd.concat([all_train_total, train_total])
            valid_total.to_csv(os.path.join(args.save_dir, f'{pro_name}_{site_name}_valid_total.csv'), index=False)
            all_valid_total = pd.concat([all_valid_total, valid_total])

    all_train_total.to_csv(os.path.join(args.save_dir, 'train.csv'), index=False)
    all_valid_total.to_csv(os.path.join(args.save_dir, 'valid.csv'), index=False)



# Script entry point: run the dataset split only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
