import sys
sys.path.append('..')
import os
import glob
import pandas as pd
import numpy as np
import json
import argparse
from tqdm import tqdm
from utils.helper import get_subdirs
from pathlib import Path
from PIL import Image


def parse_args():
    """
    Set args parameters.

    Returns the parsed argparse Namespace.

    Fixes vs. the original:
    - ``--img_dirs`` / ``--anno_dirs`` used ``type=list``, which splits a CLI
      string into single characters; ``nargs='+'`` accepts one-or-more paths
      while keeping the same list defaults.
    - ``--color_transfer_mean`` / ``--color_transfer_std`` now parse CLI
      values as float, matching their float defaults.
    """
    parser = argparse.ArgumentParser(description='Split data for training Classification model.')

    parser.add_argument("--img_dirs", nargs='+', default=[
          r'/data2/autorepair/ruanzhifeng/autorepair_t7_10/t7_X890/3865D06/data_20250325/Images',
          r'/data2/autorepair/ruanzhifeng/autorepair_t7_10/t7_X890/3875D04/data_20250325/Images'
    ], help="")

    parser.add_argument("--anno_dirs", nargs='+', default=[
        r'/data2/autorepair/ruanzhifeng/autorepair_t7_10/t7_X890/3865D06/data_20250325/Annotations_xml',
        r'/data2/autorepair/ruanzhifeng/autorepair_t7_10/t7_X890/3875D04/data_20250325/Annotations_xml'
    ], help="")
    parser.add_argument("--save_dir", type=str, default='./csv/AA_T7_3890', help="Define the dir to store csv files.")
    parser.add_argument("--label_file_suffix", type=str, default='.xml', help="")
    parser.add_argument("--is_split", type=int, default=1, help="whether split the dataset.")
    parser.add_argument("--val_ratio", type=float, default=0.15, help="Define the ratio of validation set.")
    parser.add_argument("--save_file_names", type=str, default='train_20250409.csv,val_20250409.csv', help="Define the dir to store csv files.")
    parser.add_argument("--no_defect_code_train", type=int, default=0, help="是否训练无缺陷code")
    parser.add_argument("--blur_code_train", type=int, default=0, help="是否训练模糊类code")
    parser.add_argument("--no_defect_code", type=str, default='TGXID', help="无缺陷类")
    parser.add_argument("--blur_code", type=str, default='BLUR', help="模糊类别code")
    parser.add_argument("--tail_code_name", type=str, default='TN0000', help="其他类code")
    parser.add_argument("--code_minimum_num", type=int, default=10, help="最少需求code数量")

    parser.add_argument("--key_code_pr_path", type=str, default=None, help=".")
    parser.add_argument("--must_copy_codes", type=str, default="", help=".")
    parser.add_argument("--key_code_min_num", type=str, default="150", help=".")
    parser.add_argument("--copy_code_min_num", type=str, default="150", help=".")
    parser.add_argument("--limited_codes", type=str, default="", help=".")
    parser.add_argument("--limited_num", type=str, default="", help=".")
    parser.add_argument("--copy_type", type=int, default=2, help="copy类型")
    parser.add_argument("--copy_multiple", type=int, default=1, help="copy倍数")

    parser.add_argument("--use_color_transfer", type=int, default=0, help=".")
    parser.add_argument("--temp_dir", type=str, default='tmp_dir', help=".")
    parser.add_argument("--color_transfer_mean", nargs='+', type=float, default=[79.94, 111, 128.6], help=".")
    parser.add_argument("--color_transfer_std", nargs='+', type=float, default=[39.38, 5.812, 10.375], help=".")

    args = parser.parse_args()

    return args

def img_check(img_path):
    """Return True if the image at ``img_path`` opens and fully decodes.

    Fixes vs. the original: the file handle is now closed via a context
    manager, the unused local was removed, and the bare ``except:`` (which
    also swallowed KeyboardInterrupt/SystemExit) was narrowed to Exception.
    """
    try:
        # load() forces a full decode, so truncated/corrupt files are caught
        # instead of failing later during training.
        with Image.open(img_path) as img:
            img.load()
        return True
    except Exception:
        # Any decode failure means the image is unusable; callers filter on this.
        return False

def get_label_path(mask_dir, code, img_path, label_suffix, blur_train, blur_codes, no_defect_train, no_defect_codes):
    """Resolve the annotation file path for one image.

    Looks for ``<mask_dir>/<code>/<image stem><label_suffix>``. Returns the
    path if it exists; otherwise returns '0' when the code is a blur or
    no-defect code enabled for label-free training, and '-1' when the sample
    should be dropped.
    """
    anno_name = Path(img_path).with_suffix(label_suffix).name
    candidate = os.path.join(mask_dir, code, anno_name)
    if os.path.exists(candidate):
        return candidate

    label_free_ok = (blur_train and code in blur_codes) or (no_defect_train and code in no_defect_codes)
    return '0' if label_free_ok else '-1'

def get_extensions(path):
    """Return the most frequent file extension found under ``path`` (recursive walk)."""
    suffixes = []
    for _, _, file_names in os.walk(path):
        # Extension is taken as the text after the last dot of each file name.
        suffixes.extend(name.split(".")[-1] for name in file_names)
    uniq, freq = np.unique(suffixes, return_counts=True)
    return uniq[np.argmax(freq)]

def train_test_split_by_code(df, test_size=0.1, random_state=752, col='code', test_min_num=1):
    """Per-code stratified split.

    Each code group is shuffled (seeded, so the split is reproducible) and
    ``max(int(group_size * test_size), test_min_num)`` rows are held out for
    the test set. Codes with a single sample go entirely to the train set.

    Returns (df_train, df_test), both with reset indices.
    """
    df_train, df_test = pd.DataFrame(), pd.DataFrame()
    for value in np.unique(df[col]):
        group = df[df[col] == value].reset_index(drop=True)
        if len(group) == 1:
            # A singleton cannot be split; keep it for training.
            df_train = pd.concat([df_train, group])
            continue
        group = group.sample(frac=1, random_state=random_state).reset_index(drop=True)
        held_out = max(int(len(group) * test_size), test_min_num)
        df_train = pd.concat([df_train, group[held_out:]])
        df_test = pd.concat([df_test, group[:held_out]])
    return df_train.reset_index(drop=True), df_test.reset_index(drop=True)

def file_name_check(file_names, is_dataset_split):
    """Validate the output csv name list, falling back to defaults.

    A train/val split needs exactly two names, a plain export exactly one;
    anything else is replaced with the hard-coded default file names.
    """
    expected = 2 if is_dataset_split else 1
    if len(file_names) == expected:
        return file_names
    if is_dataset_split:
        return ['train_of_label_box_color_transfer.csv', 'val_of_label_box_color_transfer.csv']
    return ['test_of_yolo_box.csv']


def del_some_num(df, delete_dict, random_state=752):
    """Randomly drop up to ``num`` rows for each code in ``delete_dict``.

    Codes with two or fewer rows are left untouched, and at least two rows
    are always kept per code. The per-code shuffle is seeded, so the result
    is reproducible. Returns df unchanged when delete_dict is empty.
    """
    if not delete_dict:
        return df
    for code, num in delete_dict.items():
        matched = df[df['code'] == code]
        if len(matched) <= 2:
            # Too few samples to safely delete from.
            continue
        rest = df[df['code'] != code]
        matched = matched.sample(frac=1, random_state=random_state).reset_index(drop=True)

        keep = max(2, len(matched) - num)
        print('code %s delete %d imgs'%(code, len(matched) - keep))
        df = pd.concat([matched.iloc[:keep, :], rest]).reset_index(drop=True)
    return df

def code_copy(df, codes, copy_code_min, copy_type, copy_multiple):
    """Oversample rows belonging to ``codes``.

    copy_type 1: every row of the listed codes is duplicated
        ``copy_multiple`` extra times (so it appears copy_multiple+1 times).
    copy_type 2: each listed code is padded up to ``copy_code_min[index]``
        rows by repeated doubling, then truncated to exactly that count;
        codes already at/above the minimum are kept as-is.
    Any other copy_type returns df unchanged.

    BUG FIX vs. original: with copy_type == 1 and copy_multiple <= 0,
    ``res_df`` was never assigned and ``return res_df`` raised
    UnboundLocalError; that case now returns df unchanged.
    """
    if copy_type not in (1, 2):
        return df

    if copy_type == 1:
        if copy_multiple <= 0:
            # Nothing to duplicate.
            return df
        df_key_code = df[df['code'].isin(codes)]
        df_other = df[~df['code'].isin(codes)]
        res_df = df_key_code.copy()
        if len(df_key_code) > 0:
            for _ in range(copy_multiple):
                res_df = pd.concat([res_df, df_key_code])
        return pd.concat([df_other, res_df]).reset_index(drop=True)

    # copy_type == 2: pad each code up to its minimum count.
    res_df = df[~df['code'].isin(codes)]
    res_in_codes = df[df['code'].isin(codes)].reset_index(drop=True)
    if len(res_in_codes) < 1:
        return df
    for index, code in enumerate(codes):
        tmp_res = res_in_codes[res_in_codes['code'] == code]
        if len(tmp_res) < 1:
            continue
        if len(tmp_res) < copy_code_min[index]:
            # Double until we have at least the minimum, then trim exactly.
            while len(tmp_res) < copy_code_min[index]:
                tmp_res = pd.concat([tmp_res, tmp_res]).reset_index(drop=True)
            res_df = pd.concat([res_df, tmp_res.iloc[:copy_code_min[index], :]]).reset_index(drop=True)
        else:
            res_df = pd.concat([res_df, tmp_res]).reset_index(drop=True)

    return res_df


def minmium_code_check(df, code_minimum_num):
    """Drop every code whose sample count falls below ``code_minimum_num``.

    Prints a notice for each removed code and returns the filtered frame
    with a reset index.
    """
    for code in set(df['code'].tolist()):
        survivors = df[df['code'] != code]
        count = len(df) - len(survivors)
        if count < code_minimum_num:
            print(f'DEL code: {code}, num: {count}, less than code_minimun_num: {code_minimum_num} !!!')
            df = survivors
    return df.reset_index(drop=True)


def limited_codes_cut(df, limited_codes, limited_num, random_state=752):
    """Cap each code in ``limited_codes`` at ``limited_num[index]`` rows.

    Rows beyond the cap are discarded after a seeded shuffle, so the kept
    subset is a reproducible random sample. Codes at/below their cap and
    an empty df are returned unchanged.

    BUG FIX vs. original: the result of ``.sample(...)`` was discarded, so
    the intended shuffle never happened and the cut always kept the first N
    rows; the shuffled frame is now assigned before slicing.
    """
    if len(df) == 0:
        return df
    for index, code in enumerate(limited_codes):
        selected = df[df['code'] == code].reset_index(drop=True)
        if len(selected) > limited_num[index]:
            others = df[df['code'] != code].reset_index(drop=True)
            selected = selected.sample(frac=1, random_state=random_state).reset_index(drop=True)
            df = pd.concat([others, selected.iloc[:limited_num[index], :]]).reset_index(drop=True)
    return df

def _csv_to_list(raw, cast=str):
    """Split a comma-separated string into a list of stripped, cast items; '' -> []."""
    parts = raw.split(',')
    if len(parts) == 1 and parts[0] == '':
        return []
    return [cast(x.strip()) for x in parts]


def str_to_list(args):
    """Normalize comma-separated CLI string options on ``args`` into lists, in place.

    String-valued options become lists of strings; count options
    (limited_num, key_code_min_num, copy_code_min_num) become lists of ints.
    An empty string always becomes an empty list. Returns None.

    Refactor vs. original: eight copy-pasted split/strip stanzas are replaced
    by the ``_csv_to_list`` helper; the long-dead commented-out handling of
    img_dirs/anno_dirs (now list-typed argparse options) was removed.
    """
    args.save_file_names = _csv_to_list(args.save_file_names)
    args.no_defect_code = _csv_to_list(args.no_defect_code)
    args.blur_code = _csv_to_list(args.blur_code)
    args.must_copy_codes = _csv_to_list(args.must_copy_codes)
    args.limited_codes = _csv_to_list(args.limited_codes)
    args.limited_num = _csv_to_list(args.limited_num, cast=int)
    args.key_code_min_num = _csv_to_list(args.key_code_min_num, cast=int)
    args.copy_code_min_num = _csv_to_list(args.copy_code_min_num, cast=int)


def code_num_specification(codes, nums, code_type):
    """Reconcile a code list with its per-code number list.

    Rules (behavior identical to the original):
    - empty codes  -> ([], [])
    - empty nums with non-empty codes -> the code list is disabled ([], [])
    - length mismatch with non-empty nums -> nums[0] is broadcast to every code
    - otherwise nums is cast element-wise to int and returned with codes.

    ``code_type`` only labels the printed warnings. The original's
    ``elif len(codes) != len(nums)`` / ``else: pass`` was unreachable dead
    code (the outer ``if`` already guarantees the mismatch) and was removed.
    """
    if len(codes) == 0:
        codes, nums = [], []
    if len(codes) != len(nums):
        if len(nums) == 0:
            # No counts supplied: the whole code list is a no-op.
            codes = []
            print('%s: len(args.limited_num) is 0, so limited_codes_cut not take effect !' % code_type)
        else:
            # Mismatched lengths: broadcast the first count to every code.
            nums = [nums[0]] * len(codes)
            print('%s : len(args.limited_codes) != len(args.limited_num), so only limited_num[0] take effect !' % code_type)
    nums = [int(x) for x in nums]
    return codes, nums

def main():
    """Index image/annotation dirs into csv files for classifier training.

    For each img_dir (expected layout: one ``<code>/`` sub-folder per defect
    code containing ``*.jpg``), pairs images with annotation files from the
    matching anno_dir, drops unreadable images and unwanted codes, optionally
    applies color transfer, then writes either a train/val split or a single
    csv into ``args.save_dir``.
    """
    args = parse_args()
    str_to_list(args)

    # pandarallel is optional; remember whether it initialised so we can
    # fall back to a plain pandas apply below.
    try:
        from pandarallel import pandarallel
        pandarallel.initialize(progress_bar=True)
        print('Use multi threading !')
        is_pandarallel = True
    except Exception:
        print('Use single threading !')
        is_pandarallel = False

    def _apply(obj, fn, **kwargs):
        # BUG FIX: the original called parallel_apply unconditionally, which
        # raises AttributeError whenever pandarallel failed to import; the
        # is_pandarallel flag was computed but never used.
        if is_pandarallel:
            return obj.parallel_apply(fn, **kwargs)
        return obj.apply(fn, **kwargs)

    # If anno_dirs doesn't line up with img_dirs, derive it by the
    # '<img_dir>_xml' naming convention.
    if len(args.anno_dirs) != len(args.img_dirs):
        args.anno_dirs = [x + '_xml' for x in args.img_dirs]
    assert len(args.img_dirs) == len(args.anno_dirs), " the length of img_dirs and anno_dirs must equals !"
    args.is_split = bool(args.is_split)

    os.makedirs(args.save_dir, exist_ok=True)
    df_total = pd.DataFrame()
    for i in range(len(args.img_dirs)):
        print('\nimgdir: ', args.img_dirs[i])
        img_dir = args.img_dirs[i]
        mask_dir = args.anno_dirs[i]

        df = pd.DataFrame()
        # The parent folder name of each image is its defect code.
        df['image'] = glob.glob(os.path.join(img_dir, '*/*.jpg'))
        print('\ncur len: ', len(df))
        df['code'] = _apply(df['image'], lambda x: Path(x).parent.name)

        # Drop blur / no-defect codes unless they were enabled for training.
        if not bool(args.blur_code_train):
            df = df[~df['code'].isin(args.blur_code)]
        if not bool(args.no_defect_code_train):
            df = df[~df['code'].isin(args.no_defect_code)]
        print('\nafter del blur & no_defect code, cur len: ', len(df))

        # Discard images that fail to decode.
        df['img_normal'] = _apply(df['image'], img_check)
        df = df[df['img_normal']]
        df = df.drop(['img_normal'], axis=1)

        # get_label_path returns '-1' for samples that must be dropped and
        # '0' for samples allowed to train without an annotation file.
        df['box'] = _apply(df, lambda x: get_label_path(
            mask_dir, x['code'], x['image'], args.label_file_suffix,
            args.blur_code_train, args.blur_code,
            args.no_defect_code_train, args.no_defect_code), axis=1)
        df = df[df['box'] != '-1']

        df_total = pd.concat([df_total, df]).reset_index(drop=True)

    if args.use_color_transfer:
        from color_transfer import color_transfer
        df_total = color_transfer(df=df_total, img_col_name='image', code_col_name='code',
                                  out_dir=os.path.abspath(args.temp_dir),
                                  mean_std=[args.color_transfer_mean, args.color_transfer_std])

    save_file_names = file_name_check(args.save_file_names, args.is_split)

    # Best-effort load of the key-code table (the path may be None).
    try:
        with open(args.key_code_pr_path, 'r') as f:
            key_code_pr = json.load(f)
    except Exception:
        print("\nkey_code_pr_path can't read !!!")
        key_code_pr = {}
    key_codes = key_code_pr.keys()

    args.limited_codes, args.limited_num = code_num_specification(args.limited_codes, args.limited_num, code_type='limit_code')
    key_codes, args.key_code_min_num = code_num_specification(key_codes, args.key_code_min_num, code_type='key_code')

    args.must_copy_codes, args.copy_code_min_num = code_num_specification(args.must_copy_codes, args.copy_code_min_num, code_type='must_copy_code')
    if len(args.must_copy_codes) > 0:
        # Key codes are oversampled separately; keep only the must-copy codes
        # that are NOT key codes, together with their paired minimum counts.
        keep_mask = [code not in key_codes for code in args.must_copy_codes]
        args.must_copy_codes = [code for idx, code in enumerate(args.must_copy_codes) if keep_mask[idx]]
        args.copy_code_min_num = [num for idx, num in enumerate(args.copy_code_min_num) if keep_mask[idx]]

    print('all data shape: ', df_total.shape)
    if args.is_split:
        df_total = minmium_code_check(df_total, args.code_minimum_num)
        df_total = limited_codes_cut(df_total, args.limited_codes, args.limited_num)
        train_total, valid_total = train_test_split_by_code(df_total, test_size=args.val_ratio, random_state=752)
        print('train code:')
        print(train_total['code'].value_counts())
        print('val code:')
        print(valid_total['code'].value_counts())
        print('train data shape: ', train_total.shape)
        print('valid data shape: ', valid_total.shape)
        # Oversample key codes first, then the remaining must-copy codes.
        train_total = code_copy(train_total, key_codes, args.key_code_min_num, args.copy_type, args.copy_multiple)
        train_total = code_copy(train_total, args.must_copy_codes, args.copy_code_min_num, args.copy_type, args.copy_multiple)
        print('After copy, train data shape: ', train_total.shape)

        train_total.to_csv(os.path.join(args.save_dir, save_file_names[0]), index=False)
        valid_total.to_csv(os.path.join(args.save_dir, save_file_names[1]), index=False)
    else:
        print('all code:')
        print(df_total['code'].value_counts())
        df_total = code_copy(df_total, key_codes, args.key_code_min_num, args.copy_type, args.copy_multiple)
        df_total = code_copy(df_total, args.must_copy_codes, args.copy_code_min_num, args.copy_type, args.copy_multiple)
        print('After copy, all data shape: ', df_total.shape)
        df_total.to_csv(os.path.join(args.save_dir, save_file_names[0]), index=False)


# Script entry point: only run the pipeline when executed directly, not on import.
if __name__ == '__main__':
    main()
