import os
import numpy as np
import pandas as pd
import glob
from pathlib import Path
import cv2
import sys
sys.path.append('..')
from utils.json import json_to_image
from tqdm import tqdm
tqdm.pandas(desc='apply')

def get_product(img_path):
    """Return the product name for an image path.

    The expected directory layout is ``<product>/<code>/<image>``, so the
    product is the grandparent directory of the image file.
    """
    return Path(img_path).parent.parent.name


def dataset_split_func(data, codes, by, random_state, test_size):
    """Split ``data`` into train/val frames, stratified per value in ``codes``.

    Rows of each code are shuffled with ``random_state``; the leading
    ``(1 - test_size)`` fraction goes to train.  A code whose train share
    rounds down to zero contributes all of its rows to the train frame.
    """
    train_parts = [pd.DataFrame()]
    val_parts = [pd.DataFrame()]
    for code in codes:
        subset = data[data[by] == code].reset_index(drop=True)
        subset = subset.sample(frac=1, random_state=random_state).reset_index(drop=True)
        n_train = int(len(subset) * (1 - test_size))
        if n_train == 0:
            # Too few rows to carve out a validation share — keep them all.
            train_parts.append(subset)
        else:
            train_parts.append(subset.iloc[:n_train])
            val_parts.append(subset.iloc[n_train:])
    return pd.concat(train_parts), pd.concat(val_parts)

def my_train_test_split(data, test_size=0.1, by='code', by2=None, random_state=752):
    """Stratified train/val split on ``by``, optionally within each ``by2`` group.

    When ``by2`` names an existing column, the split is performed separately
    inside every ``by2`` group so each group contributes to both frames;
    otherwise a single global split is done.

    Returns (train_df, val_df).
    """
    codes = np.unique(data[by])
    if by2 is None or by2 not in data.columns:
        return dataset_split_func(data, codes, by, random_state, test_size)

    train_df = pd.DataFrame()
    val_df = pd.DataFrame()
    for group in np.unique(data[by2]):
        # Fix: the complement frame (rows != group) was previously computed
        # on every iteration and never used — dropped.
        group_rows = data[data[by2] == group].reset_index(drop=True)
        if len(group_rows) == 0:
            continue
        g_train, g_val = dataset_split_func(group_rows, codes, by, random_state, test_size)
        train_df = pd.concat([g_train, train_df])
        val_df = pd.concat([g_val, val_df])
    return train_df, val_df


def search_json(img_path, label_path, label_suffix, no_defect_codes):
    """Resolve the label file for ``img_path``.

    The defect code is the image's parent folder name.  Returns:
      * ``'0'`` for codes listed in ``no_defect_codes`` (no label needed),
      * the label file path when a readable label is found,
      * ``'INVALID'`` when the label is missing or cannot be parsed.
    """
    code = str(Path(img_path).parent.name)
    if code in no_defect_codes:
        return '0'

    def _check_json(json_path):
        # Parse the JSON by rendering it onto the image canvas; the mask
        # itself is discarded — this only validates that the file is usable.
        # Fix: the image is now read only when actually needed (it was
        # previously loaded unconditionally for every call).
        try:
            img = cv2.imread(img_path, 1)
            json_to_image(img.shape, json_path).astype('uint8')
            return json_path
        except Exception:
            return 'INVALID'

    if label_suffix != '' and label_suffix != '.':
        # Explicit label suffix: look for exactly that file.
        label_file_name = Path(img_path).with_suffix(label_suffix).name
        label_file_path = os.path.join(label_path, code, label_file_name)
        if not os.path.exists(label_file_path):
            # Fix: the original `elif code not in no_defect_codes` was always
            # true here (handled by the early return above), making its
            # trailing `return '0'` unreachable — simplified to a plain return.
            return 'INVALID'
        if label_suffix == '.json':
            return _check_json(label_file_path)
        return label_file_path

    # No suffix configured: prefer a .json label, then fall back to .png.
    json_name = Path(img_path).with_suffix('.json').name
    json_path = os.path.join(label_path, code, json_name)
    if os.path.exists(json_path):
        return _check_json(json_path)

    png_name = Path(img_path).with_suffix('.png').name
    png_path = os.path.join(label_path, code, png_name)
    if os.path.exists(png_path):
        try:
            cv2.imread(png_path, 0)
            return png_path
        except Exception:
            return 'INVALID'
    return 'INVALID'



def code_limit_exec(df, memo):
    """Cap the number of rows per code according to ``memo``.

    ``memo`` maps code -> maximum row count.  Codes over their cap are
    shuffled with a fixed seed and truncated; everything else passes
    through untouched.
    """
    for code, cap in memo.items():
        matching = df[df['code'] == code]
        if len(matching) <= cap:
            continue
        kept = matching.sample(frac=1, random_state=10).reset_index(drop=True).iloc[:cap]
        others = df[df['code'] != code]
        df = pd.concat([others, kept]).reset_index(drop=True)
    return df

def img_copy(df, memo):
    """Oversample codes by duplicating rows until each reaches its target count.

    ``memo`` maps code -> desired row count.  Codes absent from ``df`` are
    skipped (this also guarantees the divisions below never see len == 0).
    """
    exists_codes = set(df['code'].tolist())
    for code, target in memo.items():
        if code not in exists_codes:
            continue
        matching = df[df['code'] == code]
        others = df[df['code'] != code]
        if len(matching) >= target:
            # Bug fix: previously `carry = target % len` rows were duplicated
            # even when the code already met its quota (negative `times`
            # just made the copy loop empty), inflating the frame past the
            # target.  Codes at/above target now pass through unchanged.
            continue

        matching = matching.sample(frac=1, random_state=10).reset_index(drop=True)

        # target = carry + times*len + len  (full copies plus a partial one)
        times = (target - len(matching)) // len(matching)
        carry = target % len(matching)

        parts = [matching.iloc[:carry, :].copy()]
        parts.extend(matching.copy() for _ in range(times))
        parts.append(matching)
        df = pd.concat([pd.concat(parts), others])
    return df

def main():
    """Build train/val CSV indexes of image/label pairs.

    Scans the configured image roots, resolves each image's label file,
    drops invalid samples, optionally caps/oversamples per-code counts,
    and writes the result as CSV files.
    """
    try:
        from pandarallel import pandarallel
        pandarallel.initialize(progress_bar=True)
        print('Use multi threading !')
        is_pandarallel = True
    except Exception:
        print('Use single threading !')
        is_pandarallel = False

    ###########################################################################################
    img_paths = [
        r'/data2/autorepair/ruanzhifeng/autorepair_t7_10/T6_Vtech/T643B05/data_accumulation',
        r'/data2/autorepair/ruanzhifeng/autorepair_t7_10/T6_Vtech/T643B05/original_data_20250311_reshape',
        r'/data2/autorepair/ruanzhifeng/autorepair_t7_10/T6_Vtech/T643D02/data_accumulation',
        r'/data2/autorepair/ruanzhifeng/autorepair_t7_10/T6_Vtech/T643D02/original_data_20250307_reshape',
        r'/data2/autorepair/ruanzhifeng/autorepair_t7_10/T6_Vtech/T665D07/data_accumulation',
        r'/data2/autorepair/ruanzhifeng/autorepair_t7_10/T6_Vtech/T665D07/original_data_reshape_20250306'
    ]

    # Alternative configurations kept for reference:
    # img_paths = []
    # label_paths = None
    # root_p = r'/data2/autorepair/ruanzhifeng/autorepair_t7_10/t10/electrical_data'
    # products = os.listdir(root_p)

    # # elec
    # for p in products:
    #     if p not in ['270D5', '270F', '315F5', '315F6', '315FA', '571Z', '397T1', "365F1", "520T1" ]: continue
    #     for folder in os.listdir(os.path.join(root_p, p)):
    #         # if 'test' in folder or 'no_json' in folder or "_xml" in folder:
    #             # continue
    #         img_paths.append(os.path.join(root_p, p, folder))

    # # optic
    # for p in products:
    #     img_paths.append(os.path.join(root_p, p))

    # Labels live next to the images by default.
    label_paths = img_paths[:]

    if label_paths is None or label_paths == []:
        label_paths = img_paths[:]
    assert len(img_paths)==len(label_paths), 'len of img_paths and label_paths not equals !!!'

    no_defect_codes = ['OK', 'OK-eleback-50X-Charm', 'OK-elefront-50X-Charm', 'OK_back', 'OK_front', "TSFAS", "TGXID"]
    dataset_split = True
    val_ratio = 0.15
    img_suffix = '.jpg'
    label_suffix = ''

    code_num_limits = {}
    code_copy_memo = {}
    ###########################################################################################

    df = pd.DataFrame()

    for i, (img_path, label_path) in enumerate(zip(img_paths, label_paths)):
        print("\n", img_path)
        tmp_df = pd.DataFrame()
        tmp_df['image'] = glob.glob(os.path.join(img_path, '*/*'+img_suffix))
        tmp_df['code'] = tmp_df['image'].apply(lambda x: Path(x).parent.name)
        tmp_df['product'] = tmp_df['image'].apply(lambda x: get_product(x))
        label_func = lambda x: search_json(img_path=x, label_path=label_path, label_suffix=label_suffix, no_defect_codes=no_defect_codes)
        if is_pandarallel:
            tmp_df['label'] = tmp_df['image'].parallel_apply(label_func)
        else:
            # Bug fix: `parallel_apply` does not exist when pandarallel is
            # unavailable; fall back to tqdm's progress_apply (registered
            # at import time via tqdm.pandas()).
            tmp_df['label'] = tmp_df['image'].progress_apply(label_func)
        # Drop samples whose label was missing or unparsable.
        tmp_df = tmp_df[tmp_df['label']!='INVALID']

        df = pd.concat([df, tmp_df]).reset_index(drop=True)

    df = code_limit_exec(df, code_num_limits)

    # Bug fix: CSVs are written into the AA_Vtech subdirectory, which was
    # not created before (only ../datasets/csv was), so to_csv failed.
    os.makedirs('../datasets/csv/AA_Vtech', exist_ok=True)
    if dataset_split:
        train_df, val_df = my_train_test_split(data=df, test_size=val_ratio, by2='product')
        train_df = img_copy(train_df, code_copy_memo)
        print('\ntrain len: ', len(train_df))
        print(train_df['code'].value_counts())

        train_df.to_csv(r'../datasets/csv/AA_Vtech/train_20250614.csv', index=False)
        val_df.to_csv(r'../datasets/csv/AA_Vtech/val_20250614.csv', index=False)
    else:
        print('\nlen: ', len(df))
        print(df['code'].value_counts())
        df.to_csv(r'../datasets/csv/AA_Vtech/all_20250614.csv', index=False)


# Script entry point — run the dataset-indexing pipeline.
if __name__ == '__main__':
    main()
