from my_py_toolkit.file.file_toolkit import *
from my_py_toolkit.data_clean.remove_duplicates import cluster_texts_by_edit_distance_lsh
from collections import Counter
from shutil import move
from tqdm import tqdm
import Levenshtein as lev
import traceback


def stastic_dup(datas, handle_data_fn, thr_edis_dis=2, dis_fn=lev.distance):
    """Cluster near-duplicate entries in *datas* and report duplication statistics.

    Args:
        datas: mapping of id -> raw record.
        handle_data_fn: callable turning a record into the text used for comparison.
        thr_edis_dis: edit-distance threshold under which two texts are duplicates.
        dis_fn: distance function forwarded to the LSH clustering helper.

    Returns:
        list[tuple[int, int]]: (cluster_size, cluster_count) pairs sorted by
        cluster size descending, e.g. [(3, 2), (1, 40)] means two clusters of
        3 ids each and 40 unique entries.
    """
    # Group ids by their normalized text so byte-identical texts collapse first.
    all_text = {}
    for data_id, data in datas.items():  # `data_id`: avoid shadowing builtin `id`
        all_text.setdefault(handle_data_fn(data), []).append(data_id)

    # Cluster the distinct texts by approximate edit distance (project helper).
    res_cluster = cluster_texts_by_edit_distance_lsh(
        list(all_text), thr_edis_dis, dis_fn=dis_fn)

    # Expand each text cluster back into the full list of duplicate ids.
    dup_res = [
        [i for text in cluster for i in all_text[text]]
        for cluster in res_cluster
    ]

    # 重复性统计: how many clusters exist for each cluster size.
    cts = sorted(Counter(len(v) for v in dup_res).items(),
                 key=lambda x: x[0], reverse=True)
    print(f'重复性统计结果：{cts}')
    return cts

def read_file_path_mapping(data_dirs, get_id_fn):
    """Build a mapping from data id to every file path carrying that id.

    Args:
        data_dirs: iterable of directories scanned via the project helper
            `get_file_paths`.
        get_id_fn: callable extracting an id from a file path. Files for which
            it raises are logged and skipped (best-effort behavior preserved).

    Returns:
        dict: id -> list of file paths sharing that id.
    """
    result = {}
    for d in data_dirs:
        for file in get_file_paths(d):
            try:
                # Only the id extraction can realistically fail; keep the
                # try body minimal.
                file_id = get_id_fn(file)
            except Exception:
                # Narrowed from bare `except:` so KeyboardInterrupt/SystemExit
                # still propagate; parse failures are logged and skipped.
                print(f'{file}: {traceback.format_exc()}')
                continue
            result.setdefault(file_id, []).append(file)
    return result


def remove_duplicates_files(datas, handle_data_fn, get_id_fn, ori_dirs, save_dir, thr_edis_dis=2, allow_dup=1, dis_fn=lev.distance):
    """Detect near-duplicate records and move the files of surplus duplicates.

    Clusters the records of *datas* by edit distance, keeps the first
    `allow_dup` ids of each cluster, and moves every file belonging to the
    remaining (surplus) ids from *ori_dirs* into *save_dir*.

    Args:
        datas: mapping of id -> raw record.
        handle_data_fn: callable turning a record into the text used for comparison.
        get_id_fn: callable extracting an id from a file path.
        ori_dirs: directories whose files are mapped to ids.
        save_dir: destination directory for the files of surplus duplicates.
        thr_edis_dis: edit-distance threshold under which two texts are duplicates.
        allow_dup: number of ids to keep per duplicate cluster.
        dis_fn: distance function forwarded to the LSH clustering helper.
    """
    id_path_mapping = read_file_path_mapping(ori_dirs, get_id_fn)

    # Group ids by their normalized text so byte-identical texts collapse first.
    all_text = {}
    for data_id, data in datas.items():  # `data_id`: avoid shadowing builtin `id`
        all_text.setdefault(handle_data_fn(data), []).append(data_id)

    # Cluster the distinct texts by approximate edit distance (project helper),
    # then expand each cluster back into the full list of duplicate ids.
    res_cluster = cluster_texts_by_edit_distance_lsh(
        list(all_text), thr_edis_dis, dis_fn=dis_fn)
    dup_res = [
        [i for text in cluster for i in all_text[text]]
        for cluster in res_cluster
    ]

    # 重复性统计: how many clusters exist for each cluster size.
    cts = sorted(Counter(len(v) for v in dup_res).items(),
                 key=lambda x: x[0], reverse=True)
    print(f'重复性统计结果：{cts}')

    # move 重复文件: `shutil.move` fails if the destination directory is
    # missing, so create it up front.
    os.makedirs(save_dir, exist_ok=True)
    for ids in tqdm(dup_res):
        for dup_id in ids[allow_dup:]:  # keep the first `allow_dup` ids
            for file in id_path_mapping.get(dup_id, []):
                if os.path.exists(file):
                    save_path = os.path.join(save_dir, get_file_name(file))
                    move(file, save_path)
                else:
                    print(f"File not exist: {file}")
    print(f'finished!')