from my_py_toolkit.file.file_toolkit import *
from collections import Counter
from shutil import move
from tqdm import tqdm
import traceback
from datasketch import MinHash, MinHashLSH
from collections import defaultdict
import Levenshtein as lev
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor

# ================================  Deduplication by edit distance


def create_minhash(text, num_perm=128):
    """Build a MinHash signature from the characters of *text*.

    Each character is hashed individually, so the signature reflects the
    set of characters in the string rather than word- or shingle-level
    structure.

    Args:
        text: string to fingerprint.
        num_perm: number of MinHash permutations (signature size).

    Returns:
        datasketch.MinHash: the character-level signature.
    """
    signature = MinHash(num_perm=num_perm)
    for ch in text:
        signature.update(ch.encode('utf8'))
    return signature
def cluster_texts_by_edit_distance_lsh(text_list, thr, num_perm=128, dis_fn=lev.distance):
    """Group texts whose pairwise edit distance is strictly below ``thr``.

    MinHash LSH (character-level signatures, Jaccard threshold 0.5) prunes
    the candidate pairs; ``dis_fn`` (Levenshtein distance by default) then
    confirms true matches.

    Args:
        text_list: iterable of strings to cluster. Duplicate strings collapse
            into a single signature in the ``minhashes`` dict, but each
            occurrence is still processed in the clustering loop below.
        thr: exclusive upper bound on the edit distance for two texts to be
            considered similar.
        num_perm: number of MinHash permutations (signature size).
        dis_fn: pairwise distance function taking two strings.

    Returns:
        list[list[str]]: one inner list per discovered cluster identity; each
        inner list holds the texts that resolved to that same sorted
        neighbourhood tuple.
    """
    # Build a MinHash signature per text (character-level).
    minhashes = {text: create_minhash(text, num_perm) for text in text_list}

    # Index all signatures in LSH to retrieve candidate similar pairs cheaply.
    # NOTE(review): the 0.5 Jaccard threshold is hard-coded independently of
    # ``thr`` -- candidates outside that bucket are never distance-checked.
    lsh = MinHashLSH(threshold=0.5, num_perm=num_perm)
    for text, minhash in minhashes.items():
        lsh.insert(text, minhash)

    clusters = defaultdict(list)
    for text in text_list:
        # Candidates merely share an LSH bucket; the exact distance check
        # below is what decides membership.
        potential_matches = lsh.query(minhashes[text])
        cluster = [text]
        for other_text in potential_matches:
            if text != other_text and dis_fn(text, other_text) < thr:
                cluster.append(other_text)
        # The sorted member tuple acts as a canonical cluster identity, so
        # texts that resolve to the same neighbourhood land together.
        clusters[tuple(sorted(cluster))].append(text)

    return list(clusters.values())


def calculate_similarity(group, minhashes, thr):
    """Collect members of *group* that sit within edit distance ``thr``
    (exclusive) of some other member.

    ``minhashes`` is accepted for interface compatibility but is not used
    here. A text is emitted once per qualifying pair, so the result may
    contain duplicates; callers are expected to dedupe.

    Args:
        group: iterable of candidate strings (an LSH query result).
        minhashes: unused.
        thr: exclusive upper bound on the Levenshtein distance.

    Returns:
        list[str]: matched texts, in pair-enumeration order.
    """
    return [
        candidate
        for anchor in group
        for candidate in group
        if anchor != candidate and lev.distance(anchor, candidate) < thr
    ]

def cluster_texts_by_edit_distance_lsh_mul_thr(text_list, thr, num_perm=128, num_threads=4):
    """Multi-threaded variant of the LSH + edit-distance clustering.

    LSH candidate queries run eagerly in the main thread; the exact
    pairwise distance checks are offloaded to a thread pool via
    ``calculate_similarity``.

    Args:
        text_list: iterable of strings to cluster.
        thr: exclusive upper bound on the edit distance.
        num_perm: number of MinHash permutations (signature size).
        num_threads: worker count for the thread pool.

    Returns:
        list[list[str]]: per seed text, the deduplicated list of similar
        texts found among its LSH candidates.

    NOTE(review): unlike ``cluster_texts_by_edit_distance_lsh``, the result
    here is keyed per input text (one entry per text) rather than per
    canonical cluster tuple -- confirm callers expect this shape.
    """
    # Build a MinHash signature per text.
    minhashes = {text: create_minhash(text, num_perm) for text in text_list}

    # Index all signatures in LSH to find candidate similar pairs.
    lsh = MinHashLSH(threshold=0.5, num_perm=num_perm)
    for text, minhash in minhashes.items():
        lsh.insert(text, minhash)

    clusters = defaultdict(list)
    with ThreadPoolExecutor(max_workers=num_threads) as executor:
        # The lsh.query(...) call executes here, in the main thread, while
        # building the submission dict; only the distance filtering runs on
        # the pool.
        future_to_group = {executor.submit(calculate_similarity, lsh.query(minhashes[text]), minhashes, thr): text for text in text_list}
        # Iterates the dict in insertion order (not as_completed);
        # future.result() blocks until that future finishes.
        for future in future_to_group:
            group = future_to_group[future]
            # Deduplicate matches while preserving first-seen order.
            for other_text in future.result():
                if other_text not in clusters[group]:
                    clusters[group].append(other_text)

    return list(clusters.values())

# # 示例使用
# text_list = ["example", "exampel", "sampel", "apple", "aple"]
# thr = 2
# clusters = cluster_texts_by_edit_distance_lsh(text_list, thr)
# print(clusters)


# ================================  Exact-match (hard) deduplication

# def stastic_dup(datas, handle_desc_fn):
#     dup_analysis = {}
#     for id, data in datas.items():
#         # id = data['id']
#         desc = handle_desc_fn(data)
#         if desc not in dup_analysis:
#             dup_analysis[desc] = [id]
#         else:
#             dup_analysis[desc].append(id)
    
#     cts = Counter([len(v) for k,v in dup_analysis.items()])
#     cts = [(k,v) for k,v in cts.items()]
#     cts = sorted(cts, key=lambda x:x[0], reverse=True)
#     return cts

# def read_file_path_mapping(data_dirs, get_id_fn):
#     result = {}
#     for d in data_dirs:
#         for file in get_file_paths(d):
#             try:
#                 # fn = get_file_name(file)
#                 # id = int(fn.split('_')[0])
#                 id = get_id_fn(file)
#                 if id not in result:
#                     result[id] = [file]
#                 else:
#                     result[id].append(file)
#             except:
#                 print(f'{file}: {traceback.format_exc()}')
#     return result

# def remove_duplicates_files(datas, handle_data_fn, get_id_fn, ori_dirs, save_dir):
#     id_path_mapping = read_file_path_mapping(ori_dirs, get_id_fn)

#     # 重复内容归类
#     desc_analysis = {}
#     for id, data in datas.items():
#         # id = data['id']
#         desc = handle_data_fn(data)
#         if desc not in desc_analysis:
#             desc_analysis[desc] = [id]
#         else:
#             desc_analysis[desc].append(id)
    
#     # 重复性统计
#     cts = Counter([len(v) for k,v in desc_analysis.items()])
#     cts = [(k,v) for k,v in cts.items()]
#     cts = sorted(cts, key=lambda x:x[0], reverse=True)
#     print(f'重复性统计结果：{cts}')

#     # move 重复文件
#     for _, ids in tqdm(desc_analysis.items()):
#         for id in ids[1:]:
#             for file in id_path_mapping.get(id, []):
#                 if os.path.exists(file):
#                     fn = get_file_name(file)
#                     save_path = os.path.join(save_dir, fn)
#                     move(file, save_path)
#                 else:
#                     print(f"File not exist: {file}")
#     print(f'finished!')



