import os
import sys
sys.path.append('/share/duli/utils')
sys.path.append('/share/project/duli/content_relation_ana/utils')
from utils import *
from collections import Counter
import random
from tqdm import tqdm
import re
import pdb

# Compiled once at import time; covers the CJK Unified Ideographs block.
_CHINESE_CHAR_RE = re.compile(r'[\u4e00-\u9fff]')

def contains_chinese(text):
    """Return True if *text* contains at least one CJK Unified Ideograph.

    Note: only U+4E00..U+9FFF is checked; extension blocks (e.g. CJK Ext-A)
    are not matched.
    """
    # Renamed parameter (was `str`) to stop shadowing the builtin.
    return _CHINESE_CHAR_RE.search(text) is not None

def calculate_tag_and_cate_dataset_counts(samples):
    """Flatten ability tags, category abilities and dataset names over *samples*.

    Side effect: each sample's 'cate_ability' list is rewritten in place with
    whitespace-stripped entries.

    Returns:
        (tag_ls, cate_ls, dataset_ls): three flat lists, one entry per
        tag / category / sample respectively, in input order.
    """
    tags, cates, datasets = [], [], []
    for s in samples:
        tags += s['label']['ability']
        s['cate_ability'] = [c.strip() for c in s['cate_ability']]
        cates += s['cate_ability']
        datasets.append(s['dataset'])
    return tags, cates, datasets

def find_most_sim_instances(source, target_ls, num=100):
    """Return up to *num* items of *target_ls* sharing the most ability tags with *source*.

    Similarity is the size of the intersection between the source's and the
    target's ``label.ability`` tag sets. Results are ordered by descending
    overlap; ties keep their original order in *target_ls* (stable sort).

    Args:
        source: sample dict with source['label']['ability'] iterable of tags.
        target_ls: list of candidate sample dicts with the same structure.
        num: maximum number of candidates to return.

    Returns:
        List of at most *num* elements of *target_ls* (the dicts themselves,
        not copies).
    """
    # Hoisted out of the loop: the original rebuilt this set per candidate.
    source_tags = set(source['label']['ability'])
    overlap = [len(source_tags.intersection(t['label']['ability'])) for t in target_ls]
    # Stable descending sort of indices preserves input order among ties,
    # matching the original sorted(..., reverse=True) behavior.
    ranked = sorted(range(len(target_ls)), key=overlap.__getitem__, reverse=True)
    return [target_ls[i] for i in ranked[:num]]

def load_tot_dat(dat_path):
    """Load and concatenate every ``*.jsonl`` file directly under *dat_path*.

    Best-effort: a file that fails to load is reported and skipped rather
    than aborting the whole scan.

    Args:
        dat_path: directory containing per-file jsonl dumps.

    Returns:
        Flat list of all samples from all readable jsonl files.
    """
    tot_sample_ls = []
    for file in os.listdir(dat_path):
        # endswith (was substring test) so e.g. 'x.jsonl.bak' is skipped.
        if not file.endswith('.jsonl'):
            continue
        try:
            tot_sample_ls.extend(load_jsonl(os.path.join(dat_path, file)))
        except Exception as e:
            # Was a bare `except: pass`; keep the best-effort behavior but
            # report the failure instead of hiding it, and let
            # KeyboardInterrupt/SystemExit propagate.
            print(f'load_tot_dat: skipping {file}: {e}')
    return tot_sample_ls

# sample_ls = load_jsonl('/share/duli/sft/content_relation_ana/tag_clean_and_normalization/normalized/tot.jsonl')
# Candidate pool: concatenation of all .jsonl files in this directory.
dat_path = '/share/project/duli/content_relation_ana/subset_gen/follow_diffculty/tot_qwen1.5-7B'
sample_ls = load_tot_dat(dat_path)
# sample_ls = [i for i in sample_ls if not contains_chinese(i['content'][0]['content']) and not contains_chinese(i['content'][1]['content'])]
# Keep only samples whose first two content turns BOTH contain Chinese
# characters (the commented line above was the inverse, no-Chinese variant).
sample_ls = [i for i in sample_ls if contains_chinese(i['content'][0]['content']) and contains_chinese(i['content'][1]['content'])]

#sample_ls = load_jsonl('/share/duli/sft/content_relation_ana/subset_gen/follow_diffculty/combined.jsonl')
# Index the pool by "<id><dataset>" composite key; presumably (id, dataset)
# uniquely identifies a sample -- duplicates would be silently collapsed. TODO confirm.
sample_dict = {str(sample['id'])+sample['dataset']:sample for sample in sample_ls}

# Benchmark side: one sub-directory per benchmark, each with a
# cleaned_ability.jsonl of tagged evaluation samples.
benchmark_path = '/share/project/duli/content_relation_ana/subset_gen/eval_tag_distrib_align/normalized'
benchmark_sample_ls = []
benchmark_path_ls = os.listdir(benchmark_path)

# NOTE(review): the counters below are recomputed over the CUMULATIVE list on
# every iteration, so each print shows running (not per-benchmark) totals and
# the work is quadratic in the number of sub-directories; only the last print
# reflects the full benchmark set.
# Side effect relied on later: calculate_tag_and_cate_dataset_counts strips
# whitespace from each benchmark sample's 'cate_ability' in place, which the
# category membership test in the selection loop depends on.
for sub_path in benchmark_path_ls:
    benchmark_sample_tmp = load_jsonl(os.path.join(benchmark_path, sub_path, 'cleaned_ability.jsonl'))
    benchmark_sample_ls.extend(benchmark_sample_tmp)
    tag_ls, cate_ls, dataset_ls = calculate_tag_and_cate_dataset_counts(benchmark_sample_ls)
    tag_counter = Counter(tag_ls)
    cate_counter = Counter(cate_ls)
    dataset_counter = Counter(dataset_ls)
    print(tag_counter)
    print(cate_counter)

# Frequency stats over the candidate pool. Also strips 'cate_ability' entries
# of pool samples in place (side effect of the helper).
# NOTE(review): these three counters are never read below -- diagnostic /
# leftover only.
tag_ls, cate_ls, dataset_ls = calculate_tag_and_cate_dataset_counts(sample_ls)

tag_counter = Counter(tag_ls)
cate_counter = Counter(cate_ls)
dataset_counter = Counter(dataset_ls)

sim_sample_ls = []

# Greedy, without-replacement selection: for each benchmark sample take its
# top-N most tag-similar pool samples, then remove everything selected so far
# from the pool so later benchmark samples cannot re-select the same items.
sample_ls_tmp = sample_ls[:]
for source in tqdm(benchmark_sample_ls):
    num = 400   # NOTE(review): dead assignment -- always overwritten below.
    # Larger budget for code / math / logic benchmarks, smaller otherwise.
    if '编程与软件开发' in source['cate_ability'] or '数学能力' in source['cate_ability'] or '逻辑推理' in source['cate_ability']:
        num = 300
    else:
        num = 200
    sim_sample_ls_tmp = find_most_sim_instances(source, sample_ls_tmp, num)
    sim_sample_ls.extend(sim_sample_ls_tmp)
    # NOTE(review): rebuilds the "used" index from the FULL sim_sample_ls each
    # iteration (quadratic over the run); tracking only the newly selected
    # keys would suffice.
    sample_dict_used = {str(sample['id'])+sample['dataset']:sample for sample in sim_sample_ls}
    # Shrink the pool: keep only keys not yet selected.
    sample_dict = {k:sample_dict[k] for k in sample_dict if k not in sample_dict_used}
    sample_ls_tmp = [i for i in sample_dict.values()]
    print(len(sample_ls_tmp))
    #pdb.set_trace()
    

#save_jsonl(sim_sample_ls, '/share/duli/sft/content_relation_ana/subset_gen/eval_tag_distrib_align/sim_samples.jsonl')
#save_jsonl(sim_sample_ls, '/share/duli/sft/content_relation_ana/subset_gen/eval_tag_distrib_align/sim_samples_with_addi_math_and_code.jsonl')
#save_jsonl(sim_sample_ls, '/share/project/duli/content_relation_ana/subset_gen/eval_tag_distrib_align/sim_samples_modified.jsonl')
# Persist the selected (Chinese-only) subset; earlier output paths kept above
# for provenance of previous runs.
save_jsonl(sim_sample_ls, '/share/project/duli/content_relation_ana/subset_gen/eval_tag_distrib_align/sim_samples_modified_chn.jsonl')

# def find_most_sim_instances_parallel(benchmark_sample_ls):
#     benchmark_sample_ls, sample_ls = benchmark_sample_ls[0], benchmark_sample_ls[1]
#     sim_sample_ls = []
#     print('finding')
#     for source in tqdm(benchmark_sample_ls):
#         num = 400 
#         sim_sample_ls_tmp = find_most_sim_instances(source, sample_ls, num)
#         sim_sample_ls.extend(sim_sample_ls_tmp)
#     return sim_sample_ls

# import multiprocessing
# NUM_CPU = 10
# sublist = []
# for i in range(NUM_CPU):
#     sublist.append([benchmark_sample_ls[(42*i):(42*(i+1))], sample_ls])

# with multiprocessing.Pool(processes=NUM_CPU) as pool:
#     sim_sample_ls_ls = pool.map(find_most_sim_instances_parallel, sublist)

# sim_sample_ls = [j for i in sim_sample_ls_ls for j in i]
# save_jsonl(sim_sample_ls, '/share/duli/sft/content_relation_ana/subset_gen/eval_tag_distrib_align/sim_samples_with_addi_math_and_code.jsonl')