import os
import sys
sys.path.append('/share/duli/utils')
sys.path.append('/share/project/duli/content_relation_ana/utils')
from utils import *
from collections import Counter
import random
import pdb

# dat_path = '/share/duli/sft/content_relation_ana/tag_clean_and_normalization/normalized'
# sample_ls = []
# file_ls = os.listdir(dat_path)
# for file in file_ls:
#     if 'sft_ability' in file:
#         try:
#             dat_tmp = load_jsonl(os.path.join(dat_path, file, 'cleaned_ability.jsonl'))
#             sample_ls.extend(dat_tmp)
#         except:
#             pass

def calculate_tag_and_cate_dataset_counts(samples):
    """Collect ability tags, (stripped) category labels, and dataset names.

    Args:
        samples: iterable of sample dicts; a well-formed sample carries
            ``sample['label']['ability']`` (list of tag strings),
            ``sample['cate_ability']`` (list of category strings), and
            ``sample['dataset']`` (dataset name string).

    Returns:
        Tuple ``(tag_ls, cate_ls, dataset_ls)`` — flat lists aggregated
        over all samples that have the expected fields.

    Side effect: normalizes each sample's ``cate_ability`` in place by
    stripping surrounding whitespace from every category string.

    Malformed samples are skipped best-effort; a sample that fails partway
    still contributes whatever was already appended (this matches the
    original behavior and keeps the three lists' semantics unchanged).
    """
    tag_ls = []
    cate_ls = []
    dataset_ls = []
    for sample in samples:
        try:
            tag_ls.extend(sample['label']['ability'])
            sample['cate_ability'] = [i.strip() for i in sample['cate_ability']]
            cate_ls.extend(sample['cate_ability'])
            dataset_ls.append(sample['dataset'])
        except (KeyError, TypeError, AttributeError):
            # Narrowed from a bare `except:` — only skip samples with
            # missing or oddly-typed fields instead of swallowing every
            # exception (including KeyboardInterrupt/SystemExit).
            pass
    return tag_ls, cate_ls, dataset_ls

#sample_ls = load_jsonl('/share/duli/sft/content_relation_ana/tag_clean_and_normalization/normalized/tot.jsonl')
# Load the normalized, ability-tagged SFT corpus produced by the
# tag-cleaning/normalization pipeline (one JSON object per line).
sample_ls = load_jsonl('/share/project/duli/content_relation_ana/tag_clean_and_normalization/normalized/tot.jsonl')

# sample_ls = [i for i in sample_ls if not (i['dataset'] == 'code_contest' and len(i['content'][0]['content']) < 100)]

# new_dat = []
# file_ls = ['/share/duli/sft/sft_dataset_collections/baize/reformat.jsonl', '/share/duli/sft/sft_dataset_collections/belle_multiturn_chat/reformat.jsonl']
# for file in file_ls:
#     dat_tmp = load_jsonl(file)
#     new_dat.extend(dat_tmp)

# tmp = []
# map_key = {(i['dataset']+str(i['id'])):ith for ith, i in enumerate(new_dat)}
# for sample in sample_ls:
#     key = sample['dataset'] + str(sample['id'])
#     if key in map_key:
#         sample['content'] = new_dat[map_key[key]]['content']
#         tmp.append(sample['dataset'])

# print(set(tmp))

# err_dat_ls = []
# err_sample_ls = []
# for sample in sample_ls:
#     content = sample['content']
#     roles = []
#     for i in content:
#         roles.append(i['role'])
#     if len(set(roles)) != 2:
#         err_dat_ls.append(sample['dataset'])
#         err_sample_ls.append(sample)

# set(err_dat_ls)
# sample_ls = load_jsonl('/share/project/duli/content_relation_ana/subset_gen/follow_diffculty/sft_base_100w_v0513_highloss_cate_200000_high_followdifficulty_cate_200000.jsonl')

# Global frequency statistics over the full corpus.
tag_ls, cate_ls, dataset_ls = calculate_tag_and_cate_dataset_counts(sample_ls)

tag_counter = Counter(tag_ls)
cate_counter = Counter(cate_ls)
dataset_counter = Counter(dataset_ls)

# Tags with frequency in [20, 200) are long-tailed and always kept;
# tags in [200, 500) are only partially kept (randomly downsampled below).
keep_tag_set = set([k for k,i in tag_counter.items() if (i >= 20 and i < 200)]) 
part_keep_tag_set = set([k for k,i in tag_counter.items() if (i >= 200 and i < 500)])

# keep_id_ls accumulates the indices of selected samples; tot_id_ls tracks
# the indices that have not been selected yet.
keep_id_ls = []
tot_id_ls = list(range(len(sample_ls)))

# Pass 1: keep every sample that carries at least one long-tailed tag.
for idx, sample in enumerate(sample_ls):
    if set(sample['label']['ability']) & keep_tag_set:
        keep_id_ls.append(idx)

print('Inculde the keep_tag_set',  len(keep_id_ls))
tot_id_ls = list(set(tot_id_ls).difference(keep_id_ls))  # drop already-selected ids

# Pass 2: keep roughly 67% of samples carrying a mid-frequency tag.
# The random draw happens only when the tag intersection is non-empty,
# exactly as in the per-sample check below.
for idx, sample in enumerate(sample_ls):
    if set(sample['label']['ability']) & part_keep_tag_set:
        if random.random() > 0.33:
            keep_id_ls.append(idx)

print('Inculde the part_keep_tag_set', len(keep_id_ls))

tot_id_ls = list(set(tot_id_ls).difference(keep_id_ls))  # drop already-selected ids

# Pass 3: of the remainder, keep samples that carry more than 4 tags
# (highly multi-skill instances).
for idx in tot_id_ls:
    if len(sample_ls[idx]['label']['ability']) > 4:
        keep_id_ls.append(idx)

keep_id_ls = list(set(keep_id_ls))
tot_id_ls = list(set(tot_id_ls).difference(keep_id_ls))  # drop already-selected ids

print('Include the instances with more than 4 tags', len(keep_id_ls))

# Tally how many already-kept samples fall in each category.
cate_ls_keep = [c for i in keep_id_ls for c in sample_ls[i]['cate_ability']]

cate_counter_keep = Counter(cate_ls_keep)

# Target share of each category within the 1.6M-sample budget.
cate_ratio = {'编程与软件开发': 0.15, '数据科学与分析':0.05, '数学能力':0.15, '自然语言处理与理解':0.1, '任务生成':0.03,'教育与咨询':0.02, '逻辑与推理':0.1, '创意与设计':0.03, '问题解答与支持':0.02, '信息处理与整合':0.03, '沟通与社交媒体':0.03, '项目与任务管理':0.015, '分析与研究':0.015, '语言学知识、多语言与多元文化理解':0.05, 'STEM知识':0.03, '人文历史哲学与社会学知识':0.02, '财务、金融与商业知识':0.01, '生活知识与技能':0.01, '法律知识':0.01, '文学创作与艺术知识':0.01, '医学、药学与健康知识':0.01, '政治、军事战略与安全知识':0.001, '开放任务完成':0.05, '开放知识问答':0.04}

# Remaining quota per category: target count minus what is already kept
# (negative when a category is over-represented, in which case nothing
# is added later).
cate_sample_num = {k: cate_ratio[k] * 1.6e6 - cate_counter_keep[k] for k in cate_ratio}

# Add samples to each category
# Bucket the remaining candidate ids by one category per sample. A sample
# may carry several categories; random.choice picks one uniformly (same
# distribution as the previous shuffle-then-take-first approach, without
# shuffling the whole list).
tot_cate_id_ls = {k: [] for k in cate_counter.keys()}
for i in tot_id_ls:
    tot_cate_id_ls[random.choice(sample_ls[i]['cate_ability'])].append(i)

# Top up every category that is still short of its target quota, capped by
# the number of available candidates. A category absent from the candidate
# buckets is skipped explicitly — previously a missing key fell into a bare
# `except:` that dropped into pdb.set_trace(), hanging unattended runs.
for k, quota in cate_sample_num.items():
    quota = int(quota)
    if quota > 0:
        pool = tot_cate_id_ls.get(k, [])
        keep_id_ls.extend(random.sample(pool, min(quota, len(pool))))

print('adjust category proportion', len(keep_id_ls))
keep_sample_ls = [sample_ls[i] for i in keep_id_ls]
tag_ls, cate_ls, dataset_ls = calculate_tag_and_cate_dataset_counts(keep_sample_ls)

# Counter(tag_ls)
print(Counter(cate_ls))
print(Counter(dataset_ls))

# Controlling the number of each dataset 
# Datasets curated as uniformly high quality: keep every remaining sample.
# Using a set for O(1) membership instead of a list.
high_quality_datasets = {'wizardlm_evol_instruct_v2_196k', 'alpaca_gpt4_zh', 'alpaca_gpt4', 'dolly', 'lima', 'Wizard_evol_instruct_zh', 'Code Alpaca 20K', 'coig-cqia'}
for i in tot_id_ls:
    dataset = sample_ls[i]['dataset']
    if dataset in high_quality_datasets:
        keep_id_ls.append(i)
    elif dataset == 'metamath':
        # Downsample metamath to roughly 50% to cap its share.
        if random.random() > 0.5:
            keep_id_ls.append(i)

# Single dedup pass (the original deduplicated twice in a row).
keep_id_ls = list(set(keep_id_ls))
print('including high quality dataset', len(keep_id_ls))

# Downsample belle_train_3.5m_cn to roughly 55%; keep everything else.
keep_id_ls_n = []
for i in keep_id_ls:
    if sample_ls[i]['dataset'] == "belle_train_3.5m_cn":
        if random.random() > 0.45:
            keep_id_ls_n.append(i)
    else:
        keep_id_ls_n.append(i)

print('filtering out belle_train_3.5m_cn', len(keep_id_ls_n))
# BUG FIX: removed `keep_sample_ls = random.sample(keep_sample_ls, 1e6)` —
# random.sample requires an integer sample size (1e6 is a float, raising
# TypeError at runtime), and its result was immediately overwritten by the
# next assignment anyway.
keep_sample_ls = [sample_ls[i] for i in keep_id_ls_n]

#out_path = '/share/duli/sft/content_relation_ana/subset_gen/sft_base_100w.jsonl'
# Destination for the balanced ~1M-sample SFT base subset.
out_path = '/share/project/duli/content_relation_ana/subset_gen/sft_base_100w_v0429.jsonl'

save_jsonl(keep_sample_ls, out_path)

# Report the final tag/category/dataset distribution of the saved subset.
tag_ls, cate_ls, dataset_ls = calculate_tag_and_cate_dataset_counts(keep_sample_ls)

# Counter(tag_ls)
print("## BASE INSTANCES")
print(Counter(cate_ls))
print(Counter(dataset_ls))

# if False:
# high_loss = load_jsonl('/share/project/duli/content_relation_ana/subset_gen/additional_high_loss_instances_v0429.jsonl')

# keep_sample_ls = keep_sample_ls + high_loss

# tag_ls, cate_ls, dataset_ls = calculate_tag_and_cate_dataset_counts(keep_sample_ls)

# # Counter(tag_ls)
# print("## WITH HIGH LOSS INSTANCES")
# print(Counter(cate_ls))
# print(Counter(dataset_ls))


# # samples = load_jsonl('/share/project/duli/content_relation_ana/subset_gen/high_loss_added.jsonl')

# tag_ls, cate_ls, dataset_ls = calculate_tag_and_cate_dataset_counts(samples)

# # Counter(tag_ls)
# print("## WITHOUT LOW LOSS INSTANCES")
# print(Counter(cate_ls))
# print(Counter(dataset_ls))
