import os
import sys
sys.path.append('/share/duli/utils')
sys.path.append('/share/project/duli/content_relation_ana/utils')
from utils import *
# import langid

from tqdm import tqdm
import random
import pdb
import re

def load_jsonls(dat_path):
    """Load and concatenate the known shard files found under *dat_path*.

    Only files whose names contain one of the four expected index-range
    markers are read; each is parsed with ``load_jsonl`` and all records
    are concatenated into a single list.
    """
    shard_markers = ('0_300000', '300000_600000', '600000_900000', '900000_1200000')
    samples = []
    for fname in os.listdir(dat_path):
        if any(marker in fname for marker in shard_markers):
            samples.extend(load_jsonl(os.path.join(dat_path, fname)))
    return samples

def detect_languages_langid(text):
    """Detect the language of each non-empty line of *text*.

    Splits the text on newlines, classifies every non-blank segment with
    ``langid.classify``, and returns the set of detected language codes.
    NOTE(review): ``langid`` has no visible import here (the explicit one
    is commented out at the top of the file) — presumably it comes in via
    ``from utils import *``; confirm.
    """
    detected = set()
    for segment in text.split('\n'):
        if segment.strip():
            detected.add(langid.classify(segment)[0])
    return detected

def contains_ruleout_keywords(text, ruleout_kwds):
    """Return True if *text* contains any rule-out keyword, case-insensitively.

    Keywords in *ruleout_kwds* are expected to already be lowercase
    (as they are where this is called in this file).
    """
    # Hoist the lowercasing out of the loop: the original re-lowered the
    # whole text once per keyword.
    lowered = text.lower()
    return any(keyword in lowered for keyword in ruleout_kwds)

def contains_ruleout_tag(tag_ls, ruleout_tags):
    """Return True if any tag in *tag_ls* is present in *ruleout_tags*."""
    return any(tag in ruleout_tags for tag in tag_ls)


# Fixed seed: makes random.sample / random.shuffle below reproducible.
random.seed(2024)
# All relative output paths below resolve against this working directory.
os.chdir('/share/project/duli/content_relation_ana/subset_gen/wildchat_logicot_metamath')
# One-shot cleaning stage, disabled (if False) after its output,
# tot_cleaned.jsonl, was written; flip to True to regenerate it.
if False:
    # Keys (id + dataset) of the samples selected for this subset.
    sample_keys = load_jsonl('/share/project/duli/content_relation_ana/subset_gen/wildchat_logicot_metamath/sample_keys.jsonl')

    # Per-sample loss under the base model, indexed by id+dataset.
    loss_base_sample_ls = load_jsonls('/share/project/duli/content_relation_ana/subset_gen/loss_selection/tot_qwen1.5-7B')
    loss_base_sample_dict = {(str(sample['id'])+sample['dataset']): sample for sample in loss_base_sample_ls}

    # Per-sample loss after SFT, same id+dataset indexing.
    loss_after_sft_sample_ls = load_jsonls('/share/project/duli/content_relation_ana/subset_gen/follow_diffculty/tot_qwen1.5-7B')
    loss_after_sft_sample_dict = {(str(sample['id'])+sample['dataset']): sample for sample in loss_after_sft_sample_ls}

    # Join the two loss tables on id+dataset and derive a "compression
    # ratio" (after-SFT loss / base loss) per sample.
    sample_ls = []
    for sample_key in sample_keys:
        sample_key = str(sample_key['id'])+sample_key['dataset']
        sample = loss_base_sample_dict[sample_key]
        sample['loss_base'] = loss_base_sample_dict[sample_key]['loss_base']
        sample['loss_after_sft'] = loss_after_sft_sample_dict[sample_key]['loss_after_sft']
        sample['compression_ratio'] = sample['loss_after_sft'] / sample['loss_base']
        sample_ls.append(sample)

    # Refusal phrases (English and Chinese) used to drop refusal-style replies.
    ruleout_kwds = ['i am sorry', 'i\'m sorry', '抱歉']
    # Ability tags marking content-filter/refusal samples to exclude.
    ruleout_tags = set(["过滤不良内容", "不适宜内容过滤", "内容过滤", "限制主题", "不适当内容检测", "拒绝生成"])

    sample_ls_n = []

    # Keep a sample only if it: contains no refusal keyword, carries no
    # rule-out tag, is entirely en/zh, and is not trivially short
    # (more than 7 whitespace-separated tokens, or more than 80 chars).
    for sample in tqdm(sample_ls):
        #if count_non_alphabetic_chars(str(sample['content'])) <=5:
        if not contains_ruleout_keywords(str(sample['content']), ruleout_kwds):
            if not contains_ruleout_tag(sample['label']['ability'], ruleout_tags):
                if detect_languages_langid(str(sample['content'])).issubset({'en', 'zh'}):
                    if len(str(sample['content']).split()) > 7:
                        sample_ls_n.append(sample)
                    elif len(str(sample['content'])) > 80:
                        sample_ls_n.append(sample)

    save_jsonl(sample_ls_n, './tot_cleaned.jsonl')

# Cleaned pool produced by the (normally disabled) cleaning stage above.
sample_ls_n = load_jsonl('/share/project/duli/content_relation_ana/subset_gen/wildchat_logicot_metamath/tot_cleaned.jsonl')
# Per-dataset subset sizes.
NUM_LOGI_COT = 20000
NUM_MATH = 10000
NUM_WILDCHAT = 120000

# NUM_MATH = 5000
# NUM_WILDCHAT = 120000

sample_metamath = [sample for sample in sample_ls_n if sample['dataset']=='metamath']
sample_logicot = [sample for sample in sample_ls_n if sample['dataset']=='logi_cot']
sample_wildchat = [sample for sample in sample_ls_n if sample['dataset']=='wildchat']

'''
filtering metamath
'''
# BUG FIX: this previously filtered from the full pool (sample_ls_n),
# silently replacing sample_metamath with '####'-free samples from ALL
# datasets. Filter the metamath subset itself instead (dropping samples
# whose content still carries a '####' answer marker).
sample_metamath = [sample for sample in sample_metamath if '####' not in str(sample['content'])]
sample_metamath = random.sample(sample_metamath, NUM_MATH)
# (removed a leftover pdb.set_trace() debugging breakpoint here)
'''
sampling logi_cot
'''
sample_logicot = random.sample(sample_logicot, NUM_LOGI_COT)

'''
filtering wildchat
'''

# Multi-turn data is considered valuable, so a bit more multi-turn is
# selected.  NOTE(review): the calls below actually split the wildchat
# budget 50/50 between single- and multi-turn — confirm intent.
# Relative share of the budget given to each ranking criterion in
# sampling(); the four weights sum to 1.0.
weight_dict = {
    'tag_diversity_weight': 0.25, 
    'base_loss_weight': 0.25,
    'loss_after_sft_weight': 0.25,
    'compression_ratio_weight': 0.25
}

def sampling(sample_ls, weight_dict, tot_num):
    """Select roughly *tot_num* samples by four ranking criteria in turn.

    For each criterion, the remaining samples are sorted descending by
    that criterion and the top ``int(weight * tot_num)`` are kept, then
    removed from the pool before the next criterion is applied.

    Args:
        sample_ls: list of sample dicts carrying 'label.ability',
            'loss_base', 'loss_after_sft' and 'compression_ratio'.
        weight_dict: per-criterion weights (fractions of *tot_num*).
        tot_num: total selection budget (may be a float; quotas are
            truncated with int()).

    Returns:
        The kept samples, in selection order.
    """
    # BUG FIX: the last quota previously reused 'loss_after_sft_weight'
    # instead of 'compression_ratio_weight' (harmless only while all
    # weights are equal).  The duplicated sort/slice pattern is also
    # collapsed into one loop over (weight key, sort key) pairs.
    criteria = [
        ('tag_diversity_weight', lambda x: len(x['label']['ability'])),
        ('base_loss_weight', lambda x: x['loss_base']),
        ('loss_after_sft_weight', lambda x: x['loss_after_sft']),
        ('compression_ratio_weight', lambda x: x['compression_ratio']),
    ]
    keep_sample_ls = []
    remaining = list(sample_ls)
    for weight_key, sort_key in criteria:
        remaining = sorted(remaining, reverse=True, key=sort_key)
        sample_num = int(weight_dict[weight_key] * tot_num)
        keep_sample_ls += remaining[:sample_num]
        remaining = remaining[sample_num:]
    return keep_sample_ls


# Split wildchat into single- vs multi-turn and give each half the budget.
sample_wildchat_single = [sample for sample in sample_wildchat if sample['meta']['multiturn'] == False]
sample_wildchat_multi = [sample for sample in sample_wildchat if sample['meta']['multiturn'] == True]

# NOTE(review): NUM_WILDCHAT/2 is a float; sampling() truncates each
# per-criterion quota with int(), so the totals are approximate.
sample_wildchat_single = sampling(sample_wildchat_single, weight_dict, NUM_WILDCHAT/2)
sample_wildchat_multi = sampling(sample_wildchat_multi, weight_dict, NUM_WILDCHAT/2)

sample_tot = sample_metamath + sample_logicot + sample_wildchat_single + sample_wildchat_multi
# sample_tot = sample_wildchat_single + sample_wildchat_multi

# sample_tot = sample_metamath + sample_wildchat_single + sample_wildchat_multi

random.shuffle(sample_tot)

# Convert each sample's role/content turn list into ShareGPT-style
# 'conversations' entries (human/gpt); turns with other roles are dropped.
for sample in sample_tot:
    conversations = []
    for i in sample['content']:
        if i['role'] == 'user':
            conversations.append({'from': 'human', 'value': i['content']})
        if i['role'] == 'assistant':
            conversations.append({'from': 'gpt', 'value': i['content']})
    sample['conversations'] = conversations

save_jsonl(sample_tot, './resampled_0712.jsonl')
# save_jsonl(sample_tot, './re_sampled_decrease_wildchat.jsonl')
