import os
import sys
sys.path.append('/nvme/share/shellm')

import glob
import json
import torch
import pandas as pd 

from tqdm import tqdm
from megatron.data import indexed_dataset

# Whether to restrict these languages to their quality-scored subsets.
scored = True
if scored:
    LANGUAGES = ["java", "markdown", "javascript", "sql", "python", "c", "cpp", "applescript", "antlr", "assembly"]
else:
    LANGUAGES = []

# Per-language size budgets for the 10B mix.
# Prob.json holds a list; entry [2]['10B'] maps language -> budget
# (consumed as a token count downstream — confirm units against producer).
with open('/shd/fanggx/Paper/WithZzr/Gengerate/Prob.json', 'r') as f:
    prob_dict = json.load(f)[2]['10B']

OUTPUT_MERGED = "/shd/fanggx/Paper/WithZzr/Gengerate/scripts/merge_of_scored/output"
os.makedirs(OUTPUT_MERGED, exist_ok=True)
idx_path  = f'{OUTPUT_MERGED}/merged.idx'
bin_path  = f'{OUTPUT_MERGED}/merged.bin'
meta_path = f'{OUTPUT_MERGED}/metas.pickle'
# Megatron mmap dataset builder; vocab_size 32016 matches the CodeLlama tokenizer
# used to encode the parts (see ids_dir_pattern below) — TODO confirm.
builder   = indexed_dataset.make_builder(bin_path, impl='mmap', vocab_size=32016)

# Progress bar sized to the 10B-token target.
bar = tqdm(total=10 * 1000000000)

ids_dir_root = "/shd/fanggx/Paper/WithZzr/Gengerate/the_stack/feather/output_ids"
ids_dir_pattern = ids_dir_root + "/{lan}/codellama.encoded/{part}.feather"
# Language subdirectories only: entries containing '.' are files, not languages.
lan_list = sorted([p for p in os.listdir(ids_dir_root) if '.' not in p])

def get_part_n(path):
    """Return the part identifier (file stem) of a feather file path."""
    return path.split('/')[-1].split('.')[0]


def _merge_rows(df_ids, lan, tokens_used):
    """Shuffle *df_ids* and append each row's token ids to the global builder.

    Updates the progress bar as it goes.  Returns ``(tokens_used, exhausted)``
    where *tokens_used* is the running token total for *lan* and *exhausted*
    is True once the per-language budget in ``prob_dict`` is exceeded.
    """
    for _, row in df_ids.sample(frac=1).iterrows():
        builder.add_item(torch.IntTensor(row['input_ids']))
        builder.end_document()

        bar.update(len(row['input_ids']))
        bar.set_description('Program: {}'.format(lan))
        tokens_used += len(row['input_ids'])

        if tokens_used > prob_dict[lan]:
            return tokens_used, True
    return tokens_used, False


for lan in lan_list:
    tokens_used = 0
    exhausted = False

    if lan in LANGUAGES:
        # 1. Scored language: merge only rows classified as high quality.
        scored_dir_pattern = "/shd/fanggx/Paper/WithZzr/Rater/score/output/{lan}/*.feather"
        scored_dir = scored_dir_pattern.format(lan=lan)
        score_files_path_list = sorted(glob.glob(scored_dir), reverse=True)

        for path in score_files_path_list:
            # i. Map the score file back to the matching token-id part.
            part_n = get_part_n(path)
            ids_path = ids_dir_pattern.format(lan=lan, part=part_n)

            # ii. Load the scores and the token ids for this part.
            df_score = pd.read_feather(path)
            df_ids = pd.read_feather(ids_path)

            # iii. Keep rows with P(label=1) > 0.51 and index back into the
            #      ids frame.  NOTE(review): this assumes df_score and df_ids
            #      share the same row index — confirm against the scorer.
            idx_list = df_score.index[df_score['1'] > 0.51].tolist()
            tokens_used, exhausted = _merge_rows(df_ids.loc[idx_list], lan, tokens_used)
            if exhausted:
                break
    else:
        # 2. Unscored language: merge raw parts (skip meta files),
        #    iterating parts in reverse-sorted order.
        ids_path_list = sorted(
            [p for p in glob.glob(ids_dir_pattern.format(lan=lan, part="*")) if 'meta' not in p],
            reverse=True,
        )
        for ids_path in ids_path_list:
            tokens_used, exhausted = _merge_rows(pd.read_feather(ids_path), lan, tokens_used)
            if exhausted:
                break

builder.finalize(idx_path)