
from tqdm import tqdm
import os
import json
import hashlib
from multiprocessing import Process as mp
from multiprocessing import Pool
import traceback
import time
import argparse
import timeout_decorator
from prompts import ability_prompt, task_prompt, ability_prompt_eng, task_prompt_eng
from openai import OpenAI
import time
import threading  
import requests  
from transformers import AutoTokenizer

# Local HuggingFace checkpoint path for the Qwen1.5-72B-Chat tokenizer.
# NOTE(review): `tokenizer` is never referenced anywhere in this file —
# confirm it is needed before keeping this (slow) load at import time.
model_name_or_path = "/share/project/lijijie/tools/transfer_hf/Qwen1___5-72B-Chat"
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path,  trust_remote_code=True)

# Per-request timeout in seconds, enforced by @timeout_decorator on call_api.
TIMEOUT = 1200

# Local OpenAI-compatible endpoint (e.g. vLLM); the API key is unused ("EMPTY").
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"

client = OpenAI(
    api_key=openai_api_key,
    base_url=openai_api_base,
)

@timeout_decorator.timeout(TIMEOUT)
def call_api(ipt):
    """Label one sample via the local OpenAI-compatible chat endpoint.

    Args:
        ipt: dict with a "prompt" key (as built by `reformat`).

    Returns:
        str: the assistant message content for the ability-labeling prompt.

    Raises:
        timeout_decorator.TimeoutError: if the request exceeds TIMEOUT seconds.
    """
    temp_instr = ability_prompt_eng.format(prompt=ipt["prompt"])
    chat_response = client.chat.completions.create(
        model="Qwen1_5-72B-Chat",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": temp_instr},
        ],
    )
    # Read the typed response object directly instead of the original
    # json.loads(chat_response.json()) round-trip, which re-serialized and
    # re-parsed the entire response just to reach one field.
    return chat_response.choices[0].message.content
    
def call_multiprocess(ipt_list, save_dir, filename, meta_func):
    """Label every item in `ipt_list` with `meta_func` and append the results
    as JSON lines to `save_dir/filename`.

    Each item is retried up to MAX_TRIES times on exception (2s backoff); if
    all attempts fail, the label defaults to ''. The item's "prompt" key is
    dropped and a {'label': {'ability': ...}} entry is added in place.

    Raises:
        Exception: if the target shard file already exists (refuses to append
            to stale output).
    """
    MAX_TRIES = 6  # same as the original try_count 1..6 before giving up
    save_path = os.path.join(save_dir, filename)
    if os.path.exists(save_path):
        raise Exception(f'Error: save_path [{save_path}] Already Exists!')
    # Context manager guarantees the shard file is closed even if a write
    # raises (the original leaked the handle on any exception).
    with open(save_path, 'a+') as f:
        for ipt in tqdm(ipt_list):
            opt = ''
            for _ in range(MAX_TRIES):
                try:
                    opt = meta_func(ipt)
                    break
                except Exception:
                    time.sleep(2)
                    traceback.print_exc()
            ipt['label'] = {}
            ipt['label']['ability'] = opt
            ipt.pop("prompt", None)
            f.write(json.dumps(ipt, ensure_ascii=False) + '\n')
    print(f'INFO: [{save_path}] done.')
    return

def merge_results(output_dir, process_num):
    """Concatenate the per-process shards 0.jsonl .. {process_num-1}.jsonl
    into `output_dir/all.jsonl`, skipping blank lines.

    Args:
        output_dir: directory containing the shard files.
        process_num: number of shards to merge (must all exist).
    """
    out_path = os.path.join(output_dir, 'all.jsonl')
    with open(out_path, 'w+') as out_f:
        for idx in range(process_num):
            filepath = os.path.join(output_dir, f'{idx}.jsonl')
            # Context manager per shard: the original opened each shard with
            # open(filepath).readlines() and never closed the handle.
            with open(filepath) as shard:
                for line in shard:
                    line = line.strip()
                    if line:
                        out_f.write(line + '\n')

def run_multiprocess(ipt_func, meta_func, output_dir, process_num):
    """Split the input list into `process_num` contiguous chunks, label each
    chunk in a worker process via `call_multiprocess`, then merge the shards
    into `output_dir/all.jsonl`.

    Args:
        ipt_func: the full list of samples to label (despite the name, this
            is data, not a callable).
        meta_func: per-sample labeling function (e.g. call_api).
        output_dir: directory for the per-process .jsonl shards and all.jsonl.
        process_num: number of worker processes / shards.
    """
    ipt_list = ipt_func
    print(f'INFO: data size: [{len(ipt_list)}]')
    # Ceiling division so the final chunk absorbs the remainder.
    split_size = len(ipt_list) // process_num
    if len(ipt_list) % process_num != 0:
        split_size += 1
    pool = Pool(process_num)
    async_results = []
    for i in range(process_num):
        sub_data = ipt_list[split_size * i : split_size * (i + 1)]
        filename = f'{i}.jsonl'
        async_results.append(
            pool.apply_async(call_multiprocess, args=(sub_data, output_dir, filename, meta_func))
        )
    print('waiting for all subprocesses done ...')
    pool.close()
    pool.join()
    # apply_async swallows worker exceptions unless .get() is called; the
    # original dropped the AsyncResults, so a failed shard (e.g. a
    # pre-existing file) died silently and the merge step later failed
    # confusingly on a missing shard. Surface the real error here instead.
    for res in async_results:
        res.get()
    print('all processed done.')
    print('start merge ...')
    merge_results(output_dir, process_num)
    print('merge done.')

def reformat(dataset_ori):
    """Attach a truncated, language-aware 'prompt' field to every sample.

    A sample whose "instruction" contains any CJK Unified Ideograph is treated
    as Chinese and gets a Chinese template truncated to 1000 characters;
    otherwise an English template truncated to 400 space-separated tokens.

    Note: samples are mutated in place — the returned list holds the same
    dict objects as `dataset_ori`.

    Args:
        dataset_ori: list of dicts with "instruction" and "output" keys.

    Returns:
        The same list of dicts, each with a new "prompt" key.
    """
    dataset_reformat = []
    for dat_tmp in tqdm(dataset_ori):
        instruction = dat_tmp["instruction"]
        # CJK Unified Ideographs range U+4E00..U+9FA5 marks the sample
        # as Chinese.
        contain_chn = any(u'\u4e00' <= char <= u'\u9fa5' for char in instruction)
        if contain_chn:
            prompt = '用户问: ' + instruction + '\n' + '对话助手回答：' + dat_tmp["output"] + "\n"
            # Character-based truncation for Chinese text.
            prompt = prompt[:1000]
        else:
            # NOTE(review): 'ASSITANT' typo preserved byte-for-byte from the
            # original template — changing it would alter the model input.
            prompt = 'USER: ' + instruction + '\n' + 'ASSITANT: ' + dat_tmp["output"] + "\n"
            # Token-count truncation: split on single spaces, as the original.
            prompt = ' '.join(prompt.split(' ')[:400])
        dat_tmp['prompt'] = prompt
        dataset_reformat.append(dat_tmp)
    return dataset_reformat

def load_jsonl(args):
    """Load the input dataset and build its per-sample prompts.

    NOTE(review): despite the name, `args.input_file` is parsed as a single
    JSON array via json.load, NOT as JSON Lines — confirm which format the
    upstream data actually uses.

    Args:
        args: parsed CLI namespace with an `input_file` attribute.

    Returns:
        The list of samples with a "prompt" field added (see `reformat`).
    """
    with open(args.input_file, 'r') as f:
        dat = json.load(f)
    return reformat(dat)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_file", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...): os.makedirs(...)` pattern.
    os.makedirs(args.output_path, exist_ok=True)

    ipt_list = load_jsonl(args)
    run_multiprocess(
        ipt_list,
        call_api,          # labeling function executed inside each worker
        args.output_path,
        16,                # number of worker processes / output shards
    )
