import argparse
import random
import traceback
import json
import jieba
from multiprocessing import Pool
import os
from tqdm import tqdm
import time
from utils import load_dataset, clean_instruction
from openai_generate import get_llm_response
from prompt_functions import *
from loguru import logger

# Route loguru output to a per-run log file (timestamped so runs don't collide).
# NOTE(review): 'los_' looks like a typo for 'log_' — confirm before changing.
logger.add('logs/los_{}'.format(time.time()))

# "Breadth" evolution: create a new instruction inspired by the original.
breadth_functions = {"Breadth": createBreadthPrompt}

# "Depth" evolutions: make the original instruction harder in different ways.
depth_functions = {
            "Constraints": createConstraintsPrompt,
            "Deepen": createDeepenPrompt,
            "Concretizing": createConcretizingPrompt,
            "Reasoning": createReasoningPrompt}
# All strategies keyed by name; dict-union requires Python 3.9+.
evol_functions = breadth_functions | depth_functions

def llm_generate(prompt, max_attempts=4):
    """Call the LLM with *prompt*, retrying on failure.

    Makes up to ``max_attempts`` calls to ``get_llm_response`` (the
    original loop's ``try_count == 5`` cutoff amounted to 4 attempts),
    sleeping 2 seconds between attempts.

    Args:
        prompt: text sent to the model.
        max_attempts: maximum number of calls before giving up.

    Returns:
        The model response, or '' if every attempt failed.
    """
    for attempt in range(1, max_attempts + 1):
        try:
            return get_llm_response(prompt)
        # Catch Exception, not a bare except: a bare clause would also
        # swallow KeyboardInterrupt/SystemExit and make the worker unkillable.
        except Exception:
            traceback.print_exc()
            print(f'INFO: retry [{prompt}], try count [{attempt}]')
            time.sleep(2)
    return ''

def select_prompt(instruction, function_names):
    """Pick one evolution strategy at random and build its prompt.

    Args:
        instruction: the seed instruction to evolve.
        function_names: candidate keys into ``evol_functions``.

    Returns:
        (prompt, name) where ``name`` is the strategy actually applied.

    Bug fixed: the original called ``random.choice(function_names)``
    twice — once to pick the function and once for the returned name —
    so the reported strategy name could differ from the one used.
    Drawing once keeps them consistent.
    """
    name = random.choice(function_names)
    return evol_functions[name](instruction), name

def instruction_evolver(instruction, function_names=None):
    """Evolve *instruction* using a randomly selected strategy.

    Args:
        instruction: the seed instruction to evolve.
        function_names: optional subset of strategy names; defaults to
            every key in ``evol_functions``.

    Returns:
        (evolved_text, strategy_name) as produced by the LLM call.
    """
    names = list(evol_functions.keys()) if function_names is None else function_names
    prompt, chosen = select_prompt(instruction, names)
    logger.info("input:{}".format(repr(prompt)))
    evolved = llm_generate(prompt)
    return evolved, chosen

def instruction_eliminator(original_instruction, evolved_instruction, response):
    """Decide whether an evolved instruction should be kept.

    Applies three elimination rules in order and returns
    ``(keep, reason)`` where ``keep`` is False when any rule fires.
    """
    # Rule 1: the evolved instruction adds no information over the original.
    if original_instruction == evolved_instruction:
        return False, "Equal"
    # Ask the eliminator model whether the two instructions are equivalent.
    prompt = createComparisonEliminatorPrompt(original_instruction, evolved_instruction)
    logger.info("input:{}".format(repr(prompt)))
    verdict = llm_generate(prompt)
    logger.info("output:{}".format(repr(verdict)))
    if "equal" in verdict.lower():
        return False, "Equal"

    # Rule 2: the model struggled — a short apology-style response (and the
    # original instruction wasn't itself about "sorry").
    is_refusal = 'sorry' in response.lower() and "sorry" not in original_instruction.lower()
    if is_refusal and len(response.split()) < 80:
        return False, "Sorry"

    # Rule 3: wording leaked from the evolution prompt template itself.
    lowered = evolved_instruction.lower()
    for phrase in ('given prompt', 'rewritten prompt', 'new prompt'):
        if phrase in lowered:
            return False, "Leak from Prompt"

    return True, "Success"

# def instruction_multiple_rounds(evolved_instruction, evolved_response):
#     prompt = craeteDialoguePrompt(evolved_instruction, evolved_response)
#     return llm_generate(prompt)


def call_multiprocess(sub_data, output_dir, filename, args):
    """Run the evolve -> respond -> eliminate pipeline over one data shard.

    Each surviving example gets an ``evolved_multiple_rounds`` entry and is
    appended as a JSON line to ``output_dir/filename``.

    Args:
        sub_data: list of examples; each example is a dict whose
            ``content[0]["content"]`` holds the seed instruction.
        output_dir: directory the .jsonl shard is written into.
        filename: shard file name (appended to, not truncated).
        args: parsed CLI namespace; only ``num_iterations`` is read here.
    """
    save_path = os.path.join(output_dir, filename)
    # 'with' guarantees the handle is closed even if a stage raises
    # (the original opened without a context manager).
    with open(save_path, 'a+') as w:
        for _ in range(args.num_iterations):
            for example in tqdm(sub_data):
                # Stage 0: Check instruction
                instruction = example["content"][0]["content"]
                if instruction == "":
                    continue

                # Stage 1: Evolve instruction
                evolved_instruction, function_names = instruction_evolver(instruction)
                logger.info("output:{}".format(repr(evolved_instruction)))
                evolved_instruction = clean_instruction(evolved_instruction)

                # Stage 2: Generate response from evolved instruction
                logger.info("input:{}".format(repr(evolved_instruction)))
                evolved_response = llm_generate(evolved_instruction)
                logger.info("output:{}".format(repr(evolved_response)))

                # Stage 3: Eliminator (the original line lacked the '#',
                # which made this function a SyntaxError).
                success, reason = instruction_eliminator(instruction, evolved_instruction, evolved_response)
                if success:
                    dic_item = {"evolved_instruction": evolved_instruction, "evolved_response": evolved_response,  "function_names":function_names} #"success":success,
                    example["evolved_multiple_rounds"] = [dic_item]
                    logger.info('+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
                    w.write(json.dumps(example, ensure_ascii=False)+'\n')

def generate_data(initial_dataset, args):
    """Shard the dataset and evolve each shard in its own worker process.

    Spawns ``args.process_num`` workers; worker ``i`` writes ``i.jsonl``
    under ``args.output_path``. Blocks until all workers finish, then
    surfaces any worker exception (``apply_async`` silently swallows
    errors unless ``.get()`` is called on the result — the original never
    did, so failed workers went unnoticed).
    """
    print(f'INFO: data size: [{len(initial_dataset)}]')
    # Ceiling division so the final shard absorbs the remainder.
    split_size = -(-len(initial_dataset) // args.process_num)

    pool = Pool(args.process_num)
    results = []
    for i in range(args.process_num):
        time.sleep(0.1)  # stagger worker start-up slightly
        sub_data = initial_dataset[split_size * i : split_size * (i + 1)]
        filename = f'{i}.jsonl'
        results.append(
            pool.apply_async(call_multiprocess, args=(sub_data, args.output_path, filename, args))
        )
    print('waiting for all subprocesses done ...')
    pool.close()
    pool.join()

    # Re-raise-and-log: report worker failures instead of dropping them.
    for i, res in enumerate(results):
        try:
            res.get()
        except Exception:
            traceback.print_exc()
            print(f'INFO: worker [{i}] failed')

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--num_iterations", type=int, default=1)
    parser.add_argument("--evol_model", type=str, default="gpt-4")
    parser.add_argument("--eliminator_model", type=str, default="gpt-4")
    parser.add_argument("--initial_dataset", type=str, default="/home/ubuntu/zhaohy/seed_0619/seed_0619.jsonl")
    parser.add_argument("--output_path", type=str, default="./output")

    parser.add_argument("--verbose", type=bool, default=True)
    parser.add_argument("--azure_config_path", type=str, default=None)
    parser.add_argument("--process_num", type=int, default=50)
    args = parser.parse_args()

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    # load data
    initial_dataset = load_dataset(args.initial_dataset)

    # generate sft data
    generate_data(initial_dataset, args)


# Script entry point: run the full evolution pipeline when executed directly.
if __name__ == '__main__':
    main()