import os
import json
import time
import argparse
import openai
import tqdm
from datetime import datetime

# gpt3 (quota on these keys is exhausted)
# keys = [
#     "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",
#     "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",
#     "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",
#     "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",
#     "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",
#     "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",
#     "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",
#     "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",
#     "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",
#     "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",
# ]

# gpt3
# keys = [
#     "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",
#     "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",
#     "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",
# ]

# gpt3
# SECURITY NOTE(review): hard-coded OpenAI API keys committed to source.
# Anyone with access to this file can use (and exhaust) these accounts.
# These keys should be revoked and loaded from an environment variable or a
# secrets manager instead of being embedded here.
keys = [
    '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',
    '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',
    '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',
    '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',
    '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',
    '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',
    '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',
    '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',
    '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'
]

# gpt4
# keys = [
#     "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",
#     "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",
#     "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"
# ]

# gpt4
# keys = [
#     "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",
# ]

#gpt4
# keys = [
#     "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",
# ]

def key_generator_f(keys, rounds_per_key):
    """Yield API keys round-robin, serving each key `rounds_per_key` times.

    The consumer may resume the generator with ``.send({'force_new': True})``
    to force an immediate switch to the next key (e.g. after the current key
    is rejected for quota/auth reasons); any falsy value sent (including the
    ``None`` from a plain ``next()``) continues the normal rotation.
    """
    force_new = False
    current_round = 0
    key_idx = 0
    while True:
        if force_new:
            # Caller demanded a fresh key: advance with wrap-around and
            # restart the per-key round counter.
            key_idx = (key_idx + 1) % len(keys)
            current_round = 0
            print("Next key:", keys[key_idx])
        elif current_round < rounds_per_key:
            # Current key still has rounds left; keep serving it.
            current_round += 1
        else:
            # Rounds exhausted: rotate to the next key (wrap-around) and
            # count this yield as its first round.
            current_round = 1
            key_idx = (key_idx + 1) % len(keys)
            print("Next Key:", keys[key_idx])
        received = yield keys[key_idx]
        force_new = received['force_new'] if received else False

def make_requests(
        engine, prompts, temperature, top_p,
        frequency_penalty, presence_penalty, stop_sequences, n=1, retries=50, rounds_per_key=3,
        max_tokens=None, logprobs=None, best_of=None
    ):
    """Send a ChatCompletion request, rotating API keys on quota/auth errors.

    Args:
        engine: model name passed as ``model=`` to the ChatCompletion API.
        prompts: chat messages passed as ``messages=``.
        temperature, top_p, frequency_penalty, presence_penalty, stop_sequences, n:
            forwarded to ``openai.ChatCompletion.create``.
        retries: maximum number of error/retry iterations before giving up.
        rounds_per_key: how many requests each key serves before rotation.
        max_tokens: optional completion-length cap, forwarded when given.
            (New backward-compatible parameter: the caller in ``__main__``
            passed it, but the old signature rejected it with a TypeError.)
        logprobs, best_of: accepted for call-site compatibility but ignored —
            the ChatCompletion endpoint does not support them.

    Returns:
        A single-element list with a dict holding the prompt, the API
        response (or ``None`` if every retry failed), and a timestamp.
    """
    response = None
    retry_cnt = 0
    backoff_time = 2
    key_generator = key_generator_f(keys, rounds_per_key)
    key = next(key_generator)
    openai.api_key = key
    while retry_cnt <= retries:
        try:
            request_kwargs = dict(
                model=engine,
                messages=prompts,
                temperature=temperature,
                top_p=top_p,
                frequency_penalty=frequency_penalty,
                presence_penalty=presence_penalty,
                stop=stop_sequences,
                n=n,
            )
            # Only forward max_tokens when the caller supplied one, so the
            # default behavior (API-side default) is unchanged.
            if max_tokens is not None:
                request_kwargs["max_tokens"] = max_tokens
            response = openai.ChatCompletion.create(**request_kwargs)
            break  # got a normal response, exit the retry loop
        except openai.error.OpenAIError as e:
            print(f"OpenAIError: {e}.")
            if "Please reduce your prompt" in str(e):
                # Prompt is over the context limit; nothing we can shrink
                # here, so give up on this request.
                # (BUG FIX: the original referenced an undefined
                # `target_length` variable and raised NameError instead.)
                print(f"overlength")
                break
            elif "quota" in str(e):
                # Key exhausted: force rotation to the next key.
                print(f"{key} has not quota.")
                key_generator.send({'force_new': True})
                key = next(key_generator)
            elif "invalid_api_key" in str(e):
                # Key revoked/invalid: force rotation to the next key.
                print(f"{key} is an invalid_api_key.")
                key_generator.send({'force_new': True})
                key = next(key_generator)
            elif "Rate limit" in str(e):
                # Rate-limited: advance along the normal rotation.
                key = next(key_generator)
            else:
                # Unknown error: back off and retry with the same key.
                print(f"Retrying in {backoff_time} seconds...")
                time.sleep(backoff_time)
            openai.api_key = key
            retry_cnt += 1

    # `response` is either the API result or None after exhausted retries;
    # record it either way. (The original had an isinstance(response, str)
    # check, but both branches built the exact same dict, so it was dead.)
    data = {
        "prompt": prompts,
        "response": response,
        "created_at": str(datetime.now()),
    }
    return [data]

def parse_args():
    """Define and parse the command-line arguments for this script."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_file", type=str,
                        help="The input file that contains the prompts to GPT3.")
    parser.add_argument("--output_file", type=str,
                        help="The output file to save the responses from GPT3.")
    parser.add_argument("--engine", type=str,
                        help="The openai GPT3 engine to use.")
    parser.add_argument("--max_tokens", default=500, type=int,
                        help="The max_tokens parameter of GPT3.")
    parser.add_argument("--temperature", default=0.7, type=float,
                        help="The temprature of GPT3.")
    parser.add_argument("--top_p", default=0.5, type=float,
                        help="The `top_p` parameter of GPT3.")
    parser.add_argument("--frequency_penalty", default=0, type=float,
                        help="The `frequency_penalty` parameter of GPT3.")
    parser.add_argument("--presence_penalty", default=0, type=float,
                        help="The `presence_penalty` parameter of GPT3.")
    parser.add_argument("--stop_sequences", default=["\n\n"], nargs="+",
                        help="The `stop_sequences` parameter of GPT3.")
    parser.add_argument("--logprobs", default=5, type=int,
                        help="The `logprobs` parameter of GPT3")
    parser.add_argument("--n", type=int,
                        help="The `n` parameter of GPT3. The number of responses to generate.")
    parser.add_argument("--best_of", type=int,
                        help="The `best_of` parameter of GPT3. The beam size on the GPT3 server.")
    parser.add_argument("--use_existing_responses", action="store_true",
                        help="Whether to use existing responses from the output file if it exists.")
    parser.add_argument("--request_batch_size", default=1, type=int,
                        help="The number of requests to send to GPT3 at a time.")
    return parser.parse_args()

if __name__ == "__main__":
    args = parse_args()
    os.makedirs(os.path.dirname(args.output_file), exist_ok=True)

    # read existing file if it exists
    existing_responses = {}
    if os.path.exists(args.output_file) and args.use_existing_responses:
        with open(args.output_file, "r") as fin:
            for line in fin:
                data = json.loads(line)
                existing_responses[data["prompt"]] = data

    # do new prompts
    with open(args.input_file, "r") as fin:
        if args.input_file.endswith(".jsonl"):
            all_prompts = [json.loads(line)["prompt"] for line in fin]
        else:
            all_prompt = [line.strip().replace("\\n", "\n") for line in fin]

    with open(args.output_file, "w") as fout:
        for i in tqdm.tqdm(range(0, len(all_prompts), args.request_batch_size)):
            batch_prompts = all_prompts[i: i + args.request_batch_size]
            if all(p in existing_responses for p in batch_prompts):
                for p in batch_prompts:
                    fout.write(json.dumps(existing_responses[p]) + "\n")
            else:
                results = make_requests(
                    engine=args.engine,
                    prompts=batch_prompts,
                    max_tokens=args.max_tokens,
                    temperature=args.temperature,
                    top_p=args.top_p,
                    frequency_penalty=args.frequency_penalty,
                    presence_penalty=args.presence_penalty,
                    stop_sequences=args.stop_sequences,
                    logprobs=args.logprobs,
                    n=args.n,
                    best_of=args.best_of,
                )
                for data in results:
                    fout.write(json.dumps(data) + "\n")