'''
豆茉君
AI破局俱乐部合伙人
欢迎加微信 doumoman

详细拆解见下文:
https://doumomedia.feishu.cn/docx/F8wYduttboANxmxlTvZceCTEnIb

Github来源:
https://github.com/mshumer/gpt-prompt-engineer

主要更改：
1. 增加部分中文注释
2. 改为简体中文输出
3. 去掉所有wandb相关的内容
4. 提供了4个中文案例（与拆解文一致）

'''

from prettytable import PrettyTable
import openai
from tqdm import tqdm
import itertools
from tenacity import retry, stop_after_attempt, wait_exponential


# NOTE(review): placeholder credential -- replace with a real key before running
# (better: load it from an environment variable instead of hard-coding it).
openai.api_key = "你的API KEY"

# Meta-prompt: instructs the model to *write* candidate system prompts from a
# task description plus test cases (output must be the prompt text only).
system_gen_system_prompt = """Your job is to generate system prompts for ChatGPT, given a description of the use-case and some test cases.

The prompts you will be generating will be for freeform tasks, such as generating a landing page headline, an intro paragraph, solving a math problem, etc.

In your generated prompt, you should describe how the AI should behave in simplified Chinese. Include what it will see, and what it's allowed to output. Be creative with prompts to get the best possible results. The AI knows it's an AI -- you don't need to tell it this.

You will be graded based on the performance of your prompt... but don't cheat! You cannot include specifics about the test cases in your prompt. Any prompts with examples will be disqualified.

Most importantly, output NOTHING but the prompt. Do not include anything else in your message."""


# Meta-prompt for the judge model: compare two generations and answer with a
# single 'A' or 'B'.
ranking_system_prompt = """Your job is to rank the quality of two outputs generated by different prompts. The prompts are used to generate a response for a given task.

You will be provided with the task description, the test prompt, and two generations - one for each system prompt.

Rank the generations in order of quality. If Generation A is better, respond with 'A'. If Generation B is better, respond with 'B'.

Remember, to be considered 'better', a generation must not just be good, it must be noticeably superior to the other.

Also, keep in mind that you are a very harsh critic. Only rank a generation as better if it truly impresses you more than the other.

Respond with your ranking, and nothing else. Be fair and unbiased in your judgement."""

# K is a constant factor that determines how much ratings change
# (the standard Elo K-factor).
K = 32

# Model/temperature used to generate the candidate prompts.
CANDIDATE_MODEL = 'gpt-3.5-turbo'
CANDIDATE_MODEL_TEMPERATURE = 0.9

# Model settings used to produce an answer for each (prompt, test case) pair.
GENERATION_MODEL = 'gpt-3.5-turbo'
GENERATION_MODEL_TEMPERATURE = 0.8
GENERATION_MODEL_MAX_TOKENS = 300  # maximum length of each generated answer

N_RETRIES = 3  # number of times to retry a call to the ranking model if it fails
RANKING_MODEL = 'gpt-3.5-turbo'
RANKING_MODEL_TEMPERATURE = 0.5

NUMBER_OF_PROMPTS = 8  # this determines how many candidate prompts to generate... the higher, the more expensive, but the better the results will be


def generate_candidate_prompts(description, test_cases, number_of_prompts):
    """Ask the candidate model for `number_of_prompts` optimized system prompts.

    The task description and the test cases are embedded in a single user
    message; the `n` parameter makes the API return one completion choice per
    requested prompt.  Returns a list of prompt strings.
    """
    user_message = f"Here are some test cases:`{test_cases}`\n\nHere is the description of the use-case: `{description.strip()}`\n\nRespond with your prompt, and nothing else. Be creative."
    response = openai.ChatCompletion.create(
        model=CANDIDATE_MODEL,
        messages=[
            {"role": "system", "content": system_gen_system_prompt},
            {"role": "user", "content": user_message},
        ],
        temperature=CANDIDATE_MODEL_TEMPERATURE,
        n=number_of_prompts)

    return [choice.message.content for choice in response.choices]


def expected_score(r1, r2):
    """Return the Elo expected score of a player rated `r1` vs one rated `r2`.

    Always in (0, 1); equal ratings give exactly 0.5.
    """
    rating_gap = (r2 - r1) / 400
    return 1 / (1 + 10 ** rating_gap)


def update_elo(r1, r2, score1):
    """Return the post-match Elo ratings as a tuple (new_r1, new_r2).

    `score1` is player 1's result: 1 for a win, 0 for a loss, 0.5 for a draw.
    Player 2's result is implicitly `1 - score1`.
    """
    expected1 = expected_score(r1, r2)
    expected2 = expected_score(r2, r1)
    new_r1 = r1 + K * (score1 - expected1)
    new_r2 = r2 + K * ((1 - score1) - expected2)
    return new_r1, new_r2

# Get Score - retry up to N_RETRIES times, waiting exponentially between retries.


@retry(stop=stop_after_attempt(N_RETRIES), wait=wait_exponential(multiplier=1, min=4, max=70))
def get_score(description, test_case, pos1, pos2, ranking_model_name, ranking_model_temperature):
    """Ask the ranking model which of two generations is better.

    Returns the raw single-token verdict: 'A' means `pos1` won, 'B' means
    `pos2` won.  Retried up to N_RETRIES times with exponential backoff.
    """
    user_content = f"""Task: {description.strip()}
                                            Prompt: {test_case['prompt']}
                                            Generation A: {pos1}
                                            Generation B: {pos2}"""
    response = openai.ChatCompletion.create(
        model=ranking_model_name,
        messages=[
            {"role": "system", "content": ranking_system_prompt},
            {"role": "user", "content": user_content},
        ],
        # Heavily bias the single output token so the model answers 'A' or 'B'.
        logit_bias={
            '32': 100,  # 'A' token
            '33': 100,  # 'B' token
        },
        max_tokens=1,
        temperature=ranking_model_temperature,
    )
    return response.choices[0].message.content


@retry(stop=stop_after_attempt(N_RETRIES), wait=wait_exponential(multiplier=1, min=4, max=70))
def get_generation(prompt, test_case):
    """Produce one answer for `test_case` using `prompt` as the system prompt.

    Retried up to N_RETRIES times with exponential backoff on failure.
    """
    messages = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": f"{test_case['prompt']}"},
    ]
    response = openai.ChatCompletion.create(
        model=GENERATION_MODEL,
        messages=messages,
        max_tokens=GENERATION_MODEL_MAX_TOKENS,
        temperature=GENERATION_MODEL_TEMPERATURE,
    )
    return response.choices[0].message.content


def test_candidate_prompts(test_cases, description, prompts):
    """Run a round-robin Elo tournament between the candidate prompts.

    Every unordered pair of prompts is compared on every test case.  The
    judge is queried twice with the two generations in both A/B orders to
    reduce position bias; the two verdicts are averaged into one match score.
    Returns a dict mapping each prompt to its final Elo rating.
    """
    # Initialize each prompt with an ELO rating of 1200.
    prompt_ratings = {prompt: 1200 for prompt in prompts}

    # Total rounds = test cases x C(n, 2) prompt pairs (for the progress bar).
    total_rounds = len(test_cases) * len(prompts) * (len(prompts) - 1) // 2
    pbar = tqdm(total=total_rounds, ncols=70)

    # For each pair of prompts
    for prompt1, prompt2 in itertools.combinations(prompts, 2):
        # For each test case
        for test_case in test_cases:
            pbar.update()

            # Generate one answer per prompt for this test case.
            generation1 = get_generation(prompt1, test_case)
            generation2 = get_generation(prompt2, test_case)

            # Judge both orderings (A/B swapped) to reduce position bias.
            score1 = get_score(description, test_case, generation1,
                               generation2, RANKING_MODEL, RANKING_MODEL_TEMPERATURE)
            score2 = get_score(description, test_case, generation2,
                               generation1, RANKING_MODEL, RANKING_MODEL_TEMPERATURE)

            # Convert each 'A'/'B' verdict into prompt1's numeric score
            # (anything else counts as a draw), then average the two rounds.
            score1 = 1 if score1 == 'A' else 0 if score1 == 'B' else 0.5
            score2 = 1 if score2 == 'B' else 0 if score2 == 'A' else 0.5
            score = (score1 + score2) / 2

            # Update ELO ratings.
            r1, r2 = prompt_ratings[prompt1], prompt_ratings[prompt2]
            r1, r2 = update_elo(r1, r2, score)
            prompt_ratings[prompt1], prompt_ratings[prompt2] = r1, r2

            # Print the winner of this round.
            # BUG FIX: the original used .strip('n'), which strips literal
            # 'n' characters from the snippet; .strip() removes the intended
            # surrounding whitespace/newlines (a lost '\n' escape).
            if score > 0.5:
                print(f"Winner获胜: {prompt1[20:40].strip()}")
            elif score < 0.5:
                print(f"Winner获胜: {prompt2[20:40].strip()}")
            else:
                print("Draw平局")

    pbar.close()

    return prompt_ratings


def generate_optimal_prompt(description, test_cases, number_of_prompts=10):
    """End-to-end pipeline: generate candidate prompts, run the Elo
    tournament, and print the final leaderboard (best prompt first)."""
    candidates = generate_candidate_prompts(
        description, test_cases, number_of_prompts)
    ratings = test_candidate_prompts(test_cases, description, candidates)

    # Print the final ELO ratings as a table, sorted from best to worst.
    table = PrettyTable()
    table.field_names = ["Prompt", "Rating"]
    ranked = sorted(ratings.items(), key=lambda kv: kv[1], reverse=True)
    for prompt_text, rating in ranked:
        table.add_row([prompt_text, rating])

    print(table)


# this style of description tends to work well
description = "请写出一个提示词, 帮助人们更好地了解自己的才能，并基于这个才能给出一些关于如何在互联网上成为一个优秀的自媒体人的建议。"

# Four sample test cases (in Chinese, matching the walkthrough article).
# Each 'prompt' is a user persona that every candidate system prompt is
# evaluated against during the tournament.
test_cases = [
    {
        'prompt': '我是一个能源数字化领域的软件产品经理，我想要做小红书平台的博主，我对AI绘画有兴趣，并且会写Python程序。',
    },
    {
        'prompt': '我是一个国有企业的商务经理，我想做小红书平台的博主，我平时对首饰珠宝感兴趣。',
    },
    {
        'prompt': '我是一个律师，我平时喜欢在家做牛排和披萨。',
    },
    {
        'prompt': '我是一个程序员，我非常喜欢了解电脑显卡方面的信息和知识。',
    },
]

if __name__ == "__main__":
    # Script entry point: run the full prompt-optimization pipeline.
    generate_optimal_prompt(description, test_cases, NUMBER_OF_PROMPTS)
