import json
import multiprocessing
import os
from argparse import ArgumentParser

import openai
from tqdm import tqdm

# Global OpenAI client configuration (proxy endpoint + auth).
openai.base_url = "https://free.v36.cm/v1/"
openai.default_headers = {"x-foo": "true"}
# SECURITY: hard-coding API keys in source control leaks credentials the
# moment the file is shared.  Prefer the OPENAI_API_KEY environment
# variable; fall back to the embedded key only so existing runs keep
# working unchanged.  TODO: rotate the exposed key and drop the fallback.
openai.api_key = os.environ.get(
    "OPENAI_API_KEY",
    "sk-GbQ7V9AKYPUSLsu88e76A5C7B152449cAbFa0f5eCbD18aE3",
)

# Judge prompt for open-ended instructions: asks the judge model to grade a
# response on a 1-5 scale (relevance / accuracy / conciseness) and to output
# only the numeric score.  Placeholders {prompt} and {response} are filled
# via str.replace in generate().
# NOTE(review): unlike meta_prompt_qa, this template is not .strip()'ed, so
# its leading/trailing newlines are sent as part of the prompt — presumably
# harmless, but confirm this asymmetry is intentional.
meta_prompt_open = """
I need your help to evaluate the performance of several models in the speech interaction scenario. The models will receive a speech input from the user, which they need to understand and respond to with a speech output.
Your task is to rate the model’s responses based on the provided user input transcription [Instruction] and the model’s output transcription [Response].

Please evaluate the response on a scale of 1 to 5:
1 point: The response is largely irrelevant, incorrect, or fails to address the user’s query. It may be off-topic or provide incorrect information.
2 points: The response is somewhat relevant but lacks accuracy or completeness. It may only partially answer the user’s question or include extraneous information.
3 points: The response is relevant and mostly accurate, but it may lack conciseness or include unnecessary details that don’t contribute to the main point.
4 points: The response is relevant, accurate, and concise, providing a clear answer to the user’s question without unnecessary elaboration.
5 points: The response is exceptionally relevant, accurate, and to the point. It directly addresses the user’s query in a highly effective and efficient manner, providing exactly the information needed.

Below are the transcription of user’s instruction and models’ response:
### [Instruction]: {prompt}
### [Response]: {response}

After evaluating, please output the score only without anything else.
You don’t need to provide any explanations.
"""

# Judge prompt for reference-based QA: asks for a bare "Yes"/"No" verdict
# comparing the candidate answer against the reference answer.  Placeholders
# {prompt}, {reference}, {response} are filled via str.replace in generate().
meta_prompt_qa = """
### Question
{prompt}

### Reference answer
{reference}

### Candidate answer
{response}

Is the candidate answer correct based on the question and reference answer? 
Please only output a single "Yes" or "No". Do not output anything else.
""".strip()

def generate(item):
    """Score one item by asking gpt-4o-mini to act as a judge.

    Mutates ``item`` in place: sets ``item['score']`` to the three judge
    outputs (one per sampled completion), or ``["Error"] * 3`` when the
    request fails.  Returns the (mutated) item.
    """
    try:
        # Pick the template and the placeholder substitutions.  str.replace
        # is used instead of str.format so literal braces in the data cannot
        # collide with format-field syntax.
        if "reference" in item:
            template = meta_prompt_qa
            fields = (
                ("{prompt}", item["prompt"]),
                ("{reference}", item["reference"]),
                ("{response}", item["response"][0]),
            )
        else:
            template = meta_prompt_open
            fields = (
                ("{prompt}", item["prompt"]),
                ("{response}", item["response"][0]),
            )
        prompt = template
        for placeholder, value in fields:
            prompt = prompt.replace(placeholder, value)

        # Modern (v1) OpenAI client call; n=3 samples three judgements.
        completion = openai.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {
                    "role": "system",
                    "content": "You are a helpful assistant who tries to help answer the user's question."
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ],
            temperature=0.5,
            top_p=0.95,
            n=3,
            max_tokens=1024,
            frequency_penalty=0,
            presence_penalty=0
        )

        # Collect one stripped answer per sampled choice.
        item['score'] = [
            choice.message.content.strip() for choice in completion.choices
        ]
    except Exception as exc:
        # Per-item boundary: log and degrade so one bad item does not
        # abort the whole batch.
        print(f"Error processing item {item.get('id', 'unknown')}: {str(exc)}")
        item['score'] = ["Error"] * 3
    return item

def main():
    """CLI entry point.

    Reads a JSONL file of items (``--src_file``), scores each one with
    :func:`generate` in a process pool, and writes the results to
    ``result-<basename>`` in the same directory.
    """
    parser = ArgumentParser()
    parser.add_argument('--src_file', required=True,
                        help="Input JSONL file, one item per line.")
    # Backward-compatible generalization: pool size was hard-coded to 1.
    parser.add_argument('--num_workers', type=int, default=1,
                        help="Worker processes for the scoring pool.")
    args = parser.parse_args()

    # Read the input.  encoding='utf-8' is pinned so reads match the
    # utf-8 write below instead of depending on the platform locale.
    data = []
    with open(args.src_file, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # tolerate blank lines / trailing newline
            try:
                data.append(json.loads(line))
            except json.JSONDecodeError:
                print(f"Invalid JSON: {line}")

    # Fan out the (I/O-bound) API calls across worker processes.
    with multiprocessing.Pool(args.num_workers) as pool:
        scores = list(tqdm(pool.imap(generate, data), total=len(data)))

    # Write results as result-<basename> next to the source file.
    dir_path = os.path.dirname(args.src_file)
    base_name = os.path.basename(args.src_file)
    tgt_file = os.path.join(dir_path, f"result-{base_name}")
    with open(tgt_file, "w", encoding='utf-8') as f:
        for d in scores:
            f.write(json.dumps(d, ensure_ascii=False) + "\n")

if __name__ == '__main__':
    main()