# Batch-fetch completions through the OpenAI API, e.g. from OpenAI-API-compatible model servers such as vLLM or Ollama.

import asyncio
from datetime import datetime
import time

from openai import AsyncOpenAI, OpenAI


def construct_input(input_list, system_prompt=""):
    """Build an OpenAI chat ``messages`` list from alternating turns.

    Even-indexed entries of *input_list* become ``user`` messages and
    odd-indexed entries become ``assistant`` messages.  When
    *system_prompt* is non-empty it is prepended as a ``system`` message.
    """
    messages = []
    if system_prompt != "":
        messages.append({"role": "system", "content": system_prompt})
    turn_roles = ("user", "assistant")
    messages.extend(
        {"role": turn_roles[idx % 2], "content": text}
        for idx, text in enumerate(input_list)
    )
    return messages


async def generate_queue(async_client: AsyncOpenAI, model, worker_id, data, system_prompt):
    """Sequentially run chat completions for one worker's share of the data.

    Args:
        async_client: Shared AsyncOpenAI client (created and closed by the caller).
        model: Model name passed to the chat completions endpoint.
        worker_id: Index used only for progress logging.
        data: List of dicts, each containing a "prompt" key.  Each dict is
            mutated in place: "output" receives the answer text and "cot" the
            reasoning text (both "" when unavailable or on failure).
        system_prompt: Optional system message prepended to every request.

    Returns:
        The same item dicts, in order, with "output" and "cot" filled in.
    """
    result = []
    total = len(data)
    for i, item in enumerate(data):
        print(f"Worker {worker_id} task {i+1}/{total} is running.")
        try:
            completion = await async_client.chat.completions.create(
                model=model,
                messages=construct_input([item["prompt"]], system_prompt),
            )
            message = completion.choices[0].message
            answer = message.content
            # "reasoning_content" is a vLLM/DeepSeek extension and is absent on
            # standard OpenAI responses, so read it defensively instead of
            # risking an AttributeError.
            cot = getattr(message, "reasoning_content", None)
            item["output"] = answer if answer is not None else ""
            item["cot"] = cot if cot is not None else ""
            result.append(item)
            print(f"Worker {worker_id} task {i+1}/{total} is completed.")
        except Exception as e:
            # Best-effort batch processing: record an empty result and keep going.
            print(f"Worker {worker_id} task {i+1}/{total} failed with error: {e}")
            item["output"] = ""
            item["cot"] = ""  # keep failed records shape-consistent with successes
            result.append(item)
    print(f"Worker {worker_id} {total} is completed.")
    return result



async def batch_generate(base_url, api_key, model, dataset, concurrent=2, system_prompt=""):
    """Fan a dataset out over *concurrent* workers against an OpenAI-compatible API.

    Items are distributed round-robin across the workers, each worker processes
    its share sequentially, and the per-worker results are concatenated.

    Args:
        base_url: Base URL of the OpenAI-compatible service (vLLM, Ollama, ...).
        api_key: API key forwarded to the client.
        model: Model name for the chat completions endpoint.
        dataset: List of dicts, each containing a "prompt" key.
        concurrent: Number of concurrent worker coroutines.
        system_prompt: Optional system message prepended to every request.

    Returns:
        A flat list of the processed item dicts, or an empty list when the
        batch fails with an unexpected error (logged, not raised).
    """
    try:
        # Round-robin split so every worker gets a similar-sized share.
        queues = [[] for _ in range(concurrent)]
        for i, data in enumerate(dataset):
            queues[i % concurrent].append(data)
        start = datetime.now()
        # Create the client here rather than inside the workers so that a
        # single shared client is used and reliably closed once all finish.
        async with AsyncOpenAI(base_url=base_url, api_key=api_key) as async_client:
            tasks = [
                generate_queue(async_client, model, worker_id, queue, system_prompt)
                for worker_id, queue in enumerate(queues)
            ]
            per_worker_results = await asyncio.gather(*tasks)
        end = datetime.now()
        print(f"Total time: {end - start}, Total task: {len(dataset)}")
        # Flatten the per-worker result lists into one list.
        return [item for worker_result in per_worker_results for item in worker_result]
    except Exception as e:
        # Top-level guard: log and return an empty list instead of the
        # original implicit None, so callers can always iterate the result.
        print(">>> 异常！\n", e)
        return []