import json
import asyncio
from tqdm import tqdm
from loguru import logger
from collections import deque
from openai import AsyncOpenAI

# Initialize the async OpenAI client (reads API key / base URL from environment).
logger.info("Initializing OpenAI client...")
client = AsyncOpenAI()

logger.info("OpenAI client initialized.")

# Output file path: judged comparisons are appended here as JSON Lines.
data_file = "results.jsonl"

# Shared result buffer; flushed to data_file in batches (guarded by a lock in main).
buffer = deque()


# 写入文件，追加写入
# Append records to a JSON Lines file.
def write_to_file(data, output_file):
    """Append every item in *data* to *output_file*, one JSON object per line.

    Args:
        data: Iterable of JSON-serializable objects.
        output_file: Path of the file to append to (created if missing).
    """
    with open(output_file, "a", encoding="utf-8") as f:
        # ensure_ascii=False keeps non-ASCII (e.g. Chinese) text readable.
        f.writelines(
            json.dumps(item, ensure_ascii=False) + "\n" for item in data
        )


async def get_response_with_retry(prompt, semaphore, model, max_retries=5):
    """Query the chat-completions API with bounded concurrency and retry.

    Args:
        prompt: User message sent to the model.
        semaphore: asyncio.Semaphore limiting concurrent in-flight requests.
        model: Model identifier passed to the API.
        max_retries: Maximum number of attempts before giving up.

    Returns:
        A dict with ``instruction`` / ``input`` / ``output`` keys on success,
        where ``output`` embeds the model's reasoning inside <think> tags,
        or ``None`` once all retries are exhausted.
    """
    for attempt in range(max_retries):
        try:
            # Hold the semaphore only for the API call itself, so the backoff
            # sleep below does not keep a concurrency slot blocked for other tasks.
            async with semaphore:
                completion = await client.chat.completions.create(
                    model=model,
                    messages=[
                        {"role": "user", "content": prompt},
                    ],
                    max_tokens=8192,
                    stream=False,
                    extra_body={'chat_template_kwargs': {'enable_thinking': True}}
                )
            message = completion.choices[0].message
            # reasoning_content may be None when the model emits no thinking
            # block; concatenating None would raise and waste a retry.
            reasoning = message.reasoning_content or ""
            return dict(
                instruction=prompt,
                input='',
                output="<think>\n" + reasoning + "</think>\n" + message.content,
            )
        except Exception as e:
            if attempt == max_retries - 1:
                logger.error(f"Failed to get response: {str(e)}")
                return None
            # Exponential backoff: 1s, 2s, 4s, ... between attempts.
            await asyncio.sleep(2**attempt)

    # Defensive fallback; unreachable when max_retries >= 1 because the loop
    # always returns on the final attempt.
    logger.error(
        f"Failed to get response after {max_retries} retries for prompt '{prompt}'."
    )
    return None

def load_jsonl(file_path):
    """Load a JSONL file and return a list of the parsed JSON objects.

    Blank lines (including a trailing newline at end of file) are skipped
    instead of raising ``json.JSONDecodeError`` on the empty string.

    Args:
        file_path: Path to the JSONL file.

    Returns:
        List of parsed JSON objects, one per non-empty line.
    """
    data = []
    with open(file_path, 'r', encoding='utf-8') as file:
        for line in file:
            stripped = line.strip()
            # Skip empty lines — json.loads("") would raise.
            if stripped:
                data.append(json.loads(stripped))
    return data


async def main(semaphore_num: int, data1: list, data2: list, buffer_size: int):
    """Judge paired predictions from two models concurrently and persist results.

    For each index, builds a Chinese judging prompt containing the user request
    and both predictions, asks the judge model which reply is better, and
    appends the results to ``data_file`` in batches.

    Args:
        semaphore_num: Maximum number of concurrent API requests.
        data1: Records with ``prompt`` and ``predict`` keys (reply 1).
        data2: Records with ``predict`` keys, paired by index with data1 (reply 2).
        buffer_size: Flush the shared buffer to disk once it reaches this size.
    """
    # Pair records positionally; cap at the shorter list so a length mismatch
    # between the two files cannot raise IndexError.
    data_num = min(len(data1), len(data2))
    semaphore = asyncio.Semaphore(semaphore_num)
    lock = asyncio.Lock()  # serializes access to the shared buffer
    pbar = tqdm(total=data_num, desc="Processing")

    prompts = []
    for info1, info2 in zip(data1, data2):
        prompt = "根据用户请求，请判定以下两个回复内容的质量哪个更好。请在最后一行给出你的判断，回复1还是回复2的质量更好。"
        prompt += "\n 用户请求：" + info1['prompt'] + "\n"

        response1 = "\n 回复1： " + info1['predict'] + "\n"
        response2 = "\n 回复2： " + info2['predict'] + "\n"

        prompt += response1
        prompt += response2

        prompts.append(prompt)

    tasks = [
        get_response_with_retry(prompt, semaphore, model="model/qwen3-235b-a22b")
        for prompt in prompts
    ]

    for future in asyncio.as_completed(tasks):
        try:
            result = await future
            async with lock:  # synchronize writes to the shared buffer
                pbar.update(1)
                # Failed requests return None; writing those would emit bare
                # "null" lines into the output JSONL, so skip them.
                if result is not None:
                    buffer.append(result)
                if len(buffer) >= buffer_size:  # flush once the buffer is full
                    write_to_file(buffer, data_file)
                    buffer.clear()
        except Exception as e:
            logger.error(f"Error occurred while processing: {e}")
            continue

    # Flush whatever is left in the buffer.
    async with lock:
        if buffer:
            write_to_file(buffer, data_file)
            buffer.clear()

    pbar.close()
    logger.info("All tasks completed.")


if __name__ == "__main__":
    # Load the paired prediction files: base model vs. fine-tuned model.
    baseline_records = load_jsonl(
        file_path="data/from_qwen3/generated_predictions.jsonl"
    )
    tuned_records = load_jsonl(
        file_path="data/from_qwen3_tuning/generated_predictions.jsonl"
    )

    asyncio.run(
        main(
            semaphore_num=20,
            data1=baseline_records,
            data2=tuned_records,
            buffer_size=100,
        )
    )
