# ====================================================================================
# 文件: preprocessing/base_llm_handler.py
# 描述: [V2 修复] 基础 LLM 异步请求处理器。
#      - 修复: 正确传递 vLLM API 所需的 model_id
# ====================================================================================

import aiohttp
import asyncio
from tqdm import tqdm
from tqdm.asyncio import tqdm_asyncio
import jsonlines


# --- CIKGRec [1] ---
# 提示词生成函数
def get_prompt_generate(history, field):
    """Build the user-turn prompt: the liked-item history as a bracketed,
    comma-separated list, e.g. ``[title a, title b]``.

    Args:
        history: iterable of item-title strings the user has liked.
        field: item category word (unused here; kept for signature parity
            with ``get_system_generate``).

    Returns:
        The formatted history string.
    """
    # The original computed `field.strip('s')` into an unused local `f`;
    # removed as dead code (behavior unchanged).
    return '[' + ', '.join(history) + ']'


def get_system_generate(history, field):
    """Build the system-turn prompt asking the LLM to infer user interests.

    Args:
        history: iterable of liked-item titles (unused here; kept for
            signature parity with ``get_prompt_generate``).
        field: item category word interpolated into the prompt
            (e.g. "movies").

    Returns:
        The system prompt string.
    """
    # The original computed `field.strip('s')` into an unused local `f`;
    # removed as dead code. The prompt text itself is runtime behavior and
    # is preserved byte-for-byte.
    return (f"You will be provided with a list of {field} an anonymous user has liked, and "
            f"your task is to infer the user's interests based on the list and your extensive knowledge. "
            f"List no more than five the top interests of this anonymous user. "
            f"No further explanation is needed. Please use a comma to split the interests.")


# --- CIKGRec [1] 结束 ---


async def _fetch(session, uid, payload, endpoint_url):
    """(internal) Send one chat-completion request to the vLLM endpoint.

    Args:
        session: shared ``aiohttp.ClientSession``.
        uid: user identifier, echoed back as ``custom_id`` (stringified).
        payload: JSON body for the /v1/chat/completions call.
        endpoint_url: full URL of the chat-completions endpoint.

    Returns:
        dict with ``custom_id`` plus either ``response_content`` (the
        first choice's message content, "" if absent) on success, or
        ``error`` describing the HTTP/timeout/transport failure.
    """
    try:
        # vLLM may batch for a while before answering; allow up to 5 min.
        request_timeout = aiohttp.ClientTimeout(total=300)
        async with session.post(endpoint_url, json=payload, timeout=request_timeout) as resp:
            if resp.status != 200:
                body = await resp.text()
                return {"custom_id": str(uid), "error": f"HTTP Status {resp.status}: {body}"}
            data = await resp.json()
            text = data.get("choices", [{}])[0].get("message", {}).get("content", "")
            return {"custom_id": str(uid), "response_content": text}
    except asyncio.TimeoutError:
        return {"custom_id": str(uid), "error": "Request timed out after 300s"}
    except Exception as exc:
        return {"custom_id": str(uid), "error": str(exc)}


async def _process_batches(user_his_dict, endpoint_url, vllm_model_id, field, concurrency_limit):
    """(internal) Fan out one request per user, bounded by a semaphore.

    [V2 fix]: accepts ``vllm_model_id`` and forwards it in each payload.

    Args:
        user_his_dict: mapping of user id -> list of liked-item titles.
        endpoint_url: full /v1/chat/completions URL.
        vllm_model_id: model id; must exactly match the "id" reported by
            vLLM's /v1/models endpoint.
        field: item category word used in the prompts (e.g. "movies").
        concurrency_limit: maximum number of in-flight requests.

    Returns:
        List of per-user result dicts in completion order (not input
        order; each dict carries "custom_id", so order does not matter).
    """
    semaphore = asyncio.Semaphore(concurrency_limit)

    async with aiohttp.ClientSession() as session:
        # Hoisted out of the loop: the original redefined this closure on
        # every iteration, which is loop-invariant work. Arguments are
        # passed explicitly, so there is no late-binding hazard.
        async def bounded_fetch(uid, payload):
            async with semaphore:
                return await _fetch(session, uid, payload, endpoint_url)

        tasks = []
        for uid, history in user_his_dict.items():
            messages = [
                {"role": "system", "content": get_system_generate(history, field)},
                {"role": "user", "content": get_prompt_generate(history, field)},
            ]

            # [!! core fix V2 !!]
            # The "model" field must match vLLM's /v1/models "id" exactly.
            payload = {
                "model": vllm_model_id,
                "messages": messages,
                "max_tokens": 60,    # cap the interest-list length
                "temperature": 0.0,  # deterministic output
            }
            tasks.append(bounded_fetch(uid, payload))

        # tqdm_asyncio.as_completed drives the progress bar as requests finish.
        results = [
            await fut
            for fut in tqdm_asyncio.as_completed(tasks, total=len(tasks), desc="[LLM] 正在生成用户兴趣")
        ]
        return results


def run_llm_requests(user_his_dict, host, vllm_model_id, field, concurrency, output_jsonl_file):
    """[Entry point] Run all vLLM/LLM requests concurrently and persist results.

    [V2 fix]: accepts ``vllm_model_id`` and passes it through to the payloads.

    Args:
        user_his_dict: mapping of user id -> list of liked-item titles.
        host: base URL of the vLLM server (trailing slashes tolerated).
        vllm_model_id: model id; must match vLLM's /v1/models "id" exactly.
        field: item category word used in the prompts (e.g. "movies").
        concurrency: maximum number of in-flight requests.
        output_jsonl_file: path of the JSONL file to write successes to.

    Returns:
        True if at least one request succeeded and was written, else False.
    """

    endpoint_url = f"{host.strip('/')}/v1/chat/completions"

    print(f"[LLM] 开始处理 {len(user_his_dict)} 个用户的并发请求...")
    print(f"[LLM] 并发数: {concurrency}")
    print(f"[LLM] API 端点: {endpoint_url}")
    print(f"[LLM] 使用模型 ID: {vllm_model_id}")  # (added logging)

    # Work around the already-running event loop inside Jupyter by patching
    # with nest_asyncio; otherwise fall back to a fresh loop.
    try:
        loop = asyncio.get_event_loop()
        if loop.is_running():
            print("[LLM] 检测到正在运行的 asyncio 循环。")
            import nest_asyncio
            nest_asyncio.apply()
            loop = asyncio.get_event_loop()
    except (ImportError, RuntimeError):
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit): RuntimeError covers "no current
        # event loop", ImportError covers a missing nest_asyncio.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    all_results = loop.run_until_complete(
        _process_batches(user_his_dict, endpoint_url, vllm_model_id, field, concurrency)
    )

    print("[LLM] 所有请求处理完毕。")

    # Persist successes; report failures to stdout and count them.
    success_count = 0
    error_count = 0
    with jsonlines.open(output_jsonl_file, mode='w') as writer:
        for res in all_results:
            if "response_content" in res:
                writer.write(res)
                success_count += 1
            else:
                print(f"[LLM] 错误 (用户 {res.get('custom_id', 'UNKNOWN')}): {res.get('error', 'Unknown error')}")
                error_count += 1

    print(f"[LLM] 成功保存 {success_count} 条结果到 {output_jsonl_file}")
    if error_count > 0:
        print(f"[LLM] 警告: {error_count} 个请求失败。")

    return success_count > 0