import asyncio
from transformers import AutoTokenizer, AutoModelForCausalLM
from .vector_search import search_jokes, search_joke
import torch

from config import Config
# === Load a lightweight model (e.g. Qwen2-mini or TinyLlama) ===
MODEL_NAME = Config.GEN_MODEL_NAME

# The tokenizer load does not depend on the device, so do it once
# (the original duplicated this call in both branches below).
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

# Pick precision and device mapping based on GPU availability:
# fp16 + automatic sharding on CUDA, fp32 pinned to CPU otherwise.
if torch.cuda.is_available():
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        torch_dtype=torch.float16,
        device_map="auto",
    )
else:
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        torch_dtype=torch.float32,
        device_map=None,
    ).to("cpu")

# Concurrency limit and timeouts for inference/search (seconds).
_GENERATE_SEM = asyncio.Semaphore(Config.GEN_CONCURRENCY)
_INFER_TIMEOUT_S = Config.INFER_TIMEOUT_S
_SEARCH_TIMEOUT_S = Config.SEARCH_TIMEOUT_S


async def get_ai_reply(user_msg: str) -> str:
    """Generate a short, topical AI reply without blocking the event loop.

    Runs vector search and model inference in the default thread-pool
    executor, each guarded by a timeout; failures degrade to fallback
    text rather than raising.

    Args:
        user_msg: The user's raw input message.

    Returns:
        The generated reply, or a canned fallback on timeout/error.
    """
    loop = asyncio.get_running_loop()

    # Vector retrieval with a timeout so a stuck search cannot stall the
    # reply. Best-effort: any failure degrades to "no references".
    # (Uses the module-level _SEARCH_TIMEOUT_S constant consistently,
    # instead of re-reading Config here.)
    try:
        jokes = await asyncio.wait_for(
            loop.run_in_executor(None, search_jokes, user_msg),
            timeout=_SEARCH_TIMEOUT_S,
        )
    except Exception:
        jokes = []
    jokes = jokes or []  # guard against search_jokes returning None

    # Render references as a bulleted block; every item (including the
    # first) gets a leading "- " so the model sees a uniform list.
    refs_block = "- " + "\n- ".join(jokes) if jokes else "(无)"

    # Build the prompt via the chat template for better alignment.
    messages = [
        {
            "role": "system",
            "content": "你是一个擅长讲笑话的中文助手，任务是根据用户的话题创作笑话。请严格遵循：\n"
                    "1. 只关注用户直接说的内容（用户消息是创作核心）；\n"
                    "2. 你会收到一些「参考笑话」，这些是给你独自参考的素材（用户看不到这些，你也不能向用户提及它们）；\n"
                    "3. 用参考笑话的灵感创作原创笑话（可融合梗或角度，禁止直接复制、拼接）；\n"
                    "4. 笑话结尾必须加一句简短追问（如：这个好笑吗？还想听别的吗？）。"
        },
        {
            "role": "user",
            "content": user_msg  # only the raw user input — kept clean
        },
        {
            # Reference material is passed as an assistant turn so the
            # model treats it as internal context, not user-visible text.
            "role": "assistant",
            "content": f"【仅助手可见的参考笑话，用于创作灵感】\n{refs_block}"
        }
    ]

    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )

    def _generate() -> str:
        """Blocking model inference; runs inside the executor."""
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        output = model.generate(
            **inputs,
            max_new_tokens=Config.GEN_MAX_NEW_TOKENS,
            do_sample=True,
            temperature=Config.GEN_TEMPERATURE,
            top_p=Config.GEN_TOP_P,
            repetition_penalty=Config.GEN_REPETITION_PENALTY,
            eos_token_id=tokenizer.eos_token_id,
        )
        # Decode only the newly generated tokens so the prompt is not
        # echoed back in the reply.
        new_tokens = output[0][inputs["input_ids"].shape[1]:]
        return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

    # Semaphore bounds concurrent inference; timeout caps a single run.
    async with _GENERATE_SEM:
        try:
            reply = await asyncio.wait_for(loop.run_in_executor(None, _generate), timeout=_INFER_TIMEOUT_S)
        except Exception:
            reply = "让我想想一个更好玩的说法…"  # fallback on timeout or error
    return reply or "好嘞！"


async def warmup():
    """Lightweight startup warm-up, run off the event-loop thread."""
    loop = asyncio.get_running_loop()

    def _prime() -> None:
        # Exercise the tokenizer and a one-token generate pass so weight
        # loading and any graph/kernel construction happen up front.
        tokenizer("你好")
        warm_inputs = tokenizer("你好", return_tensors="pt").to(model.device)
        model.generate(**warm_inputs, max_new_tokens=1)

    await loop.run_in_executor(None, _prime)
