from fastapi import FastAPI
from sse_starlette.sse import EventSourceResponse
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM, TextStreamer
import asyncio
import torch
from concurrent.futures import ThreadPoolExecutor

from config.transformers_config import get_model_tokenizer

app = FastAPI()

# Load model once at import time (device placement is delegated to
# accelerate's automatic assignment inside get_model_tokenizer).
model, tokenizer = get_model_tokenizer()

# Thread pool used to run the blocking HF generation off the event loop.
executor = ThreadPoolExecutor(max_workers=4)


# 异步生成器
# Async generator bridging the blocking HF pipeline to an SSE stream.
async def async_generate(prompt: str, max_tokens: int):
    """Yield ``{"data": text}`` events for the generated continuation of *prompt*.

    The blocking transformers pipeline runs in the module-level thread pool;
    this coroutine concurrently drains an :class:`asyncio.Queue` that the
    worker thread feeds, so the HTTP response can stream while generation
    is still in progress.

    Args:
        prompt: Input text passed to the text-generation pipeline.
        max_tokens: Upper bound on newly generated tokens (``max_new_tokens``).

    Yields:
        dict: ``{"data": chunk}`` payloads suitable for EventSourceResponse.
    """
    generator = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        # device is intentionally omitted: the model was placed by accelerate
    )

    # NOTE(review): TextStreamer streams tokens to stdout only; the pipeline
    # still returns whole results. True per-token SSE streaming would need
    # TextIteratorStreamer — confirm intended granularity.
    streamer = TextStreamer(
        tokenizer,
        skip_prompt=True,  # do not echo the prompt back
        skip_special_tokens=True,
    )

    queue: asyncio.Queue = asyncio.Queue()
    loop = asyncio.get_event_loop()

    # Runs in the worker thread. asyncio.Queue is NOT thread-safe, so every
    # put is marshalled back onto the event-loop thread (bug fix: the original
    # called queue.put_nowait directly from this thread).
    def sync_generate():
        try:
            for chunk in generator(
                    prompt,
                    max_new_tokens=max_tokens,
                    streamer=streamer,
                    return_full_text=False,
            ):
                loop.call_soon_threadsafe(queue.put_nowait, chunk['generated_text'])
        except Exception as e:
            loop.call_soon_threadsafe(queue.put_nowait, f"[ERROR] {str(e)}")
        finally:
            loop.call_soon_threadsafe(queue.put_nowait, None)  # end-of-stream sentinel

    # Bug fix: the original `await`ed run_in_executor here, which blocked this
    # coroutine until generation fully finished before any item was yielded —
    # defeating streaming. Schedule the work and drain the queue concurrently.
    future = loop.run_in_executor(executor, sync_generate)

    # Stream items until the sentinel arrives.
    while True:
        item = await queue.get()
        if item is None:
            break
        yield {"data": item}

    # Surface any failure the executor itself raised (generator errors were
    # already reported through the queue above).
    await future


# 流式端点
@app.get("/async_stream")
async def stream_endpoint(prompt: str, max_tokens: int = 200):
    """SSE endpoint: stream generated text for *prompt* as server-sent events."""
    event_stream = async_generate(prompt, max_tokens)
    return EventSourceResponse(event_stream)



if __name__ == "__main__":
    # Local development entry point.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)