import asyncio
import threading
from typing import AsyncGenerator, Generator

import torch
from fastapi import FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from sse_starlette.sse import EventSourceResponse
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

from config.transformers_config import get_model_tokenizer

# Initialize the FastAPI application.
app = FastAPI(title="AI Stream API")

# Allow cross-origin requests.
# NOTE(review): wildcard origins/methods/headers is convenient for development
# but should be narrowed before production deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)


# Load model and tokenizer once at import time; shared by all requests.
model, tokenizer = get_model_tokenizer()

# Custom streaming handler bridging the synchronous transformers generation
# thread and the asynchronous SSE response.
class APIStreamer(TextStreamer):
    """TextStreamer subclass that forwards generated text into an asyncio queue.

    ``model.generate`` runs in a worker thread and calls ``on_finalized_text``
    from that thread, while ``generator`` is consumed on the event loop.
    ``asyncio.Queue`` is NOT thread-safe, so the worker must hand items to the
    loop via ``loop.call_soon_threadsafe`` instead of calling ``put_nowait``
    directly (a direct cross-thread put may never wake the awaiting consumer).
    """

    def __init__(self, tokenizer, timeout: int = 30):
        super().__init__(tokenizer, skip_prompt=True, skip_special_tokens=True)
        self.queue: asyncio.Queue = asyncio.Queue()
        self.stop_signal = threading.Event()
        self.timeout = timeout
        # Must be constructed on the event-loop thread (the request handler)
        # so we can capture the loop for thread-safe hand-off below.
        self.loop = asyncio.get_running_loop()

    def _enqueue(self, item) -> None:
        """Schedule a thread-safe put onto the queue (callable from any thread)."""
        self.loop.call_soon_threadsafe(self.queue.put_nowait, item)

    def on_finalized_text(self, text: str, stream_end: bool = False) -> None:
        """Called by TextStreamer (from the worker thread) with each text chunk."""
        if not self.stop_signal.is_set():
            # NOTE(review): strip() removes per-chunk edge whitespace, which can
            # merge adjacent words when the client reassembles the stream —
            # kept as-is for backward compatibility with existing clients.
            self._enqueue(text.strip())

        if stream_end:
            self._enqueue(None)  # end-of-stream sentinel

    async def generator(self) -> AsyncGenerator[dict, None]:
        """Async generator yielding SSE event dicts until end, stop, or timeout."""
        loop = asyncio.get_running_loop()
        start_time = loop.time()
        while True:
            try:
                # Overall wall-clock budget for the whole stream.
                if (loop.time() - start_time) > self.timeout:
                    yield {"data": "[ERROR] 生成超时"}
                    break

                # Wake at least once per second to re-check timeout/stop signal.
                item = await asyncio.wait_for(self.queue.get(), timeout=1)
                if item is None:  # normal end of stream
                    break
                yield {"data": item}

            except asyncio.TimeoutError:
                if self.stop_signal.is_set():
                    break


# Streaming generation endpoint.
@app.get("/api/stream")
async def stream_generate(prompt: str,  request: Request, max_tokens: int = 200,):
    """Stream model output for *prompt* to the client as Server-Sent Events.

    Runs blocking ``model.generate`` in a daemon worker thread feeding an
    APIStreamer, drains the streamer's queue into the SSE response, and
    watches for client disconnects to tear the stream down early.
    """
    streamer = APIStreamer(tokenizer)
    # Event loop of this request: used to hand results from the worker thread
    # to the consumer safely — asyncio.Queue is not thread-safe, so all
    # cross-thread puts go through call_soon_threadsafe.
    loop = asyncio.get_running_loop()

    # Generation parameters.
    generate_kwargs = {
        "input_ids": tokenizer.encode(prompt, return_tensors="pt").to(model.device),
        "streamer": streamer,
        "max_new_tokens": max_tokens,
        "temperature": 0.7,
        "do_sample": True,
    }

    # Run the blocking generation in a separate thread.
    def generate():
        try:
            model.generate(**generate_kwargs)
            # Redundant with the streamer's stream_end sentinel, but harmless:
            # the consumer stops at the first None it sees.
            loop.call_soon_threadsafe(streamer.queue.put_nowait, None)
        except Exception as e:
            loop.call_soon_threadsafe(streamer.queue.put_nowait, f"[ERROR] {str(e)}")
            # Follow the error with the end sentinel so the stream closes
            # immediately instead of waiting for the 1s stop-signal poll.
            loop.call_soon_threadsafe(streamer.queue.put_nowait, None)
        finally:
            streamer.stop_signal.set()

    # Daemonize so a hung generate() cannot keep the process alive on shutdown.
    thread = threading.Thread(target=generate, daemon=True)
    thread.start()

    # Cleanup when the client disconnects mid-stream.
    async def disconnect_handler():
        while thread.is_alive():
            if await request.is_disconnected():
                # NOTE(review): this stops forwarding output but does not
                # interrupt model.generate() itself; aborting generation early
                # would require a StoppingCriteria checking stop_signal.
                streamer.stop_signal.set()
                # Join off the event loop so other requests are not blocked
                # for up to a second.
                await asyncio.to_thread(thread.join, 1)
                break
            await asyncio.sleep(0.5)

    # Drain the streamer into SSE events; always cancel the watcher task.
    async def event_generator():
        watcher = asyncio.create_task(disconnect_handler())
        try:
            async for event in streamer.generator():
                yield event
        finally:
            watcher.cancel()

    return EventSourceResponse(event_generator())


if __name__ == "__main__":
    # Development entry point: `python thisfile.py` serves on all interfaces.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)
