import threading

import torch
from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse
from ipex_llm.transformers import AutoModelForCausalLM
from pydantic import BaseModel
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer

# Initialize the model and tokenizer at import time (blocking disk/device I/O).
model_path = "C:\\models\\Qwen\\Qwen2.5-7B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_path)
# NOTE(review): the variable names are misleading — the model is loaded with
# load_in_low_bit="sym_int4" (4-bit symmetric quantization), not fp16/bf16.
# They are kept unchanged because the rest of the module references them.
model_in_fp16 = AutoModelForCausalLM.from_pretrained(
    pretrained_model_name_or_path=model_path,
    load_in_low_bit="sym_int4",
)
# Move the quantized model to the Intel XPU device (ipex_llm backend).
model_in_bf16_gpu = model_in_fp16.to('xpu')

# FastAPI application instance served by uvicorn (see __main__ guard).
app = FastAPI()

# Request schema (subset of the OpenAI completions API).
class CompletionRequest(BaseModel):
    # The user's prompt text; the server wraps it in Qwen's chat template.
    prompt: str
    # Upper bound on the number of newly generated tokens.
    max_tokens: int = 512
    # When true, the response is delivered as Server-Sent Events.
    stream: bool = False

# Streaming generation function
def generate_stream(prompt, max_tokens):
    """Yield generated text incrementally as Server-Sent Events.

    Wraps *prompt* in Qwen's chat template, runs ``generate()`` in a
    background thread, and yields each decoded text chunk as an SSE
    ``data:`` line as soon as the model produces it.

    Args:
        prompt: The user's message text.
        max_tokens: Maximum number of new tokens to generate.

    Yields:
        str: SSE-formatted chunks of the completion text only (the prompt
        is not echoed back).
    """
    messages = [
        {"role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."},
        {"role": "user", "content": prompt}
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model_in_bf16_gpu.device)

    # TextIteratorStreamer exposes chunks while generate() is still running.
    # The original code used TextStreamer (which only prints to stdout) and
    # yielded tokens after the blocking generate() returned — i.e. nothing
    # was actually streamed.  skip_prompt drops the echoed input tokens.
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )

    # Run the blocking generate() call in a background thread so this
    # generator can consume the streamer concurrently.
    worker = threading.Thread(
        target=model_in_bf16_gpu.generate,
        kwargs={**model_inputs, "max_new_tokens": max_tokens, "streamer": streamer},
        daemon=True,
    )
    worker.start()

    # Decode and forward chunks as the model emits them.
    for chunk in streamer:
        if chunk:
            yield f"data: {chunk}\n\n"
    worker.join()

# OpenAI-API-compatible inference endpoint
@app.post("/v1/completions")
async def create_completion(request: CompletionRequest):
    """Handle an OpenAI-style completion request.

    Streams via :func:`generate_stream` when ``request.stream`` is true;
    otherwise runs a full generation and returns the completion text with
    a minimal usage summary.
    """
    if request.stream:
        # Streaming response (Server-Sent Events).
        return StreamingResponse(
            generate_stream(request.prompt, request.max_tokens),
            media_type="text/event-stream"
        )
    else:
        # Non-streaming response.
        messages = [
            {"role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."},
            {"role": "user", "content": request.prompt}
        ]
        text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        model_inputs = tokenizer([text], return_tensors="pt").to(model_in_bf16_gpu.device)

        # NOTE(review): generate() is blocking inside an async handler, so it
        # stalls the event loop for the whole generation; consider offloading
        # to a thread pool (e.g. starlette's run_in_threadpool).
        generated_ids = model_in_bf16_gpu.generate(
            **model_inputs,
            max_new_tokens=request.max_tokens
        )

        # generate() returns the prompt tokens followed by the completion.
        # Slice off the prompt before decoding — the original decoded the
        # full sequence, echoing the chat template back to the client and
        # over-counting generated tokens.
        prompt_len = model_inputs.input_ids.shape[1]
        new_token_ids = generated_ids[0][prompt_len:]
        response = tokenizer.decode(new_token_ids, skip_special_tokens=True)

        return {
            "choices": [{"text": response}],
            "usage": {
                "generated_tokens": len(new_token_ids),
                "elapsed_time": 0  # timing can be added here if needed
            }
        }

# Script entry point: start the HTTP server.
if __name__ == "__main__":
    import uvicorn

    # Bind to all interfaces so the service is reachable from other hosts.
    uvicorn.run(app, port=8000, host="0.0.0.0")