# Copyright (c) 2024-present AI-Labs

import time
import torch
import random
import string
from queue import Queue

from fastapi import APIRouter, HTTPException, Response

from ipex_llm.transformers import AutoModel

from sse_starlette.sse import EventSourceResponse

from configs import config
from .vllms import *


# Router for the text-generation endpoints ("文本生成" = "text generation")
router = APIRouter(
    prefix='',
    tags = ['文本生成']
)

# Service settings pulled from the shared config file
model_hub = config.service.chatvllm.model_hub    # 'modelscope' selects the ModelScope hub, anything else falls back to HF transformers (see tokenizer load below)
model_path = config.service.chatvllm.model_path  # local path / repo id of the weights
model_name = config.service.chatvllm.model_name  # model name reported by /v1/models and stream chunks

# Load the model weights through ipex_llm with 4-bit quantization
# (load_in_4bit=True), then run in half precision on the Intel XPU device.
model = AutoModel.from_pretrained(model_path,
                                  load_in_4bit=True,
                                  trust_remote_code=True,
                                  optimize_model=True,
                                  model_hub=model_hub)
model = model.half().to('xpu')

# Load the tokenizer from the same hub the weights came from
if model_hub == 'modelscope':
    from modelscope import AutoTokenizer
else:
    from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(model_path,
                                          trust_remote_code=True)


"""
定义生成ID的方法
"""
def generate_id(prefix: str, k=29) -> str:
    suffix = ''.join(random.choices(string.ascii_letters + string.digits, k=k))
    return f"{prefix}{suffix}"


"""
流式响应预测
"""
@torch.inference_mode
async def predict_stream(request: ChatCompletionRequest):
    has_send_first_chunk = False
    response_ = ""
    history = []
    for response, history in model.stream_chat(tokenizer, request.messages[-1].content, history=history):
        delta_text = response.replace(response_, "")
        response_ = response
        
        created_time = int(time.time())
        response_id = generate_id('chatcmpl-', 29)
        system_fingerprint = generate_id('fp_', 9)

        if not has_send_first_chunk:
            message = DeltaMessage(
                content="",
                role="assistant",
                function_call=None,
            )
            choice_data = ChatCompletionResponseStreamChoice(
                index=0,
                delta=message,
                finish_reason=None
            )
            chunk = ChatCompletionResponse(
                model=model_name,
                id=response_id,
                choices=[choice_data],
                created=created_time,
                system_fingerprint=system_fingerprint,
                object="chat.completion.chunk"
            )
            yield chunk.model_dump_json(exclude_unset=True)
            has_send_first_chunk = True

        message = DeltaMessage(
            content=delta_text,
            role="assistant",
            function_call=None,
        )
        choice_data = ChatCompletionResponseStreamChoice(
            index=0,
            delta=message,
            finish_reason=None
        )
        chunk = ChatCompletionResponse(
            model=model_name,
            id=response_id,
            choices=[choice_data],
            created=created_time,
            system_fingerprint=system_fingerprint,
            object="chat.completion.chunk"
        )
        yield chunk.model_dump_json(exclude_unset=True)
    yield '[DONE]'


"""
对外暴露的健康检查接口
"""
@router.get("/health")
async def health() -> Response:
    return Response(status_code=200)


"""
对外暴露的模型列表接口
"""
@router.get("/v1/models", response_model=ModelList)
async def list_models():
    model_card = ModelCard(id=model_name)
    return ModelList(data=[model_card])


"""
对外暴露的文本续写接口
"""
@router.post("/v1/completions", response_model=CompletionResponse)
async def create_completion(request: CompletionRequest):
    messages = []
    for prompt in request.prompt:
        message = ChatMessage(role="system",content=prompt)
        messages.append(message)

    gen_params = dict(
        messages=messages,
        temperature=request.temperature,
        top_p=request.top_p,
        max_tokens=request.max_tokens or 2048,
        echo=False,
        stream=request.stream,
        repetition_penalty=request.repetition_penalty,
        tools=request.tools,
        tool_choice=request.tool_choice,
    )
    response = ""
    # async for response in generate_stream(gen_params):
    #     pass

    if response["text"].startswith("\n"):
        response["text"] = response["text"][1:]
    response["text"] = response["text"].strip()

    usage = UsageInfo()

    function_call, finish_reason = None, "length"
    tool_calls = None

    choice_data = CompletionResponseChoice(
        index=0,
        text=response["text"],
        finish_reason=finish_reason,
    )
    task_usage = UsageInfo.model_validate(response["usage"])
    for usage_key, usage_value in task_usage.model_dump().items():
        setattr(usage, usage_key, getattr(usage, usage_key) + usage_value)

    return CompletionResponse(
        model=request.model,
        choices=[choice_data],
        object="text_completion",
        usage=usage
    )


"""
对外暴露的模型对话接口
"""
@router.post("/v1/chat/completions", response_model=ChatCompletionResponse)
async def create_chat_completion(request: ChatCompletionRequest):
    if len(request.messages) < 1 or request.messages[-1].role == "assistant":
        raise HTTPException(status_code=400, detail="Invalid request")

    text = tokenizer.apply_chat_template(request.messages,
                                         tokenize=False,
                                         add_generation_prompt=True)

    input_ids = tokenizer.encode([text], return_tensors="pt").to('xpu')
    output = model.generate(input_ids, max_new_tokens=32)

    if request.stream:
        predict_stream_generator = predict_stream(request)
        output = await anext(predict_stream_generator)
        if output:
            return EventSourceResponse(predict_stream_generator, media_type="text/event-stream")
    else:
        response, _ = model.chat(tokenizer, request.messages[-1].content, history=[])

        message = ChatMessage(
            role="assistant",
            content=response,
            function_call=None,
            tool_calls=None,
        )

        choice_data = ChatCompletionResponseChoice(
            index=0,
            message=message,
            finish_reason="stop",
        )

        return ChatCompletionResponse(
            model=request.model,
            choices=[choice_data],
            object="chat.completion",
            usage=None
        )
