# Copyright (c) 2024-present AI-Labs

import random
import string
import time
from queue import Queue
from threading import Thread

import torch
from fastapi import APIRouter, HTTPException, Response
from ipex_llm.transformers.npu_model import AutoModelForCausalLM
from sse_starlette.sse import EventSourceResponse
from transformers import AutoTokenizer

from configs import config
from .vllms import *


class BaseStreamer:
    """
    Abstract interface for streamers consumed by `.generate()`.

    Subclasses receive tokens through `put()` while generation runs and are
    notified once via `end()` when generation finishes.
    """

    def put(self, value):
        """Called by `.generate()` each time new tokens are produced."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` after the last token to signal completion."""
        raise NotImplementedError()


class TextIteratorStreamer(BaseStreamer):
    """
    Streamer that buffers decoded text in a `Queue` so generated text can be
    consumed as an iterator (e.g. from the thread serving an SSE response).

    `.generate()` calls `put()` with new token ids; whole "words" are decoded
    and enqueued. `end()` flushes the remaining cache and enqueues a `None`
    sentinel that terminates iteration.

    Args:
        tokenizer: tokenizer used to decode token ids into text.
        skip_prompt: if True, drop the first `put()` call (the prompt tokens).
        timeout: seconds `__next__` waits on the queue before raising
            `queue.Empty`; `None` (backward-compatible default) blocks forever.
        decode_kwargs: extra keyword arguments forwarded to `tokenizer.decode`.
    """

    def __init__(self, tokenizer, skip_prompt: bool = False, timeout=None, **decode_kwargs):
        self.text_queue = Queue()
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.timeout = timeout
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []  # token ids decoded but not yet fully emitted
        self.print_len = 0  # chars of the decoded cache already emitted
        self.next_tokens_are_prompt = True

    def put(self, value):
        """
        Receives tokens, decodes them, and enqueues them as soon as they form
        entire words.

        Raises:
            ValueError: if `value` is a batch of more than one sequence.
        """
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        # The first call carries the prompt ids; optionally drop it.
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new tokens to the cache and decode the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, print up to the last space char (simple heuristic to avoid
        # emitting incomplete words, which may change with the next token).
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and signals the end of the stream."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue; on stream end, enqueue the sentinel too."""
        self.text_queue.put(text)
        if stream_end:
            self.text_queue.put(None)  # sentinel consumed by __next__

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        # Fix: the original returned the `None` sentinel to callers, so
        # `for chunk in streamer` never terminated (it yielded None and then
        # blocked forever). Terminate iteration on the sentinel instead.
        if value is None:
            raise StopIteration()
        return value


# Router for this module's endpoints (registered on the app elsewhere).
router = APIRouter(
    prefix='',
    tags = ['文本生成']
)

# Model location and served model name come from the service configuration.
model_path = config.service.chatvllm.model_path
model_name = config.service.chatvllm.model_name

# Load the pre-quantized ("low-bit") weights for NPU inference.
# NOTE(review): max_context_len=81920 / max_prompt_len=4096 are assumed to be
# the intended limits for the target NPU — confirm against deployment hardware.
model = AutoModelForCausalLM.load_low_bit(
    model_path,
    attn_implementation="eager",
    torch_dtype=torch.float16,
    optimize_model=True,
    max_context_len=81920,
    max_prompt_len=4096,
)
model.kv_len=81920  # keep the KV cache length in sync with max_context_len above
# Tokenizer matching the model weights.
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)


"""
定义生成ID的方法
"""
def generate_id(prefix: str, k=29) -> str:
    suffix = ''.join(random.choices(string.ascii_letters + string.digits, k=k))
    return f"{prefix}{suffix}"


"""
流式响应预测
"""
async def predict_stream(streamer):
    has_send_first_chunk = False

    for delta_text in streamer:
        created_time = int(time.time())
        response_id = generate_id('chatcmpl-', 29)
        system_fingerprint = generate_id('fp_', 9)
        if not has_send_first_chunk:
            message = DeltaMessage(
                content="",
                role="assistant",
                function_call=None,
            )
            choice_data = ChatCompletionResponseStreamChoice(
                index=0,
                delta=message,
                finish_reason=None
            )
            chunk = ChatCompletionResponse(
                model=model_name,
                id=response_id,
                choices=[choice_data],
                created=created_time,
                system_fingerprint=system_fingerprint,
                object="chat.completion.chunk"
            )
            yield chunk.model_dump_json(exclude_unset=True)
            has_send_first_chunk = True

        message = DeltaMessage(
            content=delta_text,
            role="assistant",
            function_call=None,
        )
        choice_data = ChatCompletionResponseStreamChoice(
            index=0,
            delta=message,
            finish_reason=None
        )
        chunk = ChatCompletionResponse(
            model=model_name,
            id=response_id,
            choices=[choice_data],
            created=created_time,
            system_fingerprint=system_fingerprint,
            object="chat.completion.chunk"
        )
        yield chunk.model_dump_json(exclude_unset=True)

    yield '[DONE]'


"""
对外暴露的健康检查接口
"""
@router.get("/health")
async def health() -> Response:
    return Response(status_code=200)


"""
对外暴露的模型列表接口
"""
@router.get("/v1/models", response_model=ModelList)
async def list_models():
    model_card = ModelCard(id=model_name)
    return ModelList(data=[model_card])


"""
对外暴露的文本续写接口
"""
@router.post("/v1/completions", response_model=CompletionResponse)
async def create_completion(request: CompletionRequest):
    messages = []
    for prompt in request.prompt:
        message = ChatMessage(role="system",content=prompt)
        messages.append(message)

    gen_params = dict(
        messages=messages,
        temperature=request.temperature,
        top_p=request.top_p,
        max_tokens=request.max_tokens or 2048,
        echo=False,
        stream=request.stream,
        repetition_penalty=request.repetition_penalty,
        tools=request.tools,
        tool_choice=request.tool_choice,
    )
    response = ""
    # async for response in generate_stream(gen_params):
    #     pass

    if response["text"].startswith("\n"):
        response["text"] = response["text"][1:]
    response["text"] = response["text"].strip()

    usage = UsageInfo()

    function_call, finish_reason = None, "length"
    tool_calls = None

    choice_data = CompletionResponseChoice(
        index=0,
        text=response["text"],
        finish_reason=finish_reason,
    )
    task_usage = UsageInfo.model_validate(response["usage"])
    for usage_key, usage_value in task_usage.model_dump().items():
        setattr(usage, usage_key, getattr(usage, usage_key) + usage_value)

    return CompletionResponse(
        model=request.model,
        choices=[choice_data],
        object="text_completion",
        usage=usage
    )


"""
对外暴露的模型对话接口
"""
@router.post("/v1/chat/completions", response_model=ChatCompletionResponse)
async def create_chat_completion(request: ChatCompletionRequest):
    # 先判断请求是否合法
    if len(request.messages) < 1 or request.messages[-1].role == "assistant":
        raise HTTPException(status_code=400, detail="Invalid request")

    # 应用对话模板
    text = tokenizer.apply_chat_template(request.messages,
                                         tokenize=False,
                                         add_generation_prompt=True)

    input_ids = tokenizer([text], return_tensors="pt").input_ids

    # 是否流式响应的处理
    if request.stream:
        streamer = TextIteratorStreamer(tokenizer=tokenizer, skip_prompt=True, skip_special_tokens=True)
    else:
        streamer = None

    # 模型推理，生成响应结果
    output = model.generate(
        input_ids, num_beams=1, do_sample=False, max_new_tokens=8192, streamer=streamer
    )

    if request.stream:
        pass

    # 获取推理结果并返回
    output_str = tokenizer.decode(output[0], skip_special_tokens=True, skip_prompt=True).split("assistant\n")[-1]
        
    message = ChatMessage(
        role="assistant",
        content=output_str,
        function_call=None,
        tool_calls=None,
    )

    choice_data = ChatCompletionResponseChoice(
        index=0,
        message=message,
        finish_reason="stop",
    )

    return ChatCompletionResponse(
        model=request.model,
        choices=[choice_data],
        object="chat.completion",
        usage=None
    )

    # inputs = tokenizer([text], return_tensors="pt").input_ids
    # generation_kwargs = dict(num_beams=1, do_sample=False, streamer=streamer, max_new_tokens=32)

    # thread = Thread(target=model.generate, args=[inputs], kwargs=generation_kwargs)
    # thread.start()
    
    # predict_stream_generator = predict_stream(streamer)
    # return EventSourceResponse(predict_stream_generator, media_type="text/event-stream")
