
import os
import time
from typing import Optional, List, Union, Literal, Generator
from threading import Thread
print("导入 torch")
import torch
print("导入 predictor")
from predictor import LanguageModelPredictor, GenerateTextConfig
print("导入 fastapi")
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from sse_starlette import EventSourceResponse
from pydantic import BaseModel, Field
# FastAPI application with fully permissive CORS so browser clients on any
# origin can reach the API.
# NOTE(review): wildcard allow_origins together with allow_credentials=True is
# disallowed by the CORS spec; Starlette compensates by echoing the request
# origin — confirm this is intended for deployment.
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class ModelCard(BaseModel):
    """One entry of the OpenAI-style ``/models`` listing."""
    id: str
    object: str = "model"
    # Unix timestamp captured when the card instance is created.
    created: int = Field(default_factory=lambda: int(time.time()))
    owned_by: str = "owner"
    root: Optional[str] = None
    parent: Optional[str] = None
    permission: Optional[list] = None


class ModelList(BaseModel):
    """OpenAI-style ``/models`` response: a list of ModelCard entries."""
    object: str = "list"
    data: List[ModelCard] = []


class ChatMessage(BaseModel):
    """A single chat turn with a fixed role and its text content."""
    role: Literal["user", "assistant", "system"]
    content: str


class DeltaMessage(BaseModel):
    """Incremental message fragment used in streaming chunks; all fields optional."""
    role: Optional[Literal["user", "assistant", "system"]] = None
    content: Optional[str] = None


class ChatCompletionRequest(BaseModel):
    """Request body for POST /chat/completions (OpenAI-compatible subset)."""
    model: str
    messages: List[ChatMessage]
    # Sampling knobs; None means "use the predictor's defaults".
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    max_length: Optional[int] = None
    stream: Optional[bool] = False


class ChatCompletionResponseChoice(BaseModel):
    """A completed (non-streaming) choice with its full message."""
    index: int
    message: ChatMessage
    finish_reason: Literal["stop", "length"]


class ChatCompletionResponseStreamChoice(BaseModel):
    """A streaming choice carrying a delta fragment."""
    index: int
    delta: DeltaMessage
    # No default: every constructor call in this file passes it explicitly
    # (None while streaming, "stop" on the final chunk).
    finish_reason: Optional[Literal["stop", "length"]]


class ChatCompletionResponse(BaseModel):
    """Top-level response envelope for both streaming chunks and full completions."""
    model: str
    object: Literal["chat.completion", "chat.completion.chunk"]
    choices: List[Union[ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice]]
    # Unix timestamp defaulting to response-creation time.
    created: Optional[int] = Field(default_factory=lambda: int(time.time()))


print("加载程序")
# Directories scanned for models; every subdirectory of these is treated as a
# loadable model name.
model_dir = [
    "./model",
    "./models"
]
models = []
for base_dir in model_dir:
    # Skip search roots that do not exist; only subdirectories count as models.
    if not os.path.isdir(base_dir):
        continue
    models += [name for name in os.listdir(base_dir)
               if os.path.isdir(os.path.join(base_dir, name))]
print("models: " + str(models))

selected_model_name = models[0] if models else ""
load_status = "未加载任何模型"

# Generation defaults
DEFAULT_MAX_NEW_TOKENS = 512
# Global predictor instance; populated by load_model().
predictor = None

if torch.cuda.is_available():
    # BUG FIX: the previous `torch.device("cuda")` call constructed a device
    # object and discarded it — it selected nothing. Enabling TF32 matmul is
    # the only CUDA-wide setting actually applied here.
    torch.backends.cuda.matmul.allow_tf32 = True

def get_model_path(model_name: str) -> Optional[str]:
    """Return the path of *model_name* under the first search dir containing it.

    Args:
        model_name: directory name of a model (as listed in ``models``).

    Returns:
        The joined path, or None when no search directory contains the model.
    """
    for base_dir in model_dir:
        candidate = os.path.join(base_dir, model_name)
        if os.path.exists(candidate):
            return candidate
    # Explicit miss instead of the previous implicit (undocumented) None.
    return None

def load_model(model_name: str, dtype=torch.bfloat16, use_airllm: bool = False):
    """Load (or swap in) the named model into the global ``predictor``.

    Args:
        model_name: model directory name under one of ``model_dir``.
        dtype: torch dtype to load the weights with.
        use_airllm: BUG FIX — the interactive startup script passes a third
            positional argument, which previously raised TypeError before the
            server could start. Accepted for compatibility but not forwarded:
            LanguageModelPredictor is not given this flag here.
            TODO(review): wire it through or remove the startup prompt.

    Returns:
        A human-readable load status string (also stored in ``load_status``).
    """
    global predictor, load_status
    print(f"加载模型: {model_name}（{dtype}）")
    try:
        if predictor is not None:
            # Free the previous model's resources before loading a new one.
            predictor.release()
        model_path = get_model_path(model_name)
        predictor = LanguageModelPredictor(model_path, dtype)
        load_status = f"已加载模型: {model_name}"
    except Exception as e:
        # BUG FIX: the status previously claimed success even when loading
        # raised; report the failure instead.
        print(e)
        load_status = f"加载失败: {model_name}"
    return load_status

@torch.inference_mode()
def chat(
        input_text,
        history: Optional[list] = None,
        system_prompt: str = "",
        function: Literal["generate", "chat"] = "generate",
        stream=False,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        temperature: Optional[float] = None,
        max_length: Optional[int] = None,
        max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,
    ):
    """Generate an assistant reply for *input_text* given a chat *history*.

    This is a generator in both modes:
      - ``stream=True``: yields the cumulative text produced so far on every
        chunk (not deltas), with the predictor's EOS token stripped.
      - ``stream=False``: yields exactly one item, the full response string.

    ``history`` is mutated in place (system prompt and the user turn are
    appended) — callers passing a shared list should expect that.

    Raises:
        Exception: when no model is loaded, or *function* is not "generate".
    """
    global predictor
    if predictor is None:
        raise Exception("请先加载模型")

    # BUG FIX: ``history`` previously defaulted to a shared mutable list ([]),
    # so turns accumulated across calls made without an explicit history.
    print("user: " + input_text)
    if history is None:
        history = []
    if system_prompt is not None and len(system_prompt) != 0:
        # NOTE(review): the system prompt is appended *after* any existing
        # history rather than prepended — confirm the predictor expects this.
        history.append({"role": "system", "content": system_prompt})
    history.append({"role": "user", "content": input_text})

    if function != "generate":
        raise Exception("错误的生成方式")

    assistant_response = predictor.generate_text(
        history,
        max_new_tokens,
        GenerateTextConfig(
            stream=stream,
            max_length=max_length,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p
        )
    )

    if stream:
        text = ""
        for chunk in assistant_response:
            # Strip the end-of-sequence marker before surfacing text.
            if predictor.eos_token is not None:
                chunk = chunk.replace(predictor.eos_token, "")
            text += chunk
            # Cumulative snapshot; consumers diff against the prior length.
            yield text
    else:
        print("assistant: " + assistant_response)
        yield assistant_response



async def to_stream_response_generator(model_id: str, generator: Generator):
    """Adapt a cumulative-text generator into OpenAI-style SSE chunk payloads.

    Emits: a role-announcing opening chunk, one content-delta chunk per newly
    generated span, a closing chunk with finish_reason="stop", and finally the
    SSE terminator string "[DONE]".
    """

    def build_chunk(delta: DeltaMessage, finish_reason=None) -> str:
        choice = ChatCompletionResponseStreamChoice(
            index=0,
            delta=delta,
            finish_reason=finish_reason
        )
        envelope = ChatCompletionResponse(
            model=model_id, choices=[choice], object="chat.completion.chunk"
        )
        return envelope.model_dump_json(exclude_unset=True)

    # Opening chunk: announces the assistant role, no content yet.
    yield build_chunk(DeltaMessage(role="assistant"))

    emitted = 0
    full_text = ""
    for snapshot in generator:
        # The source yields cumulative text; skip iterations with no growth.
        if len(snapshot) == emitted:
            continue
        delta_text = snapshot[emitted:]
        emitted = len(snapshot)
        full_text += delta_text
        yield build_chunk(DeltaMessage(content=delta_text))

    print("assistant: " + full_text, end="\n\n")
    # Closing chunk carries the stop reason, then the SSE sentinel.
    yield build_chunk(DeltaMessage(), finish_reason="stop")
    yield '[DONE]'


@app.get("/models", response_model=ModelList)
async def list_models():
    """Expose the discovered model directories as an OpenAI-style model list."""
    cards = [{"id": name} for name in models]
    return ModelList(data=cards)

@app.post("/chat/completions", response_model=ChatCompletionResponse)
async def chat_completions(request: ChatCompletionRequest):
    """OpenAI-compatible chat completion endpoint (streaming and non-streaming).

    The last message is taken as the user input; the remaining messages are
    forwarded as history. Empty message lists and generation failures yield an
    empty-choices response instead of an HTTP error.
    """
    print(request.messages)
    if not request.messages:
        return ChatCompletionResponse(
            model=request.model,
            object="chat.completion",
            choices=[],
        )

    # Split the final (user) turn off the history; pop() mutates the request's
    # message list, which chat() then extends in place.
    input_message = request.messages.pop()
    generator = chat(
        input_message.content,
        request.messages,
        None,                     # system_prompt: none beyond what's in messages
        "generate",
        request.stream,
        None,                     # top_k is not exposed by the request schema
        request.top_p,
        request.temperature,
        request.max_length,
        DEFAULT_MAX_NEW_TOKENS,
    )

    if request.stream:
        stream_generator = to_stream_response_generator(request.model, generator)
        return EventSourceResponse(stream_generator)

    # BUG FIX: chat() is a generator function, so its body does not run until
    # the first next() — the old try/except around its *creation* could never
    # catch "model not loaded". Catch at consumption time instead.
    try:
        response = next(generator)
    except Exception as e:
        print(e)
        return ChatCompletionResponse(
            model=request.model,
            object="chat.completion",
            choices=[],
        )

    choice = ChatCompletionResponseChoice(
        index=0,
        message=ChatMessage(
            role="assistant",
            content=response
        ),
        finish_reason="stop",
    )
    return ChatCompletionResponse(
        model=request.model,
        object="chat.completion",
        choices=[choice],
    )

# ---------------------------------------------------------------------------
# Interactive startup: choose a model and dtype on stdin, load it, then serve.
# ---------------------------------------------------------------------------
try:
    # Dtypes offered at the prompt; index 0 (bfloat16) is the default.
    dtypes = [
        torch.bfloat16,
        torch.float16,
        torch.float32,
        torch.float64,
        torch.uint8,
        torch.int8,
        torch.int16,
        torch.int32,
        torch.int64,
    ]

    print("请选择模型：")
    for i, m in enumerate(models):
        print(f"  {i}: {m}")
    model_index = int(input("请输入模型索引："))
    print("请选择数据类型（默认为bfloat16）：")
    for i, t in enumerate(dtypes):
        print(f"  {i}: {t}")
    try:
        dtype_index = int(input("请输入数据类型索引："))
    except ValueError:
        # Blank or invalid input falls back to bfloat16; the except is kept
        # narrow so Ctrl-C / EOF are no longer silently swallowed.
        dtype_index = 0
    # BUG FIX: load_model accepts (model_name, dtype); the old code prompted
    # for an AirLLM flag and passed it as a third positional argument, which
    # raised TypeError before the server could ever start.
    load_model(models[model_index], dtypes[dtype_index])
    uvicorn.run(app, host="0.0.0.0", port=8000)
except Exception as e:
    print(e)
    # BUG FIX: exit non-zero on a failed startup so supervisors notice.
    exit(1)


