from pathlib import Path
from typing import Optional

import uvicorn
from fastapi import FastAPI, Request
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer, TextIteratorStreamer
from openai.types.chat import ChatCompletionChunk
import uuid
import json
import time
# threading: used to run model.generate on background threads for streaming responses
import threading

# Load the local model and tokenizer once at import time; all route handlers share them.
# NOTE(review): hard-coded Windows path — consider making this configurable.
model_path = r"D:\model\Qwen\Qwen2.5-0.5B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto",  torch_dtype="auto")   # "auto" picks dtype (e.g. float16) and device placement

class RequestItem(BaseModel):
    """Request body for the /v1/test/* endpoints."""

    # The user prompt text; None when the client omits it.
    # Fix: was `str = None`, which mis-declares the type — a None default on a
    # plain `str` field; Optional[str] states the actual contract.
    messages: Optional[str] = None
    # Whether to stream the response as SSE chunks (only honoured by /v1/test/completions).
    stream: bool = False


def register_routes(app):
    """Register the chat-completion HTTP routes on *app*.

    Routes:
      POST /v1/test/lingxi      -- always streams: wraps the user message in a
                                   refusal prompt and emits raw SSE text chunks
      POST /v1/test/completions -- OpenAI-style completion for a plain prompt
                                   string (streaming or full, per item.stream)
      POST /v1/chat/completions -- OpenAI-compatible chat completion reading a
                                   raw JSON body

    Fixes vs. the previous version: the byte-identical duplicate registration
    of /v1/test/lingxi is removed; non-streaming generation now passes
    add_generation_prompt=True so the model produces an assistant answer
    instead of continuing the user turn; streamed text skips special tokens
    (e.g. <|im_end|>); a missing "stream" key in /v1/chat/completions no
    longer raises KeyError.
    """
    from fastapi.responses import StreamingResponse

    def _start_generation(inputs):
        """Launch model.generate on a background thread; return the text streamer.

        skip_prompt drops the echoed prompt from the stream and
        skip_special_tokens strips chat-template markers from the output.
        """
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
        generation_kwargs = dict(
            inputs,
            streamer=streamer,
            max_new_tokens=1000,
            do_sample=True,
            temperature=0.7,
        )
        threading.Thread(target=model.generate, kwargs=generation_kwargs).start()
        return streamer

    def _stream_openai_chunks(streamer):
        """Yield SSE lines wrapping each generated text piece as an OpenAI chunk."""
        chunk_id = f"chatcmpl-{uuid.uuid4()}"
        for new_text in streamer:
            chunk = ChatCompletionChunk(
                id=chunk_id,
                model="Qwen2.5-0.5B-Instruct",
                object="chat.completion.chunk",
                created=int(time.time()),
                choices=[{
                    "index": 0,
                    "delta": {"content": new_text},
                    "finish_reason": None
                }]
            )
            yield f"data: {chunk.model_dump_json()}\n\n"
        # Per the OpenAI SSE protocol, [DONE] terminates the stream.
        yield "data: [DONE]\n\n"

    def _full_completion(messages):
        """Run a blocking generation and wrap it in an OpenAI chat.completion payload."""
        # add_generation_prompt=True appends the assistant header required by
        # Qwen chat templates (was missing, so the model continued the user turn).
        prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        inputs = tokenizer([prompt], return_tensors="pt").to(model.device)
        outputs = model.generate(**inputs, max_new_tokens=1000)
        # Slice off the prompt tokens so only the newly generated answer is decoded.
        generated_ids = [
            output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, outputs)
        ]
        response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
        return {
            "id": f"chatcmpl-{uuid.uuid4()}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": "Qwen2.5-0.5B-Instruct",
            "choices": [{
                "index": 0,
                "message": {"role": "assistant", "content": response},
                "finish_reason": "stop"
            }]
        }

    @app.post("/v1/test/lingxi")
    async def chat_completion_lingxi(item: RequestItem):
        """Always stream: embed the user message in a refusal prompt, emit raw SSE text."""
        message = item.messages

        def generate_stream():
            # Runtime prompt text — part of the endpoint's behavior, left unchanged.
            prompt = f'你是一个守法的AI助手，也不能泄露自己的身份，现在用户提问中包含了{message}，你直接拒接回答这个问题'
            text = tokenizer.apply_chat_template(
                [{"role": "user", "content": prompt}],
                tokenize=False,
                add_generation_prompt=True
            )
            inputs = tokenizer([text], return_tensors="pt").to(model.device)
            streamer = _start_generation(inputs)
            for new_text in streamer:
                yield f"data: {new_text}\n\n"
            yield "data: [DONE]\n\n"

        return StreamingResponse(
            generate_stream(),
            media_type="text/event-stream"
        )

    @app.post("/v1/test/completions")
    async def chat_completion_test(item: RequestItem):
        """Completion for a plain prompt string; streams when item.stream is true."""
        message = item.messages

        if item.stream:
            def generate_stream():
                # NOTE(review): the raw string is tokenized without a chat
                # template here, unlike the non-streaming path — confirm intended.
                inputs = tokenizer(message, return_tensors="pt").to(model.device)
                yield from _stream_openai_chunks(_start_generation(inputs))

            return StreamingResponse(
                generate_stream(),
                media_type="text/event-stream"
            )

        return _full_completion([{"role": "user", "content": message}])

    @app.post("/v1/chat/completions")
    async def chat_completion(request: Request):
        """OpenAI-compatible endpoint reading the raw JSON body."""
        data = await request.json()
        messages = data['messages']
        # .get() so clients that omit the optional "stream" field don't trigger a 500.
        stream = data.get('stream', False)

        if stream:
            def generate_stream():
                # NOTE(review): only the first message's content is fed to the
                # model; conversation history is ignored in the streaming path —
                # confirm this is intended.
                inputs = tokenizer(messages[0]['content'], return_tensors="pt").to(model.device)
                yield from _stream_openai_chunks(_start_generation(inputs))

            return StreamingResponse(
                generate_stream(),
                media_type="text/event-stream"
            )

        return _full_completion(messages)

def MakeFastAPIOffline(
        app: FastAPI,
        static_dir=Path(__file__).parent / "static",
        static_url="/static-offline-docs",
        docs_url: Optional[str] = "/docs",
        redoc_url: Optional[str] = "/redoc",
) -> None:
    """Patch *app* so the Swagger UI and ReDoc pages load their JS/CSS assets
    from locally mounted static files instead of a CDN.

    Pass docs_url=None or redoc_url=None to skip patching that page.
    """
    from fastapi import Request
    from fastapi.openapi.docs import (
        get_redoc_html,
        get_swagger_ui_html,
        get_swagger_ui_oauth2_redirect_html,
    )
    from fastapi.staticfiles import StaticFiles
    from starlette.responses import HTMLResponse

    openapi_url = app.openapi_url
    oauth2_redirect_url = app.swagger_ui_oauth2_redirect_url

    def drop_route(url: str) -> None:
        """Delete the first registered route whose path matches *url* (case-insensitive)."""
        for idx, route in enumerate(app.routes):
            if route.path.lower() == url.lower():
                del app.routes[idx]
                return

    # Serve the bundled doc-page assets under static_url.
    app.mount(
        static_url,
        StaticFiles(directory=Path(static_dir).as_posix()),
        name="static-offline-docs",
    )

    if docs_url is not None:
        # Replace FastAPI's default CDN-backed Swagger UI routes.
        drop_route(docs_url)
        drop_route(oauth2_redirect_url)

        @app.get(docs_url, include_in_schema=False)
        async def custom_swagger_ui_html(request: Request) -> HTMLResponse:
            root = request.scope.get("root_path")
            return get_swagger_ui_html(
                openapi_url=f"{root}{openapi_url}",
                title=app.title + " - Swagger UI",
                oauth2_redirect_url=oauth2_redirect_url,
                swagger_js_url=f"{root}{static_url}/swagger-ui-bundle.js",
                swagger_css_url=f"{root}{static_url}/swagger-ui.css",
                swagger_favicon_url=f"{root}{static_url}/favicon.png",
            )

        @app.get(oauth2_redirect_url, include_in_schema=False)
        async def swagger_ui_redirect() -> HTMLResponse:
            return get_swagger_ui_oauth2_redirect_html()

    if redoc_url is not None:
        # Replace FastAPI's default CDN-backed ReDoc route.
        drop_route(redoc_url)

        @app.get(redoc_url, include_in_schema=False)
        async def redoc_html(request: Request) -> HTMLResponse:
            root = request.scope.get("root_path")
            return get_redoc_html(
                openapi_url=f"{root}{openapi_url}",
                title=app.title + " - ReDoc",
                redoc_js_url=f"{root}{static_url}/redoc.standalone.js",
                with_google_fonts=False,
                redoc_favicon_url=f"{root}{static_url}/favicon.png",
            )



def create_app():
    """Build the FastAPI application: routes plus offline-capable docs pages."""
    application = FastAPI()
    register_routes(application)
    MakeFastAPIOffline(application)
    return application

if __name__ == '__main__':
    # Serve on all interfaces, port 8111.
    uvicorn.run(create_app(), host='0.0.0.0', port=8111)


