import json
import threading
import time
import uuid

import torch
from datetime import datetime
from flask import Flask, Response, request
from modelscope import AutoModelForCausalLM
from modelscope import AutoTokenizer
from peft import PeftModel
from transformers import TextIteratorStreamer

# Pick CUDA when available. NOTE(review): `device` is computed but never used —
# neither the model nor the inputs are moved to it, so inference runs wherever
# from_pretrained placed the weights (CPU by default). Confirm intent.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_name = "Qwen/Qwen3-0.6B"
# Base Qwen3-0.6B weights and tokenizer, fetched through ModelScope.
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Wrap the base model with the locally fine-tuned LoRA adapter.
model = PeftModel.from_pretrained(model, "./Zhouyu-Qwen3-0.6B")


# Install Flask from the command line: pip install flask

def chat(prompt=None, max_new_tokens=32768):
    """Run one single-turn, non-streaming chat completion.

    Args:
        prompt: The user's message text; must be a non-empty string.
        max_new_tokens: Generation budget (default keeps the original
            hard-coded value).

    Returns:
        The assistant's reply with special tokens stripped.

    Raises:
        ValueError: If ``prompt`` is missing or empty.
    """
    if not prompt:
        raise ValueError("prompt must be a non-empty string")
    messages = [
        {"role": "user", "content": prompt}
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=False
    )
    # Move inputs onto the model's device: a no-op on CPU, required on CUDA.
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
    generated_ids = model.generate(**model_inputs, max_new_tokens=max_new_tokens)
    # Decode only the newly generated tokens, skipping the echoed prompt.
    prompt_len = model_inputs.input_ids.shape[1]
    return tokenizer.decode(generated_ids[0][prompt_len:], skip_special_tokens=True)

app = Flask(__name__)


@app.route('/generate', methods=['GET'])
def generate():
    """Non-streaming generation endpoint.

    Query parameters:
        prompt: the user's message (required).

    Returns the completion text as the response body, or HTTP 400 when
    ``prompt`` is missing (previously a missing prompt crashed with a 500).
    """
    prompt = request.args.get('prompt')
    if not prompt:
        return Response("missing required query parameter 'prompt'", status=400)
    messages = [
        {"role": "user", "content": prompt}
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=False
    )
    # Move inputs onto the model's device: a no-op on CPU, required on CUDA.
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
    generated_ids = model.generate(**model_inputs, max_new_tokens=32768)
    # Decode only the newly generated tokens, skipping the echoed prompt.
    prompt_len = model_inputs.input_ids.shape[1]
    return tokenizer.decode(generated_ids[0][prompt_len:], skip_special_tokens=True)


@app.route('/generate_stream', methods=['GET'])
def generate_stream():
    """Streaming generation endpoint (plain text over SSE mimetype).

    Query parameters:
        prompt: the user's message (required).

    Generation runs on a background thread while the response generator
    yields tokens as the streamer produces them. Returns HTTP 400 when
    ``prompt`` is missing (previously a missing prompt crashed with a 500).
    """
    prompt = request.args.get('prompt')
    if not prompt:
        return Response("missing required query parameter 'prompt'", status=400)
    messages = [
        {"role": "user", "content": prompt}
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=False
    )
    # Move inputs onto the model's device: a no-op on CPU, required on CUDA.
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

    # Streamer yields decoded text pieces as generate() produces tokens.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    generation_kwargs = dict(
        **model_inputs,
        streamer=streamer,
        max_new_tokens=32768,
    )
    # Daemon thread so a still-running generation cannot block shutdown
    # if the client disconnects mid-stream.
    thread = threading.Thread(target=model.generate, kwargs=generation_kwargs, daemon=True)
    thread.start()

    def stream():
        # Named `piece` (not `text`) to avoid shadowing the prompt above.
        for piece in streamer:
            yield piece

    return Response(stream(), mimetype='text/event-stream')


# Adapt generate_stream to the OpenAI chat-completions API format
@app.route('/v1/chat/completions', methods=['POST'])
def chat_completions():
    """OpenAI-compatible streaming chat-completions endpoint.

    Expects a JSON body with a ``messages`` list (OpenAI chat format) and
    always responds as a server-sent-event stream of
    ``chat.completion.chunk`` objects terminated by ``data: [DONE]``.
    """
    data = request.json
    messages = data.get('messages', [])
    # Model name reported back to clients (shadows the module-level global).
    model_name = 'zhouyu-qwen3'

    # Unique request ID and creation timestamp for the response chunks.
    request_id = f"chatcmpl-{uuid.uuid4()}"
    created = int(time.time())

    # Streaming response generator.
    def generate_stream():
        text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
            enable_thinking=False
        )
        # NOTE(review): inputs are not moved to the model's device; this
        # only works when the model lives on CPU — confirm.
        model_inputs = tokenizer([text], return_tensors="pt")

        # Streamer yields decoded text as generate() runs on the thread below.
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

        generation_kwargs = dict(
            **model_inputs,
            streamer=streamer,
            max_new_tokens=32768,
        )
        thread = threading.Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # SSE framing state.
        full_response = ""  # accumulated but not returned; kept for parity
        is_first_chunk = True

        try:
            for token in streamer:
                # OpenAI format: the first chunk carries the assistant role.
                if is_first_chunk:
                    role_chunk = format_openai_stream_response(
                        request_id, created, model_name, "", role="assistant"
                    )
                    yield f"data: {json.dumps(role_chunk)}\n\n"
                    is_first_chunk = False

                full_response += token

                # Emit one content chunk per streamed text piece.
                chunk = format_openai_stream_response(
                    request_id, created, model_name, token
                )
                yield f"data: {json.dumps(chunk)}\n\n"

            # Emit the terminating chunk with finish_reason="stop".
            completion_chunk = format_openai_stream_response(
                request_id, created, model_name, "", finish_reason="stop"
            )
            yield f"data: {json.dumps(completion_chunk)}\n\n"

        finally:
            # Always close the stream, even if generation raised mid-way.
            yield "data: [DONE]\n\n"
            if thread.is_alive():
                thread.join(timeout=5)

    return Response(generate_stream(), mimetype='text/event-stream')


def format_openai_response(id, created, model_name, content, role="assistant", finish_reason="stop"):
    """Build a non-streaming chat completion payload in the OpenAI schema.

    Token usage is not tracked by this server, so the usage section is
    reported as all zeros.
    """
    message = {"role": role, "content": content}
    choice = {"index": 0, "message": message, "finish_reason": finish_reason}
    usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
    return {
        "id": id,
        "object": "chat.completion",
        "created": created,
        "model": model_name,
        "choices": [choice],
        "usage": usage,
    }


def format_openai_stream_response(id, created, model_name, content, role="assistant", finish_reason=None):
    """Build one SSE chunk in the OpenAI ``chat.completion.chunk`` schema.

    In the OpenAI streaming format only the first chunk's delta carries the
    assistant role; pass ``role=None`` to omit the "role" key entirely.
    (The previous implementation emitted ``"role": null``, which is not a
    valid delta.) The default ``role="assistant"`` keeps existing calls
    byte-identical in output.
    """
    delta = {}
    if role is not None:
        delta["role"] = role
    delta["content"] = content
    return {
        "id": id,
        "object": "chat.completion.chunk",
        "created": created,
        "model": model_name,
        "choices": [
            {
                "index": 0,
                "delta": delta,
                "finish_reason": finish_reason
            }
        ]
    }

if __name__ == '__main__':
    # debug=True would start the Werkzeug reloader (loading the 0.6B model
    # twice) and expose the interactive debugger — arbitrary code execution
    # when bound to 0.0.0.0. Keep it off for a model-serving process.
    app.run(host='0.0.0.0', port=6006, debug=False)
