import json
import threading
import time
import uuid

import torch
from flask import Flask
from flask import Response, request
from peft import PeftModel
from transformers import GPT2LMHeadModel
from transformers import AutoTokenizer
from transformers import TextIteratorStreamer

# Paths to the base GPT-2 Chinese checkpoint, its tokenizer, and the LoRA adapter.
BASE_MODEL_DIR = "./gpt2-chinese/model"
TOKENIZER_PATH = "./gpt2-chinese/tokenizer"
LoRA_MODEL_DIR = "./gpt2-chinese-lora/model"

app = Flask(__name__)

# Pick the best available backend: CUDA, then Apple-Silicon MPS, then CPU.
# The original fell back straight to "mps:0", which fails at use time on
# machines that have neither CUDA nor MPS.
if torch.cuda.is_available():
    device = torch.device("cuda")
elif torch.backends.mps.is_available():
    device = torch.device("mps:0")
else:
    device = torch.device("cpu")

tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH)
# add_special_tokens() both registers '<PAD>' in the vocab and sets
# tokenizer.pad_token; the previous direct attribute assignment before this
# call was redundant.
tokenizer.add_special_tokens({'pad_token': '<PAD>'})
base_model = GPT2LMHeadModel.from_pretrained(BASE_MODEL_DIR)
# The custom PAD token grew the vocabulary, so the embedding matrix must be
# resized to match the tokenizer.
base_model.resize_token_embeddings(len(tokenizer))
model = PeftModel.from_pretrained(base_model, LoRA_MODEL_DIR)
# Inference-only server: put the model in eval mode (disables dropout).
model.eval()
# NOTE(review): `device` is computed but the model is left on CPU, matching
# the original behavior; to enable GPU inference, move the model AND the
# per-request inputs to `device` together.

@app.route('/generate', methods=['GET'])
def generate_stream(prompt=None):
    """Stream raw generated text for `?prompt=...` as an SSE response.

    Bug fixes vs. the original:
    - Flask never fills a view argument without a matching URL-rule
      placeholder, so `prompt` was always None and tokenization failed;
      it is now read from the query string when not supplied.
    - `dict(inputs.input_ids, ...)` raised a TypeError (a tensor is not a
      mapping); `dict(inputs, ...)` expands the BatchEncoding correctly,
      providing both input_ids and attention_mask.
    """
    if prompt is None:
        prompt = request.args.get('prompt', '')
    inputs = tokenizer([prompt], return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer)
    generation_kwargs = dict(
        inputs,
        streamer=streamer,
        max_new_tokens=200,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.pad_token_id,
    )
    # Run generation in a background thread; the streamer yields decoded
    # text fragments as tokens are produced.
    thread = threading.Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    def stream():
        for text in streamer:
            yield text

    return Response(stream(), mimetype='text/event-stream')

# generate_stream reworked to speak the OpenAI chat-completions API format.
@app.route('/v1/chat/completions', methods=['POST'])
def generate_stream_openai():
    """OpenAI-compatible streaming chat completions endpoint.

    Takes the last message's content as the prompt, wraps it in the
    question/answer template the LoRA adapter was trained on, and streams
    `chat.completion.chunk` SSE events, followed by a final stop chunk and
    the `data: [DONE]` sentinel the OpenAI spec requires.
    """
    data = request.get_json(silent=True) or {}
    messages = data.get('messages') or []
    if not messages:
        # The original indexed data['messages'][-1] blindly, turning any
        # malformed payload into a 500; reject it explicitly instead.
        return Response(
            json.dumps({"error": "'messages' must be a non-empty list"}),
            status=400,
            mimetype='application/json',
        )
    prompt = messages[-1].get('content', '')

    # Unique response id and metadata echoed in every chunk.
    chat_id = f"chatid-{uuid.uuid4()}"
    created_time = int(time.time())
    model_name = "zhouyu-gpt"  # replace with the real model name

    input_text = f"问题：{prompt} 回答："

    inputs = tokenizer([input_text], return_tensors="pt").to(model.device)
    # skip_prompt=True: only newly generated tokens are streamed back.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)

    # dict(inputs, ...) already expands input_ids AND attention_mask from
    # the BatchEncoding, so the explicit attention_mask kwarg the original
    # passed was redundant.
    generation_kwargs = dict(
        inputs,
        streamer=streamer,
        max_new_tokens=200,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.pad_token_id,
    )
    thread = threading.Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    eos_token = tokenizer.eos_token  # may be None; replace(None, ...) would crash

    def stream():
        # Emit one chunk per decoded text fragment.
        for text in streamer:
            if eos_token:
                text = text.replace(eos_token, '')
            event_data = {
                "id": chat_id,
                "object": "chat.completion.chunk",
                "created": created_time,
                "model": model_name,
                "choices": [{
                    "index": 0,
                    "delta": {"content": text},
                    "logprobs": None,
                    "finish_reason": None
                }]
            }
            # ensure_ascii=False keeps Chinese characters readable on the wire.
            yield f"data: {json.dumps(event_data, ensure_ascii=False)}\n\n"

        # Terminal chunk: empty delta with finish_reason="stop".
        end_data = {
            "id": chat_id,
            "object": "chat.completion.chunk",
            "created": created_time,
            "model": model_name,
            "choices": [{
                "index": 0,
                "delta": {},
                "logprobs": None,
                "finish_reason": "stop"
            }]
        }
        yield f"data: {json.dumps(end_data)}\n\n"
        yield "data: [DONE]\n\n"  # end-of-stream marker required by the OpenAI spec

    return Response(stream(), mimetype='text/event-stream')


if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug interactive debugger,
    # which permits arbitrary code execution — never expose it on 0.0.0.0
    # in production; use a proper WSGI server (gunicorn/uwsgi) instead.
    app.run(host='0.0.0.0', port=6006, debug=True)