from flask import Flask, request, Response, jsonify
import onnxruntime_genai as og
from tokenizer_factory import get_tokenizer
import os
import json
from collections import defaultdict
from threading import Lock

app = Flask(__name__)

# ========== Global caches ==========
# NOTE(review): hard-coded absolute Windows path — consider an env var or CLI arg.
model_dir = "C:\\Users\\wisemodel-aipc-03-e\\Desktop\\s-he\\s-he\\model"
model_cache = {}  # {model_path: (model, tokenizer)}
context_dict = defaultdict(dict)  # {model_path: {user_id: DialogueContext}}
MAX_CONTEXT_TOKENS = 1024
model_load_lock = Lock()  # serializes model loading to avoid duplicate loads

# ========== Dialogue context class ==========
class DialogueContext:
    """Per-user conversation state: records turns and builds prompts that
    stay within a token budget."""

    def __init__(self, tokenizer, max_context_tokens):
        self.history = []  # list of (user_input, model_output), oldest first
        self.tokenizer = tokenizer
        self.max_context_tokens = max_context_tokens

    def add_turn(self, user_input, model_output):
        """Record one completed (user, assistant) exchange."""
        self.history.append((user_input, model_output))

    def get_prompt(self, new_input):
        """Return the chat prompt for *new_input*, prepending as many recent
        history turns as fit within ``max_context_tokens``.

        BUG FIX: the original encoded the prompt and compared against the
        budget but returned the same string on both branches (dead code),
        and never consulted ``self.history`` at all.  With an empty history
        the result is identical to the original's output, so existing
        callers are unaffected.
        """
        prompt = f"<|user|> {new_input}\n<|assistant|> "
        # Grow the prompt newest-turn-first; stop at the first turn that
        # would push the encoded length past the budget.
        for user_text, assistant_text in reversed(self.history):
            candidate = f"<|user|> {user_text}\n<|assistant|> {assistant_text}\n{prompt}"
            if len(self.tokenizer.encode(candidate)) > self.max_context_tokens:
                break
            prompt = candidate
        return prompt

# ========== Model loader ==========
def get_model_and_tokenizer(model_path):
    """Return the cached (model, tokenizer) pair for *model_path*, loading
    it on first use.  Loading is serialized by ``model_load_lock``."""
    cached = model_cache.get(model_path)
    if cached is not None:
        return cached

    with model_load_lock:
        # Double-checked: another thread may have finished loading while
        # this one was waiting on the lock.
        if model_path not in model_cache:
            config_file = os.path.join(model_path, 'genai_config.json')
            with open(config_file, 'r') as cfg:
                model_type = json.load(cfg)['model']['type']

            loaded_model = og.Model(model_path)
            loaded_tokenizer = get_tokenizer(model_path, model_type, loaded_model)
            model_cache[model_path] = (loaded_model, loaded_tokenizer)
            print(f"✅ 已加载模型: {model_path}")

    return model_cache[model_path]

# ========== Streaming generation ==========
def generate_stream(model, tokenizer, prompt, args):
    """Generate a reply token-by-token and yield SSE ``data:`` chunks.

    Parameters:
        model/tokenizer: objects produced by the onnxruntime_genai loader.
        prompt: full chat prompt; generated text after its ``<|assistant|>``
            tag is what gets streamed.
        args: search options dict; ``None`` values are dropped.

    Yields:
        ``"data: {json}\\n\\n"`` strings, each carrying one incremental
        ``content`` fragment.
    """
    params = og.GeneratorParams(model)
    params.set_search_options(**{k: v for k, v in args.items() if v is not None})
    params.try_graph_capture_with_max_batch_size(1)

    generator = og.Generator(model, params)
    generator.append_tokens(tokenizer.encode(prompt))

    prev_output = ""
    assistant_tag = "<|assistant|>"
    # BUG FIX: str.find returns -1 when the tag is absent; the original then
    # used offset = -1 + len(tag) = 12 and silently dropped the first 12
    # characters of real output.  Fall back to offset 0 in that case.
    tag_index = prompt.find(assistant_tag)
    assistant_offset = tag_index + len(assistant_tag) if tag_index != -1 else 0
    print("output:")

    while not generator.is_done():
        generator.generate_next_token()
        # NOTE(review): a character offset computed on the prompt string is
        # applied to the decoded token sequence — this assumes
        # decode(encode(prompt)) round-trips exactly; confirm for the
        # tokenizers returned by get_tokenizer.
        current_output = tokenizer.decode(generator.get_sequence(0))
        generated = current_output[assistant_offset:]
        new_part = generated[len(prev_output):]
        prev_output = generated
        print(new_part, end="", flush=True)
        yield f"data: {json.dumps({'content': new_part})}\n\n"

# ========== Chat endpoint ==========
@app.route("/api/chat", methods=["POST"])
def chat_api():
    """POST /api/chat — stream a model reply as Server-Sent Events.

    Expected JSON body: ``model_name`` (required), ``user_id`` (default
    "default"), ``message``, and ``generation_config`` (search options).
    Returns 400 when ``model_name`` is missing, 404 when the model
    directory does not exist, otherwise a ``text/event-stream`` response.
    """
    data = request.get_json()
    model_name = data.get("model_name")
    if not model_name:
        return jsonify({"error": "缺少 model_name"}), 400

    model_path = os.path.join(model_dir, model_name)
    # Robustness: fail fast with a 404 instead of letting og.Model raise
    # (which would surface as an opaque 500).
    if not os.path.isdir(model_path):
        return jsonify({"error": f"模型不存在: {model_name}"}), 404

    user_id = data.get("user_id", "default")
    user_input = data.get("message", "")
    args = data.get("generation_config", {})

    # Fetch (or lazily load) the model and tokenizer.
    model, tokenizer = get_model_and_tokenizer(model_path)

    # Fetch (or create) this user's dialogue context for this model.
    if user_id not in context_dict[model_path]:
        context_dict[model_path][user_id] = DialogueContext(tokenizer, MAX_CONTEXT_TOKENS)
    context = context_dict[model_path][user_id]

    prompt = context.get_prompt(user_input)
    print(f"[{model_path}][{user_id}] prompt: {prompt}")

    def stream_response():
        output = ""
        for chunk in generate_stream(model, tokenizer, prompt, args):
            yield chunk
            # BUG FIX: the original used str.replace("data: ", ""), which
            # also deletes any literal "data: " occurring inside the
            # generated content, corrupting the accumulated text (and
            # potentially the JSON).  Strip only the fixed SSE prefix.
            payload = chunk.strip()
            if payload.startswith("data: "):
                payload = payload[len("data: "):]
            output += json.loads(payload)["content"]
        # NOTE(review): history recording is deliberately disabled here —
        # confirm before re-enabling.
        # context.add_turn(user_input, output)

    return Response(stream_response(), mimetype='text/event-stream')

if __name__ == "__main__":
    # threaded=True lets Flask handle concurrent requests; concurrent model
    # loads are serialized by model_load_lock.
    app.run(port=5001, threaded=True)
