from flask import Flask, request, jsonify
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import threading
import re
import os

os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["OMP_NUM_THREADS"] = "4"

app = Flask(__name__)
MODEL_PATH = "/home/fangning/work/LLM/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"
# 来源项目  https://ai.gitee.com/hf-models/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B


def clean_response(text):
    """Strip DeepSeek-R1 reasoning traces from a decoded response.

    Removes every complete ``<think>...</think>`` block. When generation is
    truncated by ``max_new_tokens``, the model may emit an opening ``<think>``
    with no closing tag; the original pattern then matched nothing and the raw
    chain-of-thought leaked to the client, so a dangling ``<think>`` and
    everything after it is removed as well. Whitespace left around the removed
    block is trimmed.

    :param text: decoded model output, possibly containing think tags.
    :return: the visible answer text with reasoning removed.
    """
    # DOTALL: reasoning blocks routinely span multiple lines.
    cleaned = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
    # Unterminated block (generation cut off mid-think): drop to end of string.
    cleaned = re.sub(r'<think>.*$', '', cleaned, flags=re.DOTALL)
    # The removed block usually leaves "\n\n" behind; trim it.
    return cleaned.strip()


class ModelSingleton:
    """Process-wide holder for the tokenizer, model and chat sessions.

    Double-checked locking ensures the expensive model load happens exactly
    once; every subsequent construction returns the same shared instance.
    """

    _instance = None
    _lock = threading.Lock()

    def __new__(cls):
        # Fast path: already initialized, no need to take the lock.
        if cls._instance is not None:
            return cls._instance
        with cls._lock:
            # Re-check under the lock: another thread may have won the race.
            if cls._instance is None:
                cls._instance = super().__new__(cls)
                cls._instance.init_model()
        return cls._instance

    def init_model(self):
        """Load tokenizer and model from the local snapshot (no network access)."""
        self.tokenizer = AutoTokenizer.from_pretrained(
            MODEL_PATH,
            trust_remote_code=True,
            pad_token='<|endoftext|>',
            local_files_only=True,
        )
        self.model = AutoModelForCausalLM.from_pretrained(
            MODEL_PATH,
            trust_remote_code=True,
            torch_dtype=torch.float16,
            device_map="auto",
            local_files_only=True,
        )
        # Maps session_id -> list of {"role": ..., "content": ...} messages.
        self.chat_sessions = {}


model_singleton = ModelSingleton()


@app.route('/chat', methods=['POST'])
def chat():
    """Handle one chat turn: record user input, generate a reply, return JSON.

    Expected body: ``{"input": str, "session_id": optional str}``. Conversation
    history is kept in memory per session_id. Returns 400 on a malformed body
    instead of letting a KeyError surface as a 500.
    """
    # silent=True yields None on a missing/invalid JSON body instead of
    # raising and producing an HTML error page.
    data = request.get_json(silent=True)
    print("Received request body:", data)

    # Validate before use: request.json / data['input'] previously raised on
    # bad payloads, turning client errors into server 500s.
    if not isinstance(data, dict) or not data.get('input'):
        return jsonify({"error": "JSON body with a non-empty 'input' field is required"}), 400

    session_id = data.get('session_id', 'default')
    user_input = data['input']

    # setdefault is a single atomic dict op, avoiding the check-then-insert
    # race between concurrent requests (app runs with threaded=True).
    chat_history = model_singleton.chat_sessions.setdefault(session_id, [])
    chat_history.append({"role": "user", "content": user_input})

    inputs = model_singleton.tokenizer.apply_chat_template(
        chat_history,
        add_generation_prompt=True,
        return_tensors="pt"
    ).to(model_singleton.model.device)

    outputs = model_singleton.model.generate(
        input_ids=inputs,
        max_new_tokens=384,
        # early_stopping removed: it applies only to beam search and is
        # invalid/warned when combined with do_sample=True sampling.
        do_sample=True,
        top_k=40,
        top_p=0.9,
        temperature=0.7,
        num_return_sequences=1,
        pad_token_id=model_singleton.tokenizer.pad_token_id
    )

    # Decode only the newly generated tokens, skipping the prompt prefix.
    raw_response = model_singleton.tokenizer.decode(
        outputs[0][inputs.shape[1]:],
        skip_special_tokens=True
    )

    # Strip <think> reasoning before storing/returning.
    cleaned_response = clean_response(raw_response)
    chat_history.append({"role": "assistant", "content": cleaned_response})

    print("Send response message:", cleaned_response)

    # Echo the request body alongside the generated reply.
    return jsonify({
        "request_body": data,
        "response": cleaned_response
    })


if __name__ == '__main__':
    # threaded=True lets the dev server handle requests concurrently.
    # NOTE(review): Flask's built-in server is for development; use a WSGI
    # server (gunicorn/uwsgi) in production.
    app.run(host='0.0.0.0', port=5000, threaded=True)
