

# Required libraries
# pip install bitsandbytes>=0.41.1  # core of 8-bit/4-bit quantization
# pip install accelerate>=0.27.0    # memory-optimized device scheduling
# pip install transformers>=4.37.0  # loading support for recent models

# Make sure the installed versions support the new-style quantization config
# pip install -U "bitsandbytes>=0.43.0" "transformers>=4.38.0" "accelerate>=0.27.0"

from flask import Flask, request, jsonify, stream_with_context, Response
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TextIteratorStreamer

import torch
from flask_cors import CORS  # 解决跨域问题

import queue
from threading import Thread

from transformers.models.cvt.convert_cvt_original_pytorch_checkpoint_to_pytorch import attention

app = Flask(__name__)
CORS(app, resources={r"/*": {"origins": "*"}})  # open CORS: allow every origin
# Local path of the model checkpoint to serve.
model_path = "/home/aresen/1project/2python/hub/DeepSeek-R1-Distill-Qwen-14B"
# Per-device memory budget handed to accelerate's device_map planner
# (GPU 0 capped at 22GiB, spillover to CPU RAM).
max_memory = {0: "22GiB", "cpu": "64GiB"}
# 8-bit quantization via the new-style BitsAndBytesConfig entry point.
quant_config = BitsAndBytesConfig(
    load_in_8bit=True,               # enable 8-bit quantization
    llm_int8_threshold=6.0,          # activation outlier threshold controlling int8 precision
    llm_int8_skip_modules=["lm_head", "query_key_value"] # keep output head full precision; skip attention core layers
)

# Caching for model loading (so the model is constructed only once)
from functools import lru_cache, wraps

# API authentication decorator
def require_api_key(func):
    """Decorator enforcing an ``X-API-KEY`` header on a Flask view.

    Responds with a 401 JSON error when the header is missing or wrong;
    otherwise delegates to the wrapped view unchanged.
    """
    import hmac
    import os

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Allow the key to come from the environment; fall back to the
        # original hard-coded placeholder for backward compatibility.
        expected = os.environ.get('API_KEY', 'YOUR_SECRET_KEY')
        supplied = request.headers.get('X-API-KEY') or ''
        # compare_digest avoids leaking the key via comparison timing.
        if not hmac.compare_digest(supplied, expected):
            return jsonify({"error": "Unauthorized"}), 401
        return func(*args, **kwargs)
    return wrapper

@lru_cache(maxsize=1)
def load_model():
    """Load the causal LM exactly once (lru_cache keeps the single instance)."""
    load_kwargs = dict(
        device_map="auto",                 # let accelerate place layers automatically
        quantization_config=quant_config,  # new-style 8-bit quantization entry point
        torch_dtype=torch.float16,         # half precision for non-quantized parts
        low_cpu_mem_usage=True,            # stream weights in to cut CPU RAM spikes
        max_memory=max_memory,             # per-device memory budget
    )
    return AutoModelForCausalLM.from_pretrained(model_path, **load_kwargs)

# # 4-bit aggressive compression (use when GPU memory < 16GB) - begin
# quant_config = BitsAndBytesConfig(
#     load_in_4bit=True,                # enable 4-bit quantization
#     bnb_4bit_quant_type="nf4",        # quantization algorithm (nf4/fp4 recommended)
#     bnb_4bit_compute_dtype=torch.float16, # compute precision
#     bnb_4bit_use_double_quant=True,   # nested double quantization for extra compression
#     bnb_4bit_quant_storage=torch.uint8 # storage format
# )
#
# model = AutoModelForCausalLM.from_pretrained(
#     "/path/to/your/model",
#     device_map="auto",
#     quantization_config=quant_config,
#     trust_remote_code=True            # enable only when required
# )
# # 4-bit aggressive compression (use when GPU memory < 16GB) - end

tokenizer = AutoTokenizer.from_pretrained(model_path)


class AnswerStreamer:
    """Streaming adapter for older transformers versions.

    The generation loop pushes token ids in via put(); a consumer thread
    iterates the instance to receive incremental text chunks. end() closes
    the stream with a sentinel.
    """

    def __init__(self, tokenizer, input_length):
        self.tokenizer = tokenizer
        self.input_length = input_length
        self.queue = queue.Queue()   # producer/consumer hand-off between threads
        self.last_text = ""          # full decoded text emitted so far
        self.current_tokens = []     # every token id received so far

    def put(self, token_id):
        """Accept one token id (int or 0-d tensor) and enqueue any new text."""
        # Newer interfaces may hand over tensors; older ones plain ints.
        if isinstance(token_id, torch.Tensor):
            token_id = token_id.item()
        self.current_tokens.append(token_id)

        decoded = self.tokenizer.decode(self.current_tokens, skip_special_tokens=True)
        # Emit only the delta; if re-decoding rewrote earlier text, resend it all.
        delta = decoded[len(self.last_text):] if decoded.startswith(self.last_text) else decoded
        if not delta:
            return
        self.queue.put(delta)
        self.last_text = decoded

    def end(self):
        """Signal end-of-stream by enqueuing the None sentinel."""
        self.queue.put(None)

    def __iter__(self):
        # Drain chunks until the None sentinel arrives.
        yield from iter(self.queue.get, None)

@app.route('/generate', methods=['POST'])
# @require_api_key
def generate():
    """Generate text from the served model.

    JSON body: {"prompt": str, "stream": bool (optional, default False)}.
    Returns JSON {"result": ...} for normal requests, or a text/event-stream
    of incremental chunks when "stream" is true. Malformed requests get 400.
    """
    data = request.get_json(silent=True) or {}
    prompt = data.get('prompt')
    if not isinstance(prompt, str) or not prompt:
        # Validate instead of crashing with KeyError -> HTTP 500.
        return jsonify({"error": "'prompt' (non-empty string) is required"}), 400
    stream = bool(data.get('stream', False))

    # Ensure the tokenizer has a pad token (some checkpoints ship without one).
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    model = load_model()
    inputs = tokenizer(
        prompt,
        return_tensors="pt",
        padding=True,               # ensure an attention mask is produced
        return_attention_mask=True,
    )
    inputs = inputs.to(model.device)
    input_length = inputs.input_ids.size(1)

    generation_config = {
        "input_ids": inputs.input_ids,
        "attention_mask": inputs.attention_mask,  # explicit mask avoids pad/eos ambiguity
        "max_new_tokens": 2048,                   # cap generated length
        "do_sample": True,
        "temperature": 0.7,
        "top_p": 0.9,
        "repetition_penalty": 1.1,
        "pad_token_id": tokenizer.eos_token_id,   # silence pad-token warnings
    }

    if stream:
        # skip_special_tokens keeps streamed text consistent with the
        # non-streaming branch, which also strips special tokens.
        streamer = TextIteratorStreamer(
            tokenizer, skip_prompt=True, skip_special_tokens=True
        )
        generation_config["streamer"] = streamer

        # daemon=True so a lingering generation cannot block interpreter exit.
        Thread(target=model.generate, kwargs=generation_config, daemon=True).start()

        def event_stream():
            for text in streamer:
                # SSE frames are newline-delimited: a chunk containing "\n"
                # must be split across multiple "data:" lines, otherwise the
                # frame terminates mid-message on the client.
                for line in text.split('\n'):
                    yield f"data: {line}\n"
                yield "\n"

        return Response(event_stream(), mimetype="text/event-stream")
    else:
        outputs = model.generate(**generation_config)
        generated_ids = outputs[0][input_length:]  # keep only the generated part
        full_response = tokenizer.decode(generated_ids, skip_special_tokens=True)
        print(full_response)
        full_response = full_response.replace('？\n\n', '').replace('</think>', '')
        return jsonify({"result": full_response})


if __name__ == '__main__':
    # For production, front this with a WSGI server (e.g. gunicorn) instead
    # of Flask's built-in development server.
    app.run(
        host='0.0.0.0',
        port=5000,
        threaded=True,  # handle concurrent requests with threads
        debug=False     # keep debug mode off outside development
    )