from flask import Flask, render_template, request, jsonify
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
import torch
import re
import time
import threading

app = Flask(__name__)

# Model-loading state shared between the Flask request handlers and the
# background loader thread:
#   model_loading — True while load_model() is running
#   model_loaded  — True once model and tokenizer are ready to use
model_loading = False
model_loaded = False
model_thread = None  # handle to the background loader thread
model = None         # set by load_model(): AutoModelForCausalLM instance
tokenizer = None     # set by load_model(): AutoTokenizer instance

# Load the model in the background; meant to run in a daemon thread.
def load_model(model_path):
    """Load the tokenizer and causal-LM weights from ``model_path``.

    Sets the module-level ``model`` / ``tokenizer`` globals and flips the
    ``model_loading`` / ``model_loaded`` flags so request handlers can poll
    loading progress. On failure the flags are left False so handlers keep
    reporting "loading" and a later request can retry.
    """
    global model, tokenizer, model_loading, model_loaded

    try:
        model_loading = True
        print(f"开始加载模型: {model_path}")

        # Directory for spilling weights to disk when they do not fit in
        # GPU/CPU memory (used by accelerate when device_map="auto").
        import os
        offload_dir = os.path.join(os.getcwd(), "model_offload")
        os.makedirs(offload_dir, exist_ok=True)

        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        model = AutoModelForCausalLM.from_pretrained(
            model_path,
            torch_dtype=torch.bfloat16,
            device_map="auto",           # spread layers across available devices
            trust_remote_code=True,
            offload_folder=offload_dir,  # spill layers here if memory runs out
        )
        print("模型加载完成")
        model_loaded = True
    except Exception as e:
        # Keep the server process alive on load failure, but print the full
        # traceback — str(e) alone is often not enough to diagnose OOM /
        # missing-file / checkpoint-format problems.
        import traceback
        traceback.print_exc()
        print(f"模型加载失败: {str(e)}")
    finally:
        model_loading = False

# Run one inference pass over the chat history.
def original_model_reasoning(messages: list, max_new_tokens=512):
    """Generate a reply for the given chat history.

    Args:
        messages: chat history in the tokenizer's chat-template format
            (presumably a list of {"role": ..., "content": ...} dicts —
            TODO confirm against the frontend payload).
        max_new_tokens: cap on the number of generated tokens.

    Returns:
        The decoded reply string, or a Chinese status/error message when
        the model is not ready or generation fails.
    """
    global model, tokenizer

    if not model_loaded:
        return "模型正在加载中，请稍候..."

    try:
        # Build the prompt tensor and move it to the model's device.
        input_tensor = tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=True,
            return_tensors="pt"
        ).to(model.device)

        # Greedy decoding: with do_sample=False sampling knobs such as
        # temperature are ignored (transformers warns about them), so none
        # are passed.
        outputs = model.generate(
            input_tensor,
            max_new_tokens=max_new_tokens,
            do_sample=False,
            num_beams=1,
            repetition_penalty=1.2
        )

        # Drop the prompt tokens; decode only the newly generated tail.
        response = tokenizer.decode(outputs[0][input_tensor.shape[1]:], skip_special_tokens=True)
        return response
    except Exception as e:
        print(f"生成回复时出错: {str(e)}")
        return "抱歉，生成回复时出现错误，请重试。"

# Home page route.
@app.route('/')
def index():
    """Serve the chat UI page."""
    page = render_template('index.html')
    return page

# API endpoint: generate a reply for the posted chat history.
@app.route('/get_response', methods=['POST'])
def get_response():
    """Return a JSON reply for the posted ``messages`` list.

    Lazily starts the background model-loading thread on the first request;
    while the model is loading, responds with a placeholder message instead
    of blocking the request.
    """
    global model_thread

    # silent=True: a missing/malformed JSON body yields None instead of
    # aborting with an HTML 400 page, so the client always gets JSON back.
    data = request.get_json(silent=True) or {}
    messages = data.get('messages', [])

    # First request: kick off model loading in a background daemon thread.
    if not model_loaded and not model_loading:
        # Replace with your actual model path.
        model_path = r"F:\deepseek-finetune\deepseek-output"
        model_thread = threading.Thread(target=load_model, args=(model_path,))
        model_thread.daemon = True
        model_thread.start()
        return jsonify({"response": "模型正在加载中，请稍候..."})

    # Loading already in progress: tell the client to wait.
    if model_loading:
        return jsonify({"response": "模型正在加载中，请稍候..."})

    # Model ready: run inference.
    response = original_model_reasoning(messages)
    return jsonify({"response": response})

if __name__ == '__main__':
    # Replace with your actual model path.
    model_path = r"F:\deepseek-finetune\deepseek-output"

    # Warm the model in a background daemon thread so the server can start
    # serving requests immediately.
    loader = threading.Thread(target=load_model, args=(model_path,), daemon=True)
    model_thread = loader
    loader.start()

    # Port 5001 avoids clashing with other local services on 5000.
    app.run(debug=True, port=5001)