from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
from flask import Flask, request, jsonify
from flask_cors import CORS
import threading
import argparse

# Module-level globals holding the model and tokenizer; populated by
# initialize_model() before the server / chat loop starts.
model = None
tokenizer = None
# Hugging Face Hub identifier of the checkpoint to load.
model_name = "Qwen/Qwen3-0.6B"

# Create the Flask application instance.
app = Flask(__name__)
# Enable CORS so browsers may call the API from any origin.
CORS(app)

# 初始化模型和分词器
# Load model weights and tokenizer into the module-level globals.
def initialize_model():
    """Populate the global ``model`` and ``tokenizer`` from the Hub.

    Blocking: downloads the checkpoint on first use. ``device_map="auto"``
    lets accelerate place the weights; ``dtype="auto"`` keeps the
    checkpoint's native precision.
    """
    global model, tokenizer
    print("正在加载模型和分词器...")

    # Tokenizer and weights are loaded independently; both trust the
    # repository's custom code.
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        dtype="auto",
        device_map="auto",
        trust_remote_code=True,
    )

    print("模型加载完成！")
    print("\n=== Qwen3-0.6B 模型部署成功 ===")

# API endpoint: generate a completion for a user prompt.
@app.route('/api/generate', methods=['POST'])
def generate_text():
    """Handle ``POST /api/generate``.

    Expects JSON ``{"prompt": str}``. Returns JSON with
    ``original_response`` (the generated text only) and, when the model
    emitted a ``</think>`` block, separate ``thinking`` and ``answer``
    fields. Errors are reported as ``{"error": ...}`` with 400/500.
    """
    try:
        data = request.json
        if not data or 'prompt' not in data:
            return jsonify({'error': '缺少必要的参数: prompt'}), 400

        user_input = data['prompt']

        # Build the chat-formatted prompt with thinking mode enabled.
        messages = [{'role': 'user', 'content': user_input}]
        text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
            enable_thinking=True
        )

        model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

        # Generate without tracking gradients (inference only).
        with torch.no_grad():
            generated_ids = model.generate(
                **model_inputs,
                max_new_tokens=1024,
                temperature=0.6,
                top_p=0.95
            )

        # Decode only the newly generated tokens: the previous version
        # decoded the full sequence, so the response leaked the
        # chat-template prompt text back to the client.
        new_tokens = generated_ids[0][model_inputs.input_ids.shape[1]:]
        response = tokenizer.decode(new_tokens, skip_special_tokens=True)

        result = {
            'original_response': response
        }

        # A thinking-mode completion contains exactly one "</think>".
        # The old code required >= 3 parts after split (i.e. two
        # occurrences) and therefore never extracted anything.
        # skip_special_tokens already drops "<|im_end|>".
        if "</think>" in response:
            thinking, _, answer = response.partition("</think>")
            result['thinking'] = thinking.replace("<think>", "").strip()
            result['answer'] = answer.strip()

        return jsonify(result), 200

    except Exception as e:
        # Top-level request boundary: surface the failure to the caller.
        return jsonify({'error': str(e)}), 500

# API endpoint: liveness probe.
@app.route('/api/health', methods=['GET'])
def health_check():
    """Report service liveness and the configured model identifier."""
    payload = {'status': 'healthy', 'model': model_name}
    return jsonify(payload), 200

# Start the (blocking) Flask HTTP server.
def start_server(host='0.0.0.0', port=5000):
    """Announce the endpoint URLs, then run the Flask app (blocks)."""
    base = f"http://{host}:{port}"
    print("\n=== 启动API服务 ===")
    print(f"服务地址: {base}")
    print(f"生成文本API: POST {base}/api/generate")
    print(f"健康检查API: GET {base}/api/health")
    app.run(host=host, port=port, debug=False, threaded=True)

# Interactive console chat loop.
def interactive_chat():
    """Read prompts from stdin and print the model's output until 'quit'.

    When the completion contains a ``</think>`` block, the thinking and
    answer parts are printed separately; otherwise the raw generation
    is printed. Exits cleanly on 'quit' or when stdin is closed.
    """
    print("\n模型已准备就绪，您可以开始提问了。输入'quit'退出。")

    while True:
        try:
            user_input = input("\n您的问题: ")
        except EOFError:
            # stdin exhausted (e.g. piped input): the old broad handler
            # caught this and spun forever printing errors.
            break

        if user_input.lower() == 'quit':
            break

        try:
            # Build the chat-formatted prompt with thinking mode enabled.
            messages = [{"role": "user", "content": user_input}]
            text = tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True,
                enable_thinking=True
            )

            model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

            # Inference only: no gradient tracking.
            with torch.no_grad():
                generated_ids = model.generate(
                    **model_inputs,
                    max_new_tokens=1024,
                    temperature=0.6,
                    top_p=0.95
                )

            # Decode only the newly generated tokens so the printed
            # output does not echo the chat-template prompt.
            new_tokens = generated_ids[0][model_inputs.input_ids.shape[1]:]
            response = tokenizer.decode(new_tokens, skip_special_tokens=True)

            # A thinking-mode completion contains exactly one "</think>";
            # the old check required two occurrences and never matched.
            if "</think>" in response:
                thinking_part, _, answer_part = response.partition("</think>")
                thinking_part = thinking_part.replace("<think>", "").strip()
                answer_part = answer_part.strip()
                print(f"\n思考内容:\n{thinking_part}")
                print(f"\n回答内容:\n{answer_part}")
            else:
                print(f"\n生成内容:\n{response}")

        except Exception as e:
            # Keep the REPL alive on generation failures.
            print(f"\n生成过程中出错: {e}")

def main():
    """Parse CLI options, load the model, then launch the selected mode."""
    parser = argparse.ArgumentParser(description='Qwen3-0.6B 模型部署')
    parser.add_argument('--mode', type=str, choices=['api', 'interactive', 'both'],
                        default='both', help='运行模式: api(仅API服务), interactive(仅交互式), both(两者同时)')
    parser.add_argument('--host', type=str, default='0.0.0.0', help='API服务主机地址')
    parser.add_argument('--port', type=int, default=5000, help='API服务端口')
    args = parser.parse_args()

    # Load weights/tokenizer before serving anything.
    initialize_model()

    # Flat dispatch over the three run modes.
    if args.mode == 'both':
        # API server runs on a daemon thread so the interactive loop can
        # own the main thread; the thread dies with the process.
        api_thread = threading.Thread(target=start_server, args=(args.host, args.port))
        api_thread.daemon = True
        api_thread.start()
        interactive_chat()
    elif args.mode == 'api':
        start_server(args.host, args.port)
    else:
        interactive_chat()

# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()