from flask import Flask, render_template, request, jsonify
from modelscope import AutoModelForCausalLM, AutoTokenizer
import torch
import os
import json
import time
from datetime import datetime
import subprocess
import sys

app = Flask(__name__)

# Module-level model state: the original and the AWQ-quantized
# model/tokenizer pairs, plus the flag selecting which one to use.
model = None
tokenizer = None
quantized_model = None
quantized_tokenizer = None
use_quantized = False

# Per-endpoint usage counters; keys are the names passed to update_api_stats().
api_stats = {
    endpoint: {'count': 0, 'last_used': None}
    for endpoint in ('generate', 'rewrite', 'chat', 'title', 'quantized_generate')
}

def load_model():
    """Load a tokenizer/model pair, preferring a cached AWQ-quantized build.

    Populates the module globals ``model``/``tokenizer`` (original weights)
    or ``quantized_model``/``quantized_tokenizer`` and sets ``use_quantized``.
    Safe to call repeatedly: an already-loaded variant is not reloaded.
    """
    global model, tokenizer, quantized_model, quantized_tokenizer, use_quantized

    model_name = "Qwen/Qwen3-0.6B"
    cache_dir = os.path.join(os.getcwd(), "model_cache")
    os.makedirs(cache_dir, exist_ok=True)

    # Try the quantized build once, if its artifacts are on disk.
    # BUGFIX: the guard used to be `... and not use_quantized`, which inverted
    # the intent — after api_switch_model set use_quantized=True the quantized
    # model was never loaded, and switching back to the original re-loaded the
    # quantized one and flipped the flag back. Gate on "not loaded yet" instead.
    quantized_dir = os.path.join(cache_dir, "Qwen_Qwen3-0.6B_awq_4bit")
    if os.path.exists(quantized_dir) and quantized_model is None:
        print("检测到量化模型，尝试加载...")
        try:
            # Imported lazily: AutoAWQ is an optional dependency.
            from awq import AutoAWQForCausalLM
            quantized_model = AutoAWQForCausalLM.from_quantized(
                quantized_dir,
                fuse_layers=True,
                trust_remote_code=True,
                safetensors=True,
                device_map="cuda" if torch.cuda.is_available() else "cpu"
            )
            quantized_tokenizer = AutoTokenizer.from_pretrained(
                quantized_dir,
                trust_remote_code=True
            )
            use_quantized = True
            print("量化模型加载完成！")
            return
        except ImportError:
            print("AutoAWQ未安装，使用原始模型")
        except Exception as e:
            print(f"加载量化模型失败: {e}")
        # Quantized load failed — make sure callers fall back to the original.
        use_quantized = False

    # Load the original (non-quantized) model once.
    if model is None:
        print("正在加载原始模型...")
        tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            dtype="auto",
            device_map="cuda" if torch.cuda.is_available() else "cpu",
            cache_dir=cache_dir,
            local_files_only=False
        )
        print("原始模型加载完成！")

def _split_thinking(tok, output_ids):
    """Split generated token ids at the LAST </think> token (id 151668 in
    Qwen3's vocabulary) and decode both halves.

    Returns (thinking_text, answer_text); if no </think> token is present,
    everything is treated as the answer.
    """
    try:
        # Search from the end: index of the last occurrence of 151668, +1.
        split_at = len(output_ids) - output_ids[::-1].index(151668)
    except ValueError:
        split_at = 0
    thinking = tok.decode(output_ids[:split_at], skip_special_tokens=True).strip("\n")
    answer = tok.decode(output_ids[split_at:], skip_special_tokens=True).strip("\n")
    return thinking, answer


def _chat_generate(tok, mdl, prompt, max_tokens, temperature, model_type):
    """Run one chat-template generation on (tok, mdl) and package the result."""
    messages = [{"role": "user", "content": prompt}]
    text = tok.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=True
    )
    model_inputs = tok([text], return_tensors="pt").to(mdl.device)

    with torch.no_grad():
        generated_ids = mdl.generate(
            **model_inputs,
            max_new_tokens=max_tokens,
            temperature=temperature,
            do_sample=True
        )

    # Drop the prompt tokens; keep only what the model generated.
    output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
    thinking, answer = _split_thinking(tok, output_ids)
    return {
        "thinking": thinking,
        "response": answer,
        "model_type": model_type
    }


def generate_response(prompt, max_tokens=2048, temperature=0.7, use_quantized_model=None):
    """Generate a chat response for *prompt*.

    Args:
        prompt: user message.
        max_tokens: generation budget (max_new_tokens).
        temperature: sampling temperature.
        use_quantized_model: force (True) or forbid (False) the quantized
            model; None (default) honors the global ``use_quantized`` flag.
            BUGFIX: the old default of False meant the routes never used the
            quantized model even after /api/switch_model enabled it.

    Returns:
        dict with "thinking", "response" and "model_type" keys.
    """
    global use_quantized

    if use_quantized_model is None:
        use_quantized_model = use_quantized

    if use_quantized_model and use_quantized and quantized_model is not None:
        update_api_stats('quantized_generate')
        return _chat_generate(
            quantized_tokenizer, quantized_model,
            prompt, max_tokens, temperature, "quantized"
        )

    if model is None:
        load_model()
    # load_model() may have produced only the quantized variant (quantized
    # artifacts on disk); fall back to it instead of crashing on model=None.
    if model is None and quantized_model is not None:
        update_api_stats('quantized_generate')
        return _chat_generate(
            quantized_tokenizer, quantized_model,
            prompt, max_tokens, temperature, "quantized"
        )

    return _chat_generate(tokenizer, model, prompt, max_tokens, temperature, "original")

def update_api_stats(api_name):
    """Record one use of *api_name*: bump its counter, stamp the time.

    Unknown names are registered on first use instead of raising KeyError,
    so adding a new endpoint cannot crash its first request.
    """
    entry = api_stats.setdefault(api_name, {'count': 0, 'last_used': None})
    entry['count'] += 1
    entry['last_used'] = datetime.now().isoformat()

@app.route('/')
def index():
    """Serve the front-end single-page UI (templates/index.html)."""
    return render_template('index.html')

@app.route('/api/status', methods=['GET'])
def api_status():
    """Report server status, active model variant, device and usage stats."""
    model_info = 'Qwen3-0.6B (原始)'
    if use_quantized and quantized_model is not None:
        # BUGFIX: acronym typo — the method is AWQ, not "AQW".
        model_info = 'Qwen3-0.6B (AWQ 4位量化)'

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    return jsonify({
        'status': 'running',
        'model': 'Qwen/Qwen3-0.6B',
        'model_info': model_info,
        'device': device,
        # BUGFIX: either resident variant counts as "loaded"; previously this
        # reported False while the quantized model was serving requests.
        'model_loaded': model is not None or quantized_model is not None,
        'api_stats': api_stats,
        'server_time': datetime.now().isoformat()
    })

@app.route('/api/generate', methods=['POST'])
def api_generate():
    """Text-generation endpoint.

    Expects JSON {"prompt": str, "max_tokens": int=2048, "temperature": float=0.7};
    returns the model answer plus its "thinking" text.
    """
    try:
        update_api_stats('generate')
        # BUGFIX: request.json raises on a missing/non-JSON body, which the
        # broad handler turned into a 500; get_json(silent=True) lets the
        # empty-prompt check return a clean 400 instead.
        data = request.get_json(silent=True) or {}
        prompt = data.get('prompt', '')
        max_tokens = data.get('max_tokens', 2048)
        temperature = data.get('temperature', 0.7)

        if not prompt:
            return jsonify({"error": "请输入文本内容"}), 400

        result = generate_response(prompt, max_tokens, temperature)
        return jsonify({
            "success": True,
            "result": result['response'],
            "thinking": result['thinking'],
            "timestamp": datetime.now().isoformat()
        })

    except Exception as e:
        return jsonify({"success": False, "error": str(e)}), 500

@app.route('/api/rewrite', methods=['POST'])
def api_rewrite():
    """Copy-polishing endpoint: rewrite the submitted text in a given style."""
    try:
        update_api_stats('rewrite')
        payload = request.json
        source_text = payload.get('text', '')
        target_style = payload.get('style', 'professional')

        if not source_text:
            return jsonify({"error": "请输入需要润色的文本"}), 400

        # Build the rewriting instruction around the user's text.
        rewrite_prompt = (
            f"请将以下文本润色为{target_style}风格：\n\n{source_text}\n\n润色后的文本："
        )
        outcome = generate_response(rewrite_prompt, max_tokens=1024, temperature=0.3)

        return jsonify({
            "success": True,
            "original": source_text,
            "rewritten": outcome['response'],
            "thinking": outcome['thinking'],
            "style": target_style,
            "timestamp": datetime.now().isoformat()
        })

    except Exception as e:
        return jsonify({"success": False, "error": str(e)}), 500



@app.route('/api/quantize', methods=['POST'])
def api_quantize():
    """Run the AWQ quantization script, then reload the model on success."""
    try:
        global use_quantized

        payload = request.json
        bits = payload.get('bits', 4)
        model_name = payload.get('model', "Qwen/Qwen3-0.6B")

        # Quantization happens in a separate process via the helper script.
        command = [
            sys.executable, "aqw_quantizer.py",
            "--model", model_name,
            "--bits", str(bits),
            "--benchmark"
        ]
        proc = subprocess.run(command, capture_output=True, text=True, cwd=os.getcwd())

        if proc.returncode != 0:
            return jsonify({
                "success": False,
                "error": "量化过程失败",
                "output": proc.stderr,
                "timestamp": datetime.now().isoformat()
            }), 500

        # Switch over to the freshly quantized weights.
        use_quantized = True
        load_model()

        return jsonify({
            "success": True,
            "message": "模型量化完成",
            "output": proc.stdout,
            "bits": bits,
            "timestamp": datetime.now().isoformat()
        })

    except Exception as e:
        return jsonify({"success": False, "error": str(e)}), 500

@app.route('/api/switch_model', methods=['POST'])
def api_switch_model():
    """Toggle between the original and the quantized model variant."""
    try:
        global use_quantized
        use_quantized = request.json.get('use_quantized', False)

        # Make sure the selected variant is actually resident.
        load_model()

        variant = '量化' if use_quantized else '原始'
        return jsonify({
            "success": True,
            "message": f"已切换到{variant}模型",
            "use_quantized": use_quantized,
            "timestamp": datetime.now().isoformat()
        })

    except Exception as e:
        return jsonify({"success": False, "error": str(e)}), 500

@app.route('/api/chat', methods=['POST'])
def api_chat():
    """Chat endpoint: run the model on the user's prompt, return raw result."""
    try:
        update_api_stats('chat')
        question = request.json.get('prompt', '')

        if not question:
            return jsonify({"error": "请输入问题"}), 400

        return jsonify(generate_response(question))

    except Exception as e:
        return jsonify({"error": str(e)}), 500



if __name__ == '__main__':
    print("启动Web应用...")
    print("可用API端点：")
    # BUGFIX: the printed paths were wrong — the chat route is /api/chat,
    # not /chat, and the stray "(/generate)"-style annotations listed
    # endpoints that do not exist.
    print("  GET  /              - 前端界面")
    print("  GET  /api/status    - API状态查询")
    print("  POST /api/generate  - 文本生成")
    print("  POST /api/rewrite   - 文案润色")
    print("  POST /api/chat      - 聊天接口")
    print("\n服务部署于内网环境: http://127.0.0.1:5000")

    load_model()
    # use_reloader=False: the debug reloader re-imports the module in a child
    # process, which would load the (large) model a second time.
    app.run(host='127.0.0.1', port=5000, debug=True, use_reloader=False)