#!/usr/bin/env python3
"""
配套的API服务器 - 完全可运行版本
"""
from flask import Flask, request, jsonify
from flask_cors import CORS
import torch
import torch.nn as nn
import json
import pickle
import os
import logging

app = Flask(__name__)
CORS(app)

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# 全局变量
model = None
tokenizer = None
device = None

# 重定义模型类（必须与训练时一致）
# Model class re-declared here; its architecture MUST match the one used
# at training time or load_state_dict() in load_model() will fail.
class SimpleModel(nn.Module):
    """Two-layer LSTM language model: embedding -> LSTM -> vocab logits."""

    def __init__(self, vocab_size, embed_dim=64, hidden_dim=128):
        super().__init__()
        # Kept as attributes so /model/info can report them via getattr().
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim
        self.hidden_dim = hidden_dim

        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.lstm = nn.LSTM(embed_dim, hidden_dim, batch_first=True, num_layers=2)
        self.output = nn.Linear(hidden_dim, vocab_size)
        self.dropout = nn.Dropout(0.1)

    def forward(self, input_ids):
        """Return logits of shape (batch, seq_len, vocab_size)."""
        embeds = self.embedding(input_ids)
        embeds = self.dropout(embeds)
        lstm_out, _ = self.lstm(embeds)
        logits = self.output(lstm_out)
        return logits

    def generate(self, tokenizer, prompt, max_length=20):
        """Autoregressively sample a reply for *prompt*.

        Args:
            tokenizer: object providing encode(), decode() and eos_token_id.
            prompt: user input string.
            max_length: maximum number of new tokens to sample.

        Returns:
            The generated continuation (prompt stripped), or a canned
            fallback phrase when sampling produced nothing.
        """
        self.eval()

        # BUGFIX: inputs must live on the same device as the model
        # parameters; previously the tensor was always built on CPU,
        # which crashes when load_model() moved the model to CUDA.
        device = next(self.parameters()).device

        input_ids = tokenizer.encode(prompt, max_length=50)
        generated_ids = input_ids.copy()

        with torch.no_grad():
            for _ in range(max_length):
                # Hard cap on total sequence length (matches encode max_length).
                if len(generated_ids) > 50:
                    break

                current_input = torch.tensor([generated_ids], device=device)
                logits = self.forward(current_input)
                next_token_logits = logits[0, -1, :]
                # Temperature 0.8 sharpens the distribution slightly.
                probabilities = torch.softmax(next_token_logits / 0.8, dim=-1)
                next_token = torch.multinomial(probabilities, 1).item()

                if next_token == tokenizer.eos_token_id:
                    break

                generated_ids.append(next_token)

        generated_text = tokenizer.decode(generated_ids)
        # Drop the echoed prompt; assumes decode() reproduces the prompt
        # prefix verbatim — TODO confirm against the tokenizer implementation.
        response = generated_text[len(prompt):].strip()

        if not response:
            import random
            default_responses = ["我理解了", "好的", "明白了", "可以的", "没问题"]
            response = random.choice(default_responses)

        return response

def load_model():
    """Load config, tokenizer and weights from models/working into globals.

    Populates the module-level ``model``, ``tokenizer`` and ``device``.

    Returns:
        True on success, False when any artifact is missing or loading fails.
    """
    global model, tokenizer, device

    try:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        model_path = 'models/working'

        if not os.path.exists(model_path):
            logger.error("模型目录不存在，请先训练模型")
            return False

        # Architecture hyperparameters saved at training time.
        config_path = os.path.join(model_path, 'config.json')
        if not os.path.exists(config_path):
            logger.error("配置文件不存在")
            return False

        with open(config_path, 'r', encoding='utf-8') as f:
            config = json.load(f)

        tokenizer_path = os.path.join(model_path, 'tokenizer.pkl')
        if not os.path.exists(tokenizer_path):
            logger.error("tokenizer文件不存在")
            return False

        # SECURITY: pickle.load executes arbitrary code from the file.
        # Only load tokenizer files produced by the trusted training script.
        with open(tokenizer_path, 'rb') as f:
            tokenizer = pickle.load(f)

        # Must mirror the training-time architecture exactly.
        model = SimpleModel(
            vocab_size=config['vocab_size'],
            embed_dim=config['embed_dim'],
            hidden_dim=config['hidden_dim']
        )

        weights_path = os.path.join(model_path, 'model_weights.pth')
        if not os.path.exists(weights_path):
            logger.error("模型权重文件不存在")
            return False

        # weights_only=True restricts torch.load to tensors/containers,
        # preventing arbitrary-code execution from a tampered weights file.
        # A plain state_dict loads identically under this flag.
        model.load_state_dict(
            torch.load(weights_path, map_location=device, weights_only=True)
        )
        model.to(device)
        model.eval()

        logger.info("模型加载成功")
        return True

    except Exception as e:
        logger.error(f"模型加载失败: {str(e)}")
        return False

@app.route('/health', methods=['GET'])
def health_check():
    """Liveness probe: reports whether the model is loaded and on which device."""
    payload = {
        'status': 'healthy',
        'model_loaded': model is not None,
        'device': None if device is None else str(device),
    }
    return jsonify(payload)

@app.route('/chat', methods=['POST'])
def chat():
    """Generate a chat reply.

    Expects JSON body ``{"message": str, "max_length": int (optional)}``.
    Returns 400 for malformed input, 500 when the model is unavailable or
    generation raises.
    """
    global model, tokenizer

    if model is None or tokenizer is None:
        return jsonify({
            'error': '模型未加载',
            'code': 'MODEL_NOT_LOADED'
        }), 500

    try:
        # silent=True: malformed/absent JSON yields None and is handled
        # below as a 400 instead of bubbling up as a framework error.
        data = request.get_json(silent=True)

        # Require a string 'message'; previously a non-string value (e.g.
        # an int) raised AttributeError on .strip() and surfaced as a
        # misleading 500 GENERATION_ERROR.
        if not data or 'message' not in data or not isinstance(data['message'], str):
            return jsonify({
                'error': '缺少必要参数: message',
                'code': 'MISSING_PARAMETER'
            }), 400

        user_message = data['message'].strip()
        if not user_message:
            return jsonify({
                'error': '消息不能为空',
                'code': 'EMPTY_MESSAGE'
            }), 400

        # Clamp generation length to [1, 30]; a non-int value previously
        # raised TypeError inside min(), and a negative value silently
        # skipped generation entirely. bool is excluded because it is a
        # subclass of int.
        raw_max = data.get('max_length', 20)
        if not isinstance(raw_max, int) or isinstance(raw_max, bool):
            raw_max = 20
        max_length = max(1, min(raw_max, 30))

        logger.info(f"用户输入: {user_message}")

        # 生成回复
        response = model.generate(tokenizer, user_message, max_length=max_length)

        logger.info(f"AI回复: {response}")

        return jsonify({
            'response': response,
            'input': user_message,
            'parameters': {
                'max_length': max_length
            }
        })

    except Exception as e:
        logger.error(f"对话生成失败: {str(e)}")
        return jsonify({
            'error': f'对话生成失败: {str(e)}',
            'code': 'GENERATION_ERROR'
        }), 500

@app.route('/model/info', methods=['GET'])
def model_info():
    """Report architecture details and parameter count of the loaded model."""
    global model, tokenizer

    if model is None:
        return jsonify({
            'error': '模型未加载',
            'code': 'MODEL_NOT_LOADED'
        }), 500

    try:
        # Total trainable + non-trainable parameter count.
        param_count = 0
        for param in model.parameters():
            param_count += param.numel()

        info = {
            'model_type': 'SimpleModel',
            'vocab_size': tokenizer.vocab_size if tokenizer else 0,
            # getattr with defaults in case an older checkpointed model
            # lacks these attributes.
            'embed_dim': getattr(model, 'embed_dim', 64),
            'hidden_dim': getattr(model, 'hidden_dim', 128),
            'total_parameters': param_count,
            'device': str(device)
        }
        return jsonify(info)

    except Exception as e:
        logger.error(f"获取模型信息失败: {str(e)}")
        return jsonify({
            'error': f'获取模型信息失败: {str(e)}',
            'code': 'INFO_ERROR'
        }), 500

@app.route('/model/reload', methods=['POST'])
def reload_model():
    """Re-run load_model() and report the outcome as JSON."""
    logger.info("重新加载模型")

    success = load_model()
    if not success:
        return jsonify({
            'error': '模型重新加载失败',
            'code': 'RELOAD_FAILED'
        }), 500
    return jsonify({'message': '模型重新加载成功'})

@app.errorhandler(404)
def not_found(error):
    """Uniform JSON body for unknown routes."""
    body = {'error': '接口不存在', 'code': 'NOT_FOUND'}
    return jsonify(body), 404

@app.errorhandler(500)
def internal_error(error):
    """Uniform JSON body for unhandled server errors."""
    body = {'error': '服务器内部错误', 'code': 'INTERNAL_ERROR'}
    return jsonify(body), 500

if __name__ == '__main__':
    import argparse

    # Command-line options: bind address, port, and Flask debug mode.
    parser = argparse.ArgumentParser(description='AI模型API服务器')
    parser.add_argument('--host', type=str, default='127.0.0.1', help='服务器地址')
    parser.add_argument('--port', type=int, default=5000, help='服务器端口')
    parser.add_argument('--debug', action='store_true', help='调试模式')
    args = parser.parse_args()

    # Load the model eagerly so the first request does not pay the cost;
    # the server still starts (degraded) if loading fails.
    logger.info("正在初始化AI模型API服务器...")
    if load_model():
        logger.info("模型加载成功")
    else:
        logger.warning("模型未加载成功，请先运行: python train_final.py")

    logger.info(f"启动API服务器: http://{args.host}:{args.port}")
    app.run(host=args.host, port=args.port, debug=args.debug)