#!/usr/bin/env python3
"""
完全工作的API服务器 - 配套版本
"""
from flask import Flask, request, jsonify
from flask_cors import CORS
import torch
import torch.nn as nn
import json
import os
import logging

app = Flask(__name__)
CORS(app)

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Global state, populated by load_model()
model = None
tokenizer = None

# Class definitions identical to the training script's (required so the
# checkpoint's tokenizer dict and state_dict can be rebuilt here)
class SimpleTokenizer:
    """Greedy longest-match tokenizer over a small, fixed Chinese vocabulary.

    Tokenization tries windows of up to 4 characters, longest first, so
    multi-character words ('你好', '不客气') win over their single characters.
    Ids 0/1/2 are reserved for '<PAD>'/'<UNK>'/'<EOS>'.
    """

    def __init__(self):
        self.word_to_id = {}
        self.id_to_word = {}

        vocab = [
            '<PAD>', '<UNK>', '<EOS>',
            '你', '好', '我', '是', '的', '了', '在', '有', '和', '人', '这', '中',
            '一', '个', '不', '也', '就', '会', '能', '说', '可', '以', '要', '他',
            '她', '它', '们', '吗', '呢', '啊', '哦', '嗯', '对', '很', '都', '没',
            'AI', '助手', '帮助', '问题', '回答', '谢谢', '不客气', '再见', '早上好',
            '你好', '是谁', '什么', '怎么', '为什么', '在哪', '多少', '几点',
            # Bug fix: spell the four CJK quotation marks as \u escapes.
            # The previous literal (straight quotes) made ''', ''' parse as
            # ONE triple-quoted string ", " instead of two quote tokens,
            # and duplicated '"', corrupting the vocabulary.
            '！', '？', '。', '，', '：', '；',
            '\u201c', '\u201d', '\u2018', '\u2019', '（', '）'
        ]

        for i, word in enumerate(vocab):
            self.word_to_id[word] = i
            self.id_to_word[i] = word

        self.vocab_size = len(vocab)
        self.pad_token_id = 0
        self.unk_token_id = 1
        self.eos_token_id = 2

    def tokenize(self, text):
        """Split *text* into known vocab words via greedy longest match.

        Unknown non-whitespace characters become '<UNK>'; whitespace is
        silently dropped.
        """
        tokens = []
        i = 0
        while i < len(text):
            found = False
            # Longest window first so multi-char words beat single chars.
            for length in range(min(4, len(text) - i), 0, -1):
                word = text[i:i+length]
                if word in self.word_to_id:
                    tokens.append(word)
                    i += length
                    found = True
                    break
            if not found:
                char = text[i]
                if char.strip():  # non-whitespace unknown char -> <UNK>
                    tokens.append('<UNK>')
                i += 1
        return tokens

    def encode(self, text, max_length=None, padding=False):
        """Return token ids for *text*, optionally truncated and/or padded.

        Padding (with pad_token_id) only applies when *max_length* is given.
        """
        tokens = self.tokenize(text)
        token_ids = [self.word_to_id.get(token, self.unk_token_id) for token in tokens]

        if max_length and len(token_ids) > max_length:
            token_ids = token_ids[:max_length]

        if padding and max_length:
            while len(token_ids) < max_length:
                token_ids.append(self.pad_token_id)

        return token_ids

    def decode(self, token_ids):
        """Map ids back to text, dropping special tokens and unknown ids.

        Accepts a list of ints or a 1-D torch.Tensor.
        """
        if isinstance(token_ids, torch.Tensor):
            token_ids = token_ids.tolist()

        words = []
        for token_id in token_ids:
            if token_id in self.id_to_word:
                word = self.id_to_word[token_id]
                if word not in ['<PAD>', '<UNK>', '<EOS>']:
                    words.append(word)

        return ''.join(words)

    @classmethod
    def from_dict(cls, data):
        """Rebuild a tokenizer from a serialized dict.

        ``id_to_word`` keys may be strings (e.g. after a JSON round trip),
        so they are coerced back to int.
        """
        tokenizer = cls.__new__(cls)  # bypass __init__: all state comes from `data`
        tokenizer.word_to_id = data['word_to_id']
        tokenizer.id_to_word = {int(k): v for k, v in data['id_to_word'].items()}
        tokenizer.vocab_size = data['vocab_size']
        tokenizer.pad_token_id = data['pad_token_id']
        tokenizer.unk_token_id = data['unk_token_id']
        tokenizer.eos_token_id = data['eos_token_id']
        return tokenizer

class SimpleModel(nn.Module):
    """Two-layer LSTM language model used for next-token chat generation."""

    def __init__(self, vocab_size, embed_dim=64, hidden_dim=128):
        super().__init__()
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim
        self.hidden_dim = hidden_dim

        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.lstm = nn.LSTM(embed_dim, hidden_dim, batch_first=True, num_layers=2)
        self.output = nn.Linear(hidden_dim, vocab_size)
        self.dropout = nn.Dropout(0.1)

    def forward(self, input_ids):
        """Return per-position next-token logits, shape (batch, seq, vocab_size)."""
        embeds = self.embedding(input_ids)
        embeds = self.dropout(embeds)
        lstm_out, _ = self.lstm(embeds)
        logits = self.output(lstm_out)
        return logits

    def generate(self, tokenizer, prompt, max_length=20):
        """Sample a continuation of *prompt* and return it as a string.

        Sampling is temperature-0.8 multinomial over the last position's
        logits; generation stops at EOS, after *max_length* new tokens, or
        once the running sequence exceeds 50 tokens. Falls back to a canned
        reply when nothing decodable was produced.
        """
        self.eval()

        input_ids = tokenizer.encode(prompt, max_length=50)
        generated_ids = list(input_ids)

        with torch.no_grad():
            # Guard: an empty prompt encoding would feed the LSTM a
            # zero-length sequence (which raises); skip straight to fallback.
            if generated_ids:
                for _ in range(max_length):
                    if len(generated_ids) > 50:
                        break

                    current_input = torch.tensor([generated_ids], dtype=torch.long)
                    logits = self.forward(current_input)
                    next_token_logits = logits[0, -1, :]
                    # Temperature 0.8 slightly sharpens the distribution.
                    probabilities = torch.softmax(next_token_logits / 0.8, dim=-1)
                    next_token = torch.multinomial(probabilities, 1).item()

                    if next_token == tokenizer.eos_token_id:
                        break

                    generated_ids.append(next_token)

        # Bug fix: decode only the newly sampled ids. The old code decoded
        # the whole sequence and sliced by len(prompt), but decode() drops
        # PAD/UNK tokens, so character offsets need not line up with the
        # prompt and the reply could be truncated or include prompt text.
        response = tokenizer.decode(generated_ids[len(input_ids):]).strip()

        if not response:
            import random
            responses = ["好的", "明白了", "我理解了", "可以的", "没问题"]
            response = random.choice(responses)

        return response

def load_model():
    """Load the checkpoint into the module-level `model` and `tokenizer`.

    Reads models/final/complete_model.pth, which must contain 'tokenizer'
    (a SimpleTokenizer dict), 'config' (vocab_size/embed_dim/hidden_dim)
    and 'model_state_dict'. Returns True on success, False otherwise
    (missing file or any load error — errors are logged, not raised).
    """
    global model, tokenizer

    try:
        model_path = 'models/final/complete_model.pth'

        if not os.path.exists(model_path):
            logger.error("模型文件不存在，请先运行: python train_complete.py")
            return False

        # NOTE(security): torch.load unpickles arbitrary objects — only load
        # checkpoints from a trusted source.
        # Load onto CPU: generate() builds plain CPU tensors for inference,
        # so keeping everything on CPU avoids a device mismatch (the old
        # cuda map_location was never used by the inference path).
        data = torch.load(model_path, map_location='cpu')

        # Rebuild the tokenizer from its serialized dict
        tokenizer = SimpleTokenizer.from_dict(data['tokenizer'])

        # Rebuild the model with the architecture stored in the checkpoint
        config = data['config']
        model = SimpleModel(
            vocab_size=config['vocab_size'],
            embed_dim=config['embed_dim'],
            hidden_dim=config['hidden_dim']
        )

        # Load weights and switch to inference mode (disables dropout)
        model.load_state_dict(data['model_state_dict'])
        model.eval()

        logger.info("模型加载成功")
        return True

    except Exception as e:
        logger.error(f"模型加载失败: {str(e)}")
        return False

@app.route('/health', methods=['GET'])
def health():
    """Liveness probe: always 200, reports whether the model is loaded."""
    loaded = model is not None
    payload = {
        'status': 'healthy',
        'model_loaded': loaded,
        'message': 'AI系统运行正常' if loaded else '模型未加载',
    }
    return jsonify(payload)

@app.route('/chat', methods=['POST'])
def chat():
    """Generate a reply for a JSON body {'message': str, 'max_length': int?}.

    Returns 500 when the model is not loaded or generation fails, 400 on a
    missing/empty message.
    """
    if model is None or tokenizer is None:
        return jsonify({
            'error': '模型未加载，请先训练模型',
            'code': 'MODEL_NOT_LOADED'
        }), 500

    try:
        payload = request.get_json()
        if not payload or 'message' not in payload:
            return jsonify({'error': '缺少message参数'}), 400

        user_message = payload['message'].strip()
        if not user_message:
            return jsonify({'error': '消息不能为空'}), 400

        # Cap the client-requested generation length at 30 tokens.
        max_length = min(payload.get('max_length', 20), 30)

        logger.info(f"用户: {user_message}")
        response = model.generate(tokenizer, user_message, max_length=max_length)
        logger.info(f"AI: {response}")

        return jsonify({'response': response, 'input': user_message})

    except Exception as e:
        logger.error(f"对话失败: {str(e)}")
        return jsonify({'error': '对话生成失败'}), 500

@app.route('/model/info', methods=['GET'])
def model_info():
    """Describe the loaded model's architecture and parameter count."""
    if model is None:
        return jsonify({'error': '模型未加载'}), 500

    param_count = sum(p.numel() for p in model.parameters())
    info = {
        'model_type': 'SimpleModel',
        'vocab_size': tokenizer.vocab_size,
        'embed_dim': model.embed_dim,
        'hidden_dim': model.hidden_dim,
        'total_parameters': param_count,
    }
    return jsonify(info)

@app.route('/model/reload', methods=['POST'])
def reload():
    """Re-run load_model() and report the outcome."""
    if not load_model():
        return jsonify({'error': '模型加载失败'}), 500
    return jsonify({'message': '模型重新加载成功'})

if __name__ == '__main__':
    logger.info("启动AI系统API服务器...")
    
    # Try to load the model at startup; the server still starts without it
    # (the /model/reload endpoint can load it later).
    if load_model():
        logger.info("模型加载成功，系统准备就绪")
    else:
        logger.warning("模型未加载，请先运行: python train_complete.py")
    
    # Start the Flask development server on localhost only, debug disabled.
    logger.info("API服务器启动: http://127.0.0.1:5001")
    app.run(host='127.0.0.1', port=5001, debug=False)