from flask import Flask, request, jsonify
import torch
import json
import os
from typing import List, Dict, Optional
from config import Config
from model import BertNLU
from transformers import BertTokenizer

app = Flask(__name__)

# Global resources, loaded exactly once by load_resources() and cached here.
_model: Optional[BertNLU] = None          # trained intent+slot model (eval mode)
_tokenizer: Optional[BertTokenizer] = None  # tokenizer matching the training run
_intent_id2label: Dict[str, str] = {}     # intent index (as str) -> intent label
_slot_id2label: Dict[str, str] = {}       # slot index (as str) -> BIO slot label
_o_slot_id: int = -1                      # id of the 'O' (outside) tag; -1 if absent


def load_resources():
    """Load model, tokenizer and label maps into module globals (idempotent).

    Returns immediately once ``_model`` is set, so repeated calls are cheap.
    On any failure the error is printed and the exception is re-raised so the
    caller (startup or first request) fails loudly.
    """
    global _model, _tokenizer, _intent_id2label, _slot_id2label, _o_slot_id

    if _model is not None:  # already loaded -> nothing to do
        return

    try:
        # Load the id<->label maps produced at training time.
        with open(f"{Config.model_dir}/intent_map.json", 'r', encoding='utf-8') as f:
            intent_map = json.load(f)
            _intent_id2label = intent_map['id2intent']

        with open(f"{Config.model_dir}/slot_map.json", 'r', encoding='utf-8') as f:
            slot_map = json.load(f)
            _slot_id2label = slot_map['id2slot']
            _o_slot_id = slot_map['slot2id'].get('O', -1)

        # Tokenizer must be the same one used during training.
        _tokenizer = BertTokenizer.from_pretrained(Config.bert_model)
        if _tokenizer.pad_token is None:
            # NOTE(review): BERT checkpoints normally define [PAD]; this
            # fallback borrows eos_token only when one is missing.
            _tokenizer.pad_token = _tokenizer.eos_token

        # Build the model on GPU when available, else CPU.
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        _model = BertNLU(
            num_intents=len(_intent_id2label),
            num_slots=len(_slot_id2label)
        ).to(device)

        model_path = os.path.join(Config.model_dir, "bert_model.bin")
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"模型文件不存在: {model_path}")

        _model.load_state_dict(torch.load(model_path, map_location=device))
        _model.eval()  # inference mode: disables dropout etc.
        print(f"资源加载完成（设备: {device}）\n")

    except Exception as e:
        # Boundary catch: report, then re-raise so startup aborts visibly.
        print(f"资源加载失败: {str(e)}")
        raise


def clean_slot_value(slot_name: str, value: str) -> str:
    """Post-process a raw slot value by trimming slot-specific filler characters.

    :param slot_name: slot the value was extracted for (selects the rule set)
    :param value: raw value assembled from model tokens
    :return: value with prepositions/particles stripped, then whitespace-trimmed
    """
    if not value:
        return value

    if slot_name in ("departure", "destination"):
        # Locations: drop leading "from"-style and trailing "to"-style prepositions.
        value = value.lstrip("从自由").rstrip("到去往的")

    elif slot_name == "query_type":
        # Query type: drop trailing modal particles and punctuation.
        value = value.rstrip("吗呢呀吧？的了到")

    elif slot_name == "datetime_date":
        # Date: drop a single trailing possessive "的".
        if value.endswith("的"):
            value = value[:-1]

    return value.strip()


def _commit_slot(slots: Dict[str, dict], slot_name: str,
                 tokens: List[str], probs: List[float]) -> None:
    """Finalize one accumulated slot span: clean its value and record it.

    NOTE: a later span with the same slot name overwrites an earlier one
    (plain dict assignment) — same behavior as the original inline code.
    """
    slots[slot_name] = {
        "value": clean_slot_value(slot_name, "".join(tokens)),
        "avg_confidence": round(sum(probs) / len(probs), 4)
    }


def predict(text: str) -> Dict:
    """
    Predict the intent and slots of a text and return the result.

    :param text: input utterance
    :return: dict with "input_text", "intent" ({label, confidence}) and
             "slots" ({name: {value, avg_confidence}}), or {"error": ...}
             on empty input / any prediction failure.
    """
    # Lazily load model/tokenizer/label maps on first call.
    load_resources()

    # Input validation.
    text = text.strip()
    if not text:
        return {"error": "输入文本不能为空！"}

    try:
        # 1. Tokenize and convert to ids.
        # NOTE(review): no [CLS]/[SEP] are added here — assumes training used
        # the same raw-token scheme; confirm against the training pipeline.
        tokens = _tokenizer.tokenize(text)
        token_ids = _tokenizer.convert_tokens_to_ids(tokens)

        # Truncate / pad to the fixed maximum sequence length.
        if len(token_ids) > Config.max_seq_len:
            token_ids = token_ids[:Config.max_seq_len]
            processed_tokens = tokens[:Config.max_seq_len]
        else:
            pad_length = Config.max_seq_len - len(token_ids)
            token_ids += [_tokenizer.pad_token_id] * pad_length
            processed_tokens = tokens + [_tokenizer.pad_token] * pad_length

        # Attention mask: 1 for real tokens, 0 for padding.
        attention_mask = [1 if tid != _tokenizer.pad_token_id else 0 for tid in token_ids]
        device = next(_model.parameters()).device
        input_ids = torch.tensor([token_ids], dtype=torch.long).to(device)
        mask_tensor = torch.tensor([attention_mask], dtype=torch.long).to(device)

        # 2. Forward pass (no gradients needed for inference).
        with torch.no_grad():
            intent_logits, slot_logits = _model(input_ids, mask_tensor)

        # 3. Decode intent: argmax over softmax probabilities.
        intent_probs = torch.softmax(intent_logits, dim=1).squeeze(0).cpu().numpy()
        intent_idx = int(intent_probs.argmax())
        intent = _intent_id2label.get(str(intent_idx), "unknown")
        intent_confidence = round(float(intent_probs[intent_idx]), 4)

        # 4. Decode slots with a simple BIO scan over the token sequence.
        slot_probs = torch.softmax(slot_logits, dim=2).squeeze(0).cpu().numpy()
        slots: Dict[str, dict] = {}
        current_slot: Optional[str] = None
        current_tokens: List[str] = []
        current_probs: List[float] = []

        for token, mask_val, sp in zip(processed_tokens, attention_mask, slot_probs):
            if mask_val == 0 or token == _tokenizer.pad_token:
                break  # padding starts here; nothing meaningful follows

            max_slot_idx = int(sp.argmax())
            max_slot_label = _slot_id2label.get(str(max_slot_idx), "O")
            max_prob = float(sp[max_slot_idx])

            # Outside tag: close any span in progress.
            if max_slot_label == "O":
                if current_slot:
                    _commit_slot(slots, current_slot, current_tokens, current_probs)
                    current_slot = None
                    current_tokens = []
                    current_probs = []
                continue

            # Begin tag: close the previous span, open a new one.
            if max_slot_label.startswith("B-"):
                if current_slot:
                    _commit_slot(slots, current_slot, current_tokens, current_probs)
                current_slot = max_slot_label[2:]
                current_tokens = [token]
                current_probs = [max_prob]

            # Inside tag that matches the open span: extend it.
            elif max_slot_label.startswith("I-") and current_slot == max_slot_label[2:]:
                current_tokens.append(token)
                current_probs.append(max_prob)

            # Inconsistent tag (e.g. I- without matching B-): drop the open span.
            else:
                if current_slot:
                    _commit_slot(slots, current_slot, current_tokens, current_probs)
                    current_slot = None
                    current_tokens = []
                    current_probs = []

        # Close the final span, if any.
        if current_slot:
            _commit_slot(slots, current_slot, current_tokens, current_probs)

        return {
            "input_text": text,
            "intent": {
                "label": intent,
                "confidence": intent_confidence
            },
            "slots": slots
        }

    except Exception as e:
        # Boundary catch: return the failure as JSON instead of a 500.
        return {"error": f"预测失败: {str(e)}"}


@app.route('/predict', methods=['POST'])
def predict_endpoint():
    """Flask endpoint: accept a JSON body with a "text" field, return the prediction.

    Returns HTTP 400 with an error payload when the body is not valid JSON or
    lacks "text"; otherwise HTTP 200 with the predict() result (which may
    itself carry an "error" key).
    """
    # silent=True: a missing/malformed JSON body yields None instead of an
    # immediate HTTP 415/400 from Flask, so the friendly error below is reached.
    data = request.get_json(silent=True)
    if not data or 'text' not in data:
        return jsonify({"error": "请求必须包含text字段"}), 400

    result = predict(data['text'])
    return jsonify(result)


if __name__ == '__main__':
    # Warm-load model/tokenizer before serving so the first request is fast
    # and startup fails immediately if resources are missing.
    load_resources()
    # Start the Flask service on port 9142, reachable from other hosts.
    app.run(host='0.0.0.0', port=9142, debug=False)