# -*- coding: utf-8 -*-

# mcp_server.py

"""基于仓颉 + DeepSeek + MCP 的智能膳食分析助手"""

from fastapi import FastAPI
from pydantic import BaseModel
import json
from typing import List, Dict, Any
from fastapi.responses import StreamingResponse
from deepseek_client import stream_deepseek, async_stream_deepseek
from text_match import extract_food_simple, extract_food
from loguru import logger
from cachetools import TTLCache
import sys

# Load the food dictionary once at import time.
# Keys are food names; values hold static metadata consumed below
# ("tags", "effects", "avoid_with", "diet_type").
with open("food_tags.json", encoding="utf-8") as f:
    FOOD_TAGS: Dict[str, Dict[str, Any]] = json.load(f)

app = FastAPI()

class Input(BaseModel):
    """Request body for the /mcp and /mcp_stream endpoints."""
    # Raw user query text to analyse (keyword matching below targets Chinese).
    input: str

# Global cache: full prompt text -> complete model reply text.
CACHE = TTLCache(maxsize=500, ttl=1800)  # entries expire automatically after 30 minutes

# Replace loguru's default sink with a plain stdout sink at INFO level.
logger.remove()
logger.add(sys.stdout, level="INFO", enqueue=False, backtrace=False)

# ---- Base system role prompt shared by every scenario template ----
# (Chinese instruction text is runtime data sent to the model; do not translate.)
BASE_SYSTEM_PROMPT = (
    "你是一名资深注册营养师，擅长以简洁的 Markdown 格式给出科学、可执行的饮食建议。"
    "所有回答需包含以下小节：\n"
    "1. 总体评价\n"
    "2. 推荐摄入（量/食材）\n"
    "3. 过量风险\n"
    "4. 不宜同食\n"
    "5. 行动建议\n"
    "回答请使用中文，并尽量在 300 字以内。"
)

# ---- Scenario-specific prompt templates, keyed by the mode detected in
# _process_input. Each template embeds the user's text via {input}. ----
PROMPTS = {
    # Default scenario: review a described meal/diet.
    "nutrition_review": (
        BASE_SYSTEM_PROMPT
        + "\n\n【场景】饮食点评。请先总体评价，再按上表 1~5 小节输出：{input}"
    ),
    # Weight-loss meal plan: 3-day menu in table form with calorie estimates.
    "diet_plan": (
        BASE_SYSTEM_PROMPT
        + "\n\n【场景】减脂期餐单。请输出 3 日食谱 (表格形式)，并在每餐注明热量估计：{input}"
    ),
    # Over-consumption effects query.
    "effects_inquiry": (
        BASE_SYSTEM_PROMPT
        + "\n\n【场景】过量影响。请列出已知副作用及参考文献：{input}"
    ),
    # Food-incompatibility query.
    "avoid_inquiry": (
        BASE_SYSTEM_PROMPT
        + "\n\n【场景】相克查询。请说明不宜同食原因及替代方案：{input}"
    ),
}

def _process_input(user_input: str) -> Dict[str, Any]:
    """Shared analysis pipeline for both endpoints.

    Matches food keywords in *user_input* against FOOD_TAGS, aggregates the
    static metadata of the matched foods, picks a scenario template from
    intent keywords, and builds the final LLM prompt.

    Returns a dict with the aggregated metadata plus the finished prompt
    under the key ``"routed_input"``.
    """
    # === 1. Food keyword matching (exact + fuzzy) ===
    food_list, exact_hits, fuzzy_hits = extract_food(user_input, FOOD_TAGS.keys())

    # Log match details for debugging.
    logger.info(f"[匹配] 精确={exact_hits} 模糊={fuzzy_hits}")

    # === 2. Aggregate static info (deduplicated via set comprehensions) ===
    tags = list({tag for food in food_list for tag in FOOD_TAGS[food].get("tags", [])})
    # Walrus binding avoids the double FOOD_TAGS[food].get("effects") lookup.
    effects = {food: eff for food in food_list if (eff := FOOD_TAGS[food].get("effects"))}
    avoid_with = list({aw for food in food_list for aw in FOOD_TAGS[food].get("avoid_with", [])})
    diet_type = list({dt for food in food_list for dt in FOOD_TAGS[food].get("diet_type", [])})

    # === 3. Pick the scenario (mode) from intent keywords.
    # Order matters: diet-plan keywords win over effects/avoid queries;
    # anything unmatched falls back to a general nutrition review. ===
    mode = "nutrition_review"
    if any(k in user_input for k in ["减肥", "减脂", "低卡", "少油", "瘦身", "个人食谱"]):
        mode = "diet_plan"
    elif any(k in user_input for k in ["吃多了", "过量", "上火", "副作用", "影响"]):
        mode = "effects_inquiry"
    elif any(k in user_input for k in ["不能一起", "相克", "不宜同食", "一起吃"]):
        mode = "avoid_inquiry"

    new_prompt_base = PROMPTS[mode].format(input=user_input)

    # === 4. Append known static info so the model must cite it verbatim ===
    notes = []
    if effects:
        notes.append("【过量风险】" + "；".join(f"{food}:{desc}" for food, desc in effects.items()))
    if avoid_with:
        notes.append("【不宜同食】" + "、".join(avoid_with))
    extra_info = "；".join(notes)
    if extra_info:
        new_prompt_base += (
            "\n\n以下为已知静态信息（请务必先列出【过量风险】与【不宜同食】两个小节，并完整引用下列内容，否则视为回答不完整）："
            f"{extra_info}"
        )

    # Ask for a markdown bullet list. (A previous comment claimed JSON output
    # via response_format, which contradicted this instruction — removed.)
    new_prompt = new_prompt_base + "\n请使用 markdown bullet list 输出建议。"

    return {
        "food_list": food_list,
        "tags": tags,
        "effects": effects,
        "avoid_with": avoid_with,
        "diet_type": diet_type,
        "mode": mode,
        "routed_input": new_prompt,
    }

@app.post("/mcp")
async def route_prompt(data: Input):
    """Non-streaming endpoint: run the analysis and return it as JSON."""
    return _process_input(data.input)

@app.post("/mcp_stream")
async def route_prompt_stream(data: Input):
    """Streaming endpoint: SSE stream of one `meta` event, `token` events,
    then a final `done` event. Replies are cached by prompt text (see CACHE).
    """
    user_input = data.input
    result = _process_input(user_input)

    def _token_event(text: str) -> str:
        """Frame *text* as one well-formed SSE `token` event.

        SSE data payloads must not contain bare newlines, so every line of
        the text becomes its own `data:` line inside the event.
        """
        payload = "\n".join(f"data: {line}" for line in text.split("\n"))
        return f"event: token\n{payload}\n\n"

    async def event_generator():
        # Send everything except the prompt itself as a `meta` event first.
        meta_json = json.dumps({k: v for k, v in result.items() if k != 'routed_input'}, ensure_ascii=False)
        yield f"event: meta\ndata: {meta_json}\n\n"

        prompt_key = result["routed_input"]

        # Cache hit: replay the full cached reply as a single token event.
        if prompt_key in CACHE:
            logger.info("[缓存] 命中")
            yield _token_event(CACHE[prompt_key])
            yield "event: done\ndata: [DONE]\n\n"
            return

        logger.info("[缓存] 未命中")
        collected_chunks = []
        async for chunk in async_stream_deepseek(prompt_key):
            collected_chunks.append(chunk)
            # BUGFIX: a chunk may contain newlines; frame it the same way as
            # the cached path, otherwise the SSE stream is malformed.
            yield _token_event(chunk)

        # Cache the complete reply (TTL handled by CACHE).
        CACHE[prompt_key] = "".join(collected_chunks)

        # End-of-stream marker.
        yield "event: done\ndata: [DONE]\n\n"

    return StreamingResponse(event_generator(), media_type="text/event-stream")