#例【8-1】
from typing import Dict
from mcp.types import PromptUnit, PromptType

# Simulated text description produced by an image encoder.
caption_text = "一张展示城市高楼夜景的图像，灯光明亮，天空漆黑"

# Provenance details carried alongside the description.
prompt_metadata = {
    "source": "user-uploaded",
    "encoding": "BLIP2",
    "uri": "https://example.com/image/night-city.jpg",
}

# Wrap the description as an MCP-standard PromptUnit.
visual_unit = PromptUnit(
    type=PromptType.visual,
    name="city_night_image",
    content=caption_text,
    metadata=prompt_metadata,
)

# Inject into context (pseudo-code: context_manager.append_prompt(visual_unit))
print("注入成功：", visual_unit.dict())
# 输出结果：
# {
#   "type": "visual",
#   "name": "city_night_image",
#   "content": "一张展示城市高楼夜景的图像，灯光明亮，天空漆黑",
#   "metadata": {
#     "source": "user-uploaded",
#     "encoding": "BLIP2",
#     "uri": "https://example.com/image/night-city.jpg"
#   }
# }



#例【8-2】
# tools/image_captioner.py

from mcp.server import FastMCP
from mcp.types import ToolFunction, ToolMetadata
from pydantic import BaseModel, Field
from transformers import BlipProcessor, BlipForConditionalGeneration
from PIL import Image
import requests

# Step 1: define the input schema for the captioning tool
class ImageInput(BaseModel):
    """Request payload for `generate_caption`: the URL of the image to describe."""
    url: str = Field(..., description="图像URL地址")

# Step 2: create the FastMCP application that the tools register against
app = FastMCP("image-captioner")

# Load the BLIP captioning model and its preprocessor once at import time
# (can be swapped for a stronger model).
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

@app.tool()
async def generate_caption(data: ImageInput) -> str:
    """
    Generate a visual description (caption) for the image at `data.url`.

    Returns:
        The generated caption on success, or a Chinese error-message string
        on failure (the tool never raises across the MCP boundary).
    """
    try:
        # Bound the download: requests.get without a timeout can hang forever.
        response = requests.get(data.url, stream=True, timeout=30)
        # Fail fast on 404/5xx instead of letting PIL choke on an HTML error page.
        response.raise_for_status()
        image = Image.open(response.raw).convert("RGB")
        inputs = processor(image, return_tensors="pt")
        out = model.generate(**inputs, max_new_tokens=50)
        caption = processor.decode(out[0], skip_special_tokens=True)
        return caption
    except Exception as e:
        # Broad catch is deliberate: failures are reported as text.
        return f"图像加载失败：{str(e)}"



#例【8-3】
# tools/image_inference.py

from mcp.server import FastMCP
from pydantic import BaseModel, Field
from transformers import BlipProcessor, BlipForQuestionAnswering
import requests
from PIL import Image

# Step 1: create the MCP server hosting the VQA tool
app = FastMCP("image-inference")

# Step 2: load the BLIP visual question answering model and preprocessor
processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")

# Step 3: define the input format for the VQA tool
class InferenceInput(BaseModel):
    """Request payload for `visual_question_answer`: image URL plus a question."""
    image_url: str = Field(..., description="图像链接")
    question: str = Field(..., description="针对图像的提问")

@app.tool()
async def visual_question_answer(input: InferenceInput) -> str:
    """
    Visual question answering: answer `input.question` about the image at
    `input.image_url`.

    Returns:
        A formatted answer string, or a Chinese error-message string on failure.
    """
    try:
        # Timeout keeps an unreachable host from hanging the tool forever.
        response = requests.get(input.image_url, stream=True, timeout=30)
        response.raise_for_status()  # surface HTTP errors before PIL parses the body
        image = Image.open(response.raw).convert("RGB")
        inputs = processor(image, input.question, return_tensors="pt")
        output = model.generate(**inputs, max_new_tokens=10)
        answer = processor.decode(output[0], skip_special_tokens=True)
        return f"视觉问答结果：问题'{input.question}' 的答案是 '{answer}'"
    except Exception as e:
        return f"推理失败：{str(e)}"



#例【8-4】
# tools/image_region_qa.py

from mcp.server import FastMCP
from pydantic import BaseModel, Field
from PIL import Image
import requests
from transformers import BlipProcessor, BlipForQuestionAnswering

# MCP server hosting the region-level VQA tool
app = FastMCP("region-qa")

# BLIP VQA model and preprocessor, loaded once at import time
processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")

class RegionQuestion(BaseModel):
    """Request payload for `region_question_answer`: image, region description, question."""
    image_url: str = Field(..., description="图像地址")
    region_desc: str = Field(..., description="描述图像片段，如'左上角人物'")
    question: str = Field(..., description="基于该区域的提问")

@app.tool()
async def region_question_answer(input: RegionQuestion) -> str:
    """
    Region-level VQA: prefix the question with the region description so the
    model answers about the requested part of the image.

    Unlike the sibling tools this one lets exceptions propagate to the MCP
    runtime rather than returning an error string.
    """
    # Timeout + status check: don't hang on dead hosts, and fail fast on
    # 404/5xx instead of handing PIL an HTML error page.
    response = requests.get(input.image_url, stream=True, timeout=30)
    response.raise_for_status()
    image = Image.open(response.raw).convert("RGB")
    full_question = f"关于{input.region_desc}：{input.question}"
    inputs = processor(image, full_question, return_tensors="pt")
    out = model.generate(**inputs)
    answer = processor.decode(out[0], skip_special_tokens=True)
    return f"{full_question} 的回答是：{answer}"



#例【8-5】
# tools/audio_transcriber.py

from mcp.server import FastMCP
from pydantic import BaseModel, Field
import whisper
import requests
import os

# Initialize the MCP application for audio transcription
app = FastMCP("audio-transcriber")

# Load the Whisper model (size options: base, small, medium, large, ...)
model = whisper.load_model("base")

# Input format
class AudioInput(BaseModel):
    """Request payload for `transcribe_audio`: remote audio URL and its language."""
    url: str = Field(..., description="远程音频文件地址")
    language: str = Field(default="zh", description="音频语言")  # defaults to Chinese

@app.tool()
async def transcribe_audio(data: AudioInput) -> str:
    """
    Download an audio file and transcribe it with Whisper.

    Returns:
        "[语音转写结果]：<text>" on success, "[转写失败]：<error>" on failure.
        The temporary file is always removed.
    """
    import tempfile

    audio_path = None
    try:
        # Timeout + status check: don't hang on dead hosts, don't feed an
        # HTML error page to Whisper.
        response = requests.get(data.url, timeout=60)
        response.raise_for_status()
        # Unique temp file per call: the old fixed name "temp_audio.mp3"
        # raced when two requests ran concurrently (this handler is async).
        with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as tmp:
            tmp.write(response.content)
            audio_path = tmp.name

        # Transcribe with Whisper
        result = model.transcribe(audio_path, language=data.language)
        return f"[语音转写结果]：{result['text']}"
    except Exception as e:
        return f"[转写失败]：{str(e)}"
    finally:
        if audio_path and os.path.exists(audio_path):
            os.remove(audio_path)



#例【8-6】
# tools/audio_segment_semantics.py

from mcp.server import FastMCP
from pydantic import BaseModel, Field
import torchaudio
from transformers import Wav2Vec2Processor, Wav2Vec2ForSequenceClassification
import torch
import requests
import os

# Initialize the MCP application
app = FastMCP("audio-segment-analyzer")

# Load the pretrained model (emotion recognition as the example task)
processor = Wav2Vec2Processor.from_pretrained("superb/wav2vec2-base-superb-er")
model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-er")

# Input definition
class SegmentInput(BaseModel):
    """Request payload for `analyze_segment`: URL of one audio segment."""
    url: str = Field(..., description="音频片段URL")

@app.tool()
async def analyze_segment(input: SegmentInput) -> str:
    """
    Download an audio segment and classify it with the emotion model.

    Returns:
        "[音频语义编码结果]：..." with the predicted label on success,
        "[语义编码失败]：<error>" on failure. The temp file is always removed.
    """
    import tempfile

    local_path = None
    try:
        # Timeout + status check: don't hang on dead hosts, don't feed an
        # HTML error page to torchaudio.
        response = requests.get(input.url, timeout=60)
        response.raise_for_status()
        # Unique temp file per call: the old fixed name "segment.wav" raced
        # when two requests ran concurrently (this handler is async).
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
            tmp.write(response.content)
            local_path = tmp.name

        waveform, sr = torchaudio.load(local_path)
        # NOTE(review): no resampling is done here; wav2vec2 checkpoints are
        # commonly trained at 16 kHz — confirm inputs match or resample first.
        inputs = processor(waveform[0], sampling_rate=sr, return_tensors="pt", padding=True)
        with torch.no_grad():
            logits = model(**inputs).logits
            predicted_class_id = torch.argmax(logits, dim=-1).item()
            label = model.config.id2label[predicted_class_id]
        return f"[音频语义编码结果]：识别为情绪状态 —— {label}"
    except Exception as e:
        return f"[语义编码失败]：{str(e)}"
    finally:
        if local_path and os.path.exists(local_path):
            os.remove(local_path)



#例【8-7】
# tools/table_to_prompt.py

from mcp.server import FastMCP
from pydantic import BaseModel, Field
import pandas as pd
import os

# Initialize the MCP service for table structuring
app = FastMCP("table-structurer")

class TableInput(BaseModel):
    """Request payload for `format_table_as_prompt`: local CSV path and a table title."""
    path: str = Field(..., description="本地CSV文件路径")
    title: str = Field(..., description="表格语义名称")

@app.tool()
async def format_table_as_prompt(data: TableInput) -> str:
    """
    Expand a CSV table into a natural-language prompt for context injection.

    Emits a title line, a field list, then one sentence per row
    ("第N行：col为value，...").

    Returns:
        The prompt text, or "[错误] 文件不存在：..." when the path is missing.
    """
    if not os.path.exists(data.path):
        return f"[错误] 文件不存在：{data.path}"

    df = pd.read_csv(data.path)
    lines = [f"表格《{data.title}》的摘要如下："]
    columns = list(df.columns)

    # Column definitions
    lines.append(f"本表包含以下字段：{'、'.join(columns)}。")

    # enumerate() gives the positional row number; iterrows() yields the index
    # *label*, which is only 0..n-1 for a default RangeIndex, so "idx + 1"
    # produced wrong row numbers for any other index.
    for row_no, (_, row) in enumerate(df.iterrows(), start=1):
        row_prompt = "，".join(f"{col}为{row[col]}" for col in columns)
        lines.append(f"第{row_no}行：{row_prompt}。")

    # Final prompt text
    return "\n".join(lines)



#例【8-8】
# tools/document_summarizer.py
from mcp.server import FastMCP
from pydantic import BaseModel, Field
from transformers import pipeline
import re
import os

# Initialize the FastMCP service
app = FastMCP("doc-summarizer")

# Generative summarization pipeline provided by HuggingFace
summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")

class DocumentInput(BaseModel):
    """Request payload for `summarize_document`: document path and summary length bounds."""
    path: str = Field(..., description="本地文档路径")
    min_len: int = Field(default=50, description="摘要最小长度")
    max_len: int = Field(default=150, description="摘要最大长度")

@app.tool()
async def summarize_document(input: DocumentInput) -> str:
    """
    Summarize a local UTF-8 document paragraph by paragraph.

    Paragraphs are split on blank lines; fragments of 30 chars or fewer are
    dropped, and paragraphs under 100 chars are passed through verbatim.

    Returns:
        The structured summary, or "[错误] 文件不存在：..." if the path is missing.
    """
    if not os.path.exists(input.path):
        return f"[错误] 文件不存在：{input.path}"

    with open(input.path, "r", encoding="utf-8") as f:
        content = f.read()
    # Split on blank lines and drop fragments too short to matter.
    raw_paragraphs = [p.strip() for p in re.split(r"\n{2,}", content) if len(p.strip()) > 30]
    result_lines = ["文档摘要如下："]
    for idx, para in enumerate(raw_paragraphs):
        if len(para) < 100:
            # Too short to be worth summarizing — include as-is.
            result_lines.append(f"第{idx + 1}段（略短）：{para}")
            continue
        # truncation=True keeps paragraphs longer than the model's input
        # window from raising inside the pipeline; they are truncated and
        # summarized instead.
        summary = summarizer(
            para,
            min_length=input.min_len,
            max_length=input.max_len,
            truncation=True,
        )[0]["summary_text"]
        result_lines.append(f"第{idx + 1}段摘要：{summary}")
    return "\n".join(result_lines)