# StoryCatcher / server.py
# Last change (commit bd5957e, daniel-was-taken): enhance
# fetch_anthropic_models_tool to handle missing API key and exceptions.
import json
import os
# import sys
import uuid
import google.generativeai as genai
from openai import OpenAI
from anthropic import Anthropic
from mcp.server.fastmcp import FastMCP
from elevenlabs.client import ElevenLabs
from elevenlabs import save
import uvicorn
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
import gradio as gr
from app import demo # Import your UI here
# Initialize FastMCP Server (SSE transport). Use mount_path="/" because FastAPI mount adds the /mcp prefix.
mcp = FastMCP("Nebius Novelist Agent")
# ASGI sub-app exposing the MCP SSE endpoints (/sse and /messages); mounted under /mcp further below.
mcp_asgi_app = mcp.sse_app(mount_path="/")
# --- HELPER: TEXT GENERATION ROUTER ---
def generate_text_via_provider(prompt, provider, model, api_key):
    """Routes the prompt to Nebius (OpenAI), Google (Gemini), or Anthropic (Claude)"""
    if provider == "Nebius":
        if not api_key:
            return "Error: Nebius API Key missing."
        nebius_client = OpenAI(base_url="https://api.studio.nebius.ai/v1/", api_key=api_key)
        try:
            completion = nebius_client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": "You are a creative writing expert. Write engaging, human-like prose."},
                    {"role": "user", "content": prompt},
                ],
                temperature=0.7,
                max_tokens=4000,
                extra_body={"top_p": 0.9},
            )
            return completion.choices[0].message.content
        except Exception as e:
            return f"Nebius Error: {str(e)}"
    if provider == "Google Gemini":
        if not api_key:
            return "Error: Google API Key missing."
        try:
            genai.configure(api_key=api_key)
            # Disable all safety blocking; a blocked response still yields empty parts below.
            block_nothing = [
                {"category": category, "threshold": "BLOCK_NONE"}
                for category in (
                    "HARM_CATEGORY_HARASSMENT",
                    "HARM_CATEGORY_HATE_SPEECH",
                    "HARM_CATEGORY_SEXUALLY_EXPLICIT",
                    "HARM_CATEGORY_DANGEROUS_CONTENT",
                )
            ]
            result = genai.GenerativeModel(model, safety_settings=block_nothing).generate_content(prompt)
            return result.text if result.parts else "Error: Content blocked by safety filters."
        except Exception as e:
            return f"Gemini Error: {str(e)}"
    if provider == "Anthropic Claude":
        if not api_key:
            return "Error: Anthropic API Key missing."
        try:
            claude = Anthropic(api_key=api_key)
            reply = claude.messages.create(
                model=model,
                max_tokens=4000,
                temperature=0.7,
                system="You are a creative writing expert. Write engaging, human-like prose.",
                messages=[{"role": "user", "content": prompt}],
            )
            return reply.content[0].text
        except Exception as e:
            return f"Claude Error: {str(e)}"
    return "Error: Invalid Provider Selected"
# --- TOOL 1: The Architect ---
@mcp.tool()
def generate_story_plan(seed: str, format_type: str, genre_profile: str, provider: str, model: str, api_key: str) -> str:
    """Creates a structured outline based on format."""
    # Map the requested format to the outline shape we ask the model for.
    structure_by_format = {
        "Poem": "sequence of 4-6 stanzas",
        "Short Story": "3-part structure (Beginning, Middle, End)",
    }
    structure_hint = structure_by_format.get(format_type, "5-chapter outline")
    prompt = f"""
Act as a best-selling editor.
1. Generate a Catchy Title for this {format_type}.
2. Create a {structure_hint} based on:
Seed: {seed}
Genre Blend: {genre_profile}
Output Format: Strict JSON object:
{{
"book_title": "The Title",
"parts": [
{{ "part_num": 1, "title": "...", "description": "..." }},
...
]
}}
Return ONLY JSON.
"""
    raw = generate_text_via_provider(prompt, provider, model, api_key)
    # Strip markdown code fences the model may wrap around the JSON.
    for fence in ("```json", "```"):
        raw = raw.replace(fence, "")
    return raw.strip()
# --- TOOL 2: The Ghostwriter ---
@mcp.tool()
def write_content_segment(title: str, description: str, format_type: str, style_guide: str, length: str, provider: str, model: str, api_key: str) -> str:
    """Writes a specific segment."""
    # Poems are measured in lines; prose is measured in words by requested length.
    if format_type == "Poem":
        word_count = "10-20 lines"
    else:
        word_count = {"Short": "400-600", "Long": "1500-2000"}.get(length, "800-1200")
    prompt = f"""
Write a segment for a {format_type}.
Segment Title: '{title}'
Context: {description}
Style: {style_guide}
Target Length: {word_count}.
Instructions:
- Show, Don't Tell.
- No headers, just content.
"""
    return generate_text_via_provider(prompt, provider, model, api_key)
# --- TOOL 3: The Narrator ---
@mcp.tool()
def generate_audio_narration(text: str, voice_id: str, model_id: str, api_key: str) -> str:
    """Generates audio for text using specific Voice ID and Model ID."""
    if not api_key:
        return "Error: ElevenLabs Key missing."
    try:
        tts_client = ElevenLabs(api_key=api_key)
        # Smart truncation: clip over-long text at the last sentence boundary
        # inside the safe limit, falling back to a hard cut if none is found.
        safe_limit = 2000
        safe_text = text
        if len(text) > safe_limit:
            clipped = text[:safe_limit]
            boundary = clipped.rfind('.')
            safe_text = clipped[:boundary + 1] if boundary > 0 else clipped
        audio = tts_client.text_to_speech.convert(text=safe_text, voice_id=voice_id, model_id=model_id)
        # Random suffix avoids clobbering earlier narrations in the working dir.
        out_name = f"audio_{uuid.uuid4().hex[:8]}.mp3"
        save(audio, out_name)
        return os.path.abspath(out_name)
    except Exception as e:
        return f"Error: {str(e)}"
# --- FETCH TOOLS ---
@mcp.tool()
def fetch_nebius_models_tool(api_key: str) -> str:
    """Returns a JSON list of Nebius text-generation model ids (or a one-item error list)."""
    if not api_key:
        return json.dumps(["Error: API Key Missing"])
    try:
        nebius = OpenAI(base_url="https://api.studio.nebius.ai/v1/", api_key=api_key)
        listing = nebius.models.list()

        # Strict filtering for text models: Instruct variants, Llama, or Mistral families.
        def _is_text_model(model_id):
            lowered = model_id.lower()
            return "Instruct" in model_id or "llama" in lowered or "mistral" in lowered

        return json.dumps([entry.id for entry in listing.data if _is_text_model(entry.id)])
    except Exception as e:
        return json.dumps([f"Error: {str(e)}"])
@mcp.tool()
def fetch_gemini_models_tool(api_key: str) -> str:
    """Fetches Gemini models checking for 'generateContent' capability."""
    if not api_key:
        return json.dumps(["Error: API Key Missing"])
    try:
        genai.configure(api_key=api_key)
        # Keep only models that support text generation; drop the "models/" prefix.
        names = [
            entry.name.replace("models/", "")
            for entry in genai.list_models()
            if "generateContent" in entry.supported_generation_methods
        ]
        # Stable sort bubbles 'flash' models to the front (False sorts before True).
        names.sort(key=lambda name: "flash" not in name.lower())
        return json.dumps(names)
    except Exception as e:
        return json.dumps([f"Error: {str(e)}"])
@mcp.tool()
def fetch_anthropic_models_tool(api_key: str) -> str:
    """Returns a JSON list of model ids visible to this Anthropic key (or a one-item error list)."""
    if not api_key:
        return json.dumps(["Error: API Key Missing"])
    try:
        anthropic_client = Anthropic(api_key=api_key)
        ids = [entry.id for entry in anthropic_client.models.list().data]
        return json.dumps(ids)
    except Exception as e:
        return json.dumps([f"Error: {str(e)}"])
@mcp.tool()
def fetch_elevenlabs_data_tool(api_key: str) -> str:
    """Fetches BOTH Voices and Models from ElevenLabs."""
    if not api_key:
        return json.dumps({"Error": "API Key Missing"})
    try:
        el_client = ElevenLabs(api_key=api_key)
        # Voice name -> voice_id mapping for UI dropdowns.
        voice_map = {voice.name: voice.voice_id for voice in el_client.voices.search().voices}
        # TTS model ids; models.list() yields model objects directly.
        model_ids = [entry.model_id for entry in el_client.models.list()]
        return json.dumps({"voices": voice_map, "models": model_ids})
    except Exception as e:
        return json.dumps({"Error": str(e)})
# Root ASGI app; SPACE_ROOT_PATH supports running behind a path-prefixing proxy (e.g. HF Spaces).
fastapi_mcp_app = FastAPI(root_path=os.environ.get("SPACE_ROOT_PATH", ""))
# NOTE(review): wildcard allow_origins combined with allow_credentials=True is
# rejected by browsers per the CORS spec — confirm whether credentials are needed.
fastapi_mcp_app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
    allow_credentials=True,
)
# Mount at /mcp (FastMCP SSE app exposes /sse and /messages -> final paths /mcp/sse and /mcp/messages/)
fastapi_mcp_app.mount("/mcp", mcp_asgi_app)
# Gradio UI takes over "/" and wraps the FastAPI app; `app` is what uvicorn serves.
app = gr.mount_gradio_app(fastapi_mcp_app, demo, path="/")
if __name__ == "__main__":
    # Serve the combined Gradio + MCP app. PORT defaults to 7860 (HF Spaces convention).
    # Use a string default: os.getenv expects str, and int() parses it either way.
    port = int(os.getenv("PORT", "7860"))
    print(f"running on http://0.0.0.0:{port}")
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=port,
        log_level="info",
    )