"""
AI services module for BrightMind AI
Handles all AI/ML related functionality including Hugging Face API calls and image generation
"""
import requests
import base64
from config import HF_TOKEN, HUGGINGFACE_HEADERS
def call_hugging_face_api(prompt, max_tokens=200, temperature=0.7):
    """Call the Hugging Face router chat-completions API for the Albert chatbot.

    Sends *prompt* as a single-turn user message to the DeepSeek-V3 model
    behind the HF router endpoint.

    Args:
        prompt: User message text.
        max_tokens: Cap on generated tokens (default 200, sized for short
            chatbot replies; callers needing more can raise it).
        temperature: Sampling temperature (default 0.7).

    Returns:
        The assistant's reply text, or None when no token is configured,
        the HTTP call fails, returns a non-200 status, or the response
        payload is malformed.
    """
    # Without an API token the request cannot be authorized; bail out early.
    if not HF_TOKEN:
        return None
    headers = {
        "Authorization": f"Bearer {HF_TOKEN}",
        "Content-Type": "application/json",
    }
    data = {
        "model": "deepseek-ai/DeepSeek-V3-0324",
        "messages": [
            {
                "role": "user",
                "content": prompt,
            }
        ],
        "max_tokens": max_tokens,
        "temperature": temperature,
    }
    try:
        response = requests.post(
            "https://router.huggingface.co/v1/chat/completions",
            headers=headers,
            json=data,
            timeout=30,
        )
        if response.status_code == 200:
            result = response.json()
            # OpenAI-compatible response shape: first choice's message text.
            return result['choices'][0]['message']['content']
        else:
            print(f"❌ Albert API error: {response.status_code}")
            return None
    except Exception as e:
        # Broad catch is deliberate: chatbot callers treat any failure
        # (network, JSON decode, unexpected payload shape) as "no reply".
        print(f"❌ Albert API error: {str(e)}")
        return None
def call_hugging_face_api_content(prompt, max_tokens=1000, temperature=0.7):
    """Call the Hugging Face router chat-completions API for content generation.

    Same endpoint and model as call_hugging_face_api, but with a larger
    token budget, a longer timeout, and verbose progress logging suited to
    long-form content generation.

    Args:
        prompt: User message text (typically a long content-generation prompt).
        max_tokens: Cap on generated tokens (default 1000 — more than the
            chatbot path, for full content pieces).
        temperature: Sampling temperature (default 0.7).

    Returns:
        The generated content string, or None when no token is configured,
        the HTTP call fails, returns a non-200 status, or the response
        payload is malformed.
    """
    # Without an API token the request cannot be authorized; bail out early.
    if not HF_TOKEN:
        return None
    headers = {
        "Authorization": f"Bearer {HF_TOKEN}",
        "Content-Type": "application/json",
    }
    data = {
        "model": "deepseek-ai/DeepSeek-V3-0324",
        "messages": [
            {
                "role": "user",
                "content": prompt,
            }
        ],
        "max_tokens": max_tokens,  # More tokens for content generation
        "temperature": temperature,
    }
    try:
        print("🌐 Making API call for content generation...")
        print("🔗 Endpoint: https://router.huggingface.co/v1/chat/completions")
        print(f"🔑 Token available: {HF_TOKEN is not None}")
        print(f"📝 Prompt length: {len(prompt)} characters")
        response = requests.post(
            "https://router.huggingface.co/v1/chat/completions",
            headers=headers,
            json=data,
            timeout=60,  # Longer timeout for content generation
        )
        print(f"📊 Response status: {response.status_code}")
        if response.status_code == 200:
            result = response.json()
            # OpenAI-compatible response shape: first choice's message text.
            content = result['choices'][0]['message']['content']
            print(f"✅ Content generation successful! Length: {len(content)} characters")
            return content
        else:
            print(f"❌ Content API error: {response.status_code}")
            print(f"❌ Response: {response.text}")
            return None
    except Exception as e:
        # Broad catch is deliberate: any failure mode degrades to "no content".
        print(f"❌ Content API error: {str(e)}")
        return None
def generate_image_with_huggingface(prompt, topic, content_type):
    """Generate an educational image via the Hugging Face Inference API.

    Tries a short list of previously-verified Stable Diffusion models in
    order and returns the first successful result as a data URI.

    Args:
        prompt: Caller-supplied prompt. NOTE(review): currently unused —
            a simplified prompt is built from *topic* instead; confirm
            whether callers expect their prompt to be honored.
        topic: Subject of the image; embedded in the generation prompt.
        content_type: Kind of educational content (unused here; kept for
            interface compatibility with callers).

    Returns:
        A "data:image/png;base64,..." string, or None when no token is
        configured or every model fails.
    """
    try:
        if not HF_TOKEN:
            print("No HF_TOKEN available for image generation")
            return None
        headers = {
            "Authorization": f"Bearer {HF_TOKEN}"
        }
        # Use working models that we've tested
        working_models = [
            "stabilityai/stable-diffusion-xl-base-1.0",
            "stabilityai/stable-diffusion-3-medium-diffusers"
        ]
        # Simple prompt optimized for educational content
        clean_prompt = f"educational diagram of {topic}, simple illustration, clean design, white background"
        print(f"Generating image for: {topic}")
        print(f"Using prompt: {clean_prompt}")
        for model_name in working_models:
            try:
                print(f"Trying model: {model_name}")
                # Use the correct API format for HF Inference API
                payload = {
                    "inputs": clean_prompt,
                    "parameters": {
                        "num_inference_steps": 20,
                        "guidance_scale": 7.5
                    }
                }
                response = requests.post(
                    f"https://api-inference.huggingface.co/models/{model_name}",
                    headers=headers,
                    json=payload,
                    timeout=60
                )
                print(f"Response status: {response.status_code}")
                if response.status_code == 200:
                    content_length = len(response.content)
                    print(f"Content length: {content_length} bytes")
                    # A tiny body can't be a real image; treat as an error payload.
                    if content_length > 5000:
                        try:
                            # A parseable JSON body means the API reported an
                            # error despite the 200 status.
                            json_response = response.json()
                            if "error" in json_response:
                                print(f"API error: {json_response['error']}")
                                continue
                        except ValueError:
                            # Not JSON, so it is binary image data. (base64 is
                            # already imported at module level.)
                            image_base64 = base64.b64encode(response.content).decode('utf-8')
                            print(f"Successfully generated image with {model_name}")
                            return f"data:image/png;base64,{image_base64}"
                    else:
                        print("Response too small, likely an error")
                        continue
                elif response.status_code == 503:
                    # Model is loading - this is common on first request
                    try:
                        error_info = response.json()
                        if "estimated_time" in error_info:
                            print(f"Model loading, estimated time: {error_info['estimated_time']} seconds")
                        else:
                            print("Model is currently loading")
                    except ValueError:
                        print("Model unavailable (503)")
                    continue
                elif response.status_code == 401:
                    # A bad token will fail for every model; stop retrying.
                    print("Authentication failed - invalid HF_TOKEN")
                    break
                elif response.status_code == 404:
                    print(f"Model not found: {model_name}")
                    continue
                else:
                    print(f"HTTP {response.status_code}: {response.text[:200]}")
                    continue
            except requests.exceptions.Timeout:
                print(f"Timeout with model {model_name}")
                continue
            except Exception as model_error:
                # Any other per-model failure: log and fall through to the
                # next candidate model.
                print(f"Error with model {model_name}: {str(model_error)}")
                continue
        print("All models failed or unavailable")
        return None
    except Exception as e:
        print(f"Critical error in image generation: {str(e)}")
        return None
def get_educational_image_url(topic, description, content_type):
    """Get an educational image URL: HF generation first, then placeholder fallbacks.

    Args:
        topic: Subject of the image (used in prompts and placeholder text).
        description: Free-text description of the desired visual.
        content_type: Kind of educational content (e.g. lesson section name).

    Returns:
        Either a "data:image/png;base64,..." data URI from Hugging Face
        generation, or the URL of the first reachable placeholder image,
        or a final static placeholder URL. Never returns None.
    """
    try:
        # Create a detailed prompt for image generation
        image_prompt = f"Educational illustration of {topic}, {description}, {content_type}, clean diagram, colorful, professional, suitable for middle school students"
        print(f"🎨 Generating image with prompt: {image_prompt}")
        # Try to generate image with Hugging Face first
        generated_image = generate_image_with_huggingface(image_prompt, topic, content_type)
        if generated_image:
            print(f"✅ Generated image successfully")
            return generated_image
        # Fallback to reliable sources if generation fails
        print("🔄 Image generation failed, using fallback sources")
        # URL-encode-ish cleanup for placeholder text (spaces become '+').
        description_clean = description.replace(' ', '+').replace(',', '').replace(':', '')
        topic_clean = topic.replace(' ', '+')
        educational_sources = [
            # 1. Lorem Picsum (reliable placeholder)
            # NOTE(review): str hash() is salted per process (PYTHONHASHSEED),
            # so this seed differs between runs — confirm that's acceptable.
            f"https://picsum.photos/800/600?random={hash(topic) % 1000}",
            # 2. Placeholder with educational styling and description
            f"https://via.placeholder.com/800x600/667eea/ffffff?text={description_clean}",
            # 3. Educational diagram generator
            f"https://via.placeholder.com/800x600/4f46e5/ffffff?text={topic_clean}+{content_type.replace(' ', '+')}",
            # 4. Simple topic-based placeholder
            f"https://via.placeholder.com/800x600/10b981/ffffff?text={topic_clean}",
        ]
        # Try each fallback source; first one that answers 200 to HEAD wins.
        for i, url in enumerate(educational_sources):
            try:
                print(f"🔍 Trying fallback source {i+1}: {url}")
                response = requests.head(url, timeout=5)
                if response.status_code == 200:
                    print(f"✅ Found working fallback image: {url}")
                    return url
            except requests.RequestException:
                # Unreachable source — move on to the next candidate.
                continue
        # Final fallback
        return f"https://via.placeholder.com/800x600/667eea/ffffff?text={topic.replace(' ', '+')}+{content_type.replace(' ', '+')}"
    except Exception as e:
        print(f"❌ Image URL generation error: {str(e)}")
        return f"https://via.placeholder.com/800x600/667eea/ffffff?text={topic.replace(' ', '+')}+{content_type.replace(' ', '+')}"
def generate_real_image(description, topic, content_type):
    """Generate actual image markdown for educational content.

    Args:
        description: What the image should depict (also used as alt text).
        topic: Subject of the image.
        content_type: Kind of educational content.

    Returns:
        A markdown image embed containing the generated data URI, or a
        text placeholder block when generation fails.
    """
    try:
        # Try to generate image with Hugging Face
        image_data = generate_image_with_huggingface(description, topic, content_type)
        if image_data:
            # BUG FIX: the original returned f'\n\n' — an empty string that
            # silently discarded the generated image. Embed it as markdown.
            return f'\n\n![{description}]({image_data})\n\n'
        else:
            # Fallback to text placeholder
            return f"\n**[📸 Image: {description}]**\n*Visual content would appear here*\n"
    except Exception as e:
        # Best-effort: any failure degrades to the same text placeholder.
        return f"\n**[📸 Image: {description}]**\n*Visual content would appear here*\n"
def generate_image_placeholder(description, topic, content_type):
"""Generate image placeholder with description for educational content (fallback)"""
# Create a structured image placeholder that can be replaced with real images
image_placeholder = f"""
Description: {description}
Topic: {topic}
Content Type: {content_type}