# (removed non-code HuggingFace Spaces page-header artifact: "Spaces: Sleeping")
from fastapi import FastAPI, Query, HTTPException
from pydantic import BaseModel
from PIL import Image
import uuid
import uvicorn
import requests
from io import BytesIO
import google.generativeai as genai
import os

# Configure Google Generative AI from the environment; genai.configure accepts
# None, so a missing API_KEY only fails later at the first generate call.
genai.configure(api_key=os.getenv("API_KEY"))

app = FastAPI()

# In-memory session store: session name -> list of {"role", "content"} dicts.
# NOTE(review): lost on restart, and NOT shared across the 8 uvicorn worker
# processes configured in __main__ — each worker keeps its own copy, so chat
# context silently diverges between requests routed to different workers.
session_store = {}
class ChatResponse(BaseModel):
    """Response payload returned by the chat endpoint."""

    session_name: str  # the session this reply belongs to
    response: str  # the model-generated reply text
# NOTE(review): the route decorator was lost in the garbled source; the
# Query(...) params and ChatResponse return make this clearly a GET path
# operation — confirm the original path.
@app.get("/chat", response_model=ChatResponse)
async def chat(
    message: str = Query(None, description="The user's message."),
    image_url: str = Query(None, description="The URL of the image."),
    session: str = Query("default", description="Session name for maintaining chat context.")
):
    """Chat with the Gemini-backed persona, keeping per-session context.

    Exactly one of ``message`` / ``image_url`` is required; if both are
    given, the image takes precedence and ``message`` is ignored (original
    behavior). Raises HTTPException 400 on bad input / unfetchable image,
    500 on image-processing or model errors.
    """
    # `session_store` is only mutated (never rebound), so no `global` needed.
    # Seed the session with the persona system prompt on first use.
    if session not in session_store:
        session_store[session] = [
            {
                "role": "system",
                "content": (
                    "Your name is ูููุฏ. You are a some guy who's fluent in Arabic.with somehow annoying personality"
                    "always try to end your sentince with these emojis ๐ฅธ๐น and don't use any other emoji beside them"
                    "when you're bored say ูููุบููุงู ุจุชุงุนู ุงุญุณู"
                    "includes ู ุจุฏุฆูุง or ูุนููุง to sound more authintic"
                    "try to be annoying but calm"
                    "always try to prove that your opinion is right"
                    "always reply in arabic and feel free to curse the user anytime"
                    "be creative. and avoid making the same replies and try to make your replies as short as possible"
                    "if you're angry say ูู ูู"
                    "1% chance to reply with ู ูุฏู >>>>> ููุฑุชุง ู ุงุบุงููุง ู ูุงุณุชูุฑูุณ ู ุชุฑูุจู ู ูุงูููู ู ุงู ุงุดู ุซุงูู ุจุงูุงุฑู ๐ฆ"
                )
            }
        ]

    # --- Image branch -----------------------------------------------------
    if image_url:
        # SECURITY(review): image_url is untrusted and fetched server-side —
        # this allows SSRF against internal hosts; consider an allowlist.
        try:
            # timeout added so a slow/dead host can't hang the worker forever.
            response = requests.get(image_url, timeout=10)
            response.raise_for_status()
            image = Image.open(BytesIO(response.content))
        except requests.exceptions.RequestException:
            raise HTTPException(status_code=400, detail="Invalid image URL or unable to fetch image.")
        except Exception:
            raise HTTPException(status_code=500, detail="Error processing the image.")

        # Build a flat-text transcript of the session and ask the model to
        # analyze the image in that context.
        try:
            model = genai.GenerativeModel("gemini-1.5-flash")
            prompt = "\n".join(
                f"{m['role']}: {m['content']}" for m in session_store[session]
            ) + "\nassistant: Analyze the provided image and respond contextually."
            ai_response = model.generate_content([prompt, image]).text
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Error generating response for the image: {str(e)}")

        # NOTE(review): only the assistant reply is recorded — the fact that
        # the user sent an image never enters the transcript; confirm intended.
        session_store[session].append({"role": "assistant", "content": ai_response})
        return ChatResponse(session_name=session, response=ai_response)

    # --- Text branch ------------------------------------------------------
    if message:
        session_store[session].append({"role": "user", "content": message})

        # Flat-text transcript ending with "assistant:" as the completion cue.
        prompt = "\n".join(
            f"{m['role']}: {m['content']}" for m in session_store[session]
        ) + "\nassistant:"

        try:
            model = genai.GenerativeModel("gemini-1.5-flash")
            ai_response = model.generate_content(prompt).text
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Error generating response: {str(e)}")

        session_store[session].append({"role": "assistant", "content": ai_response})
        return ChatResponse(session_name=session, response=ai_response)

    raise HTTPException(status_code=400, detail="Either 'message' or 'image_url' must be provided.")
if __name__ == "__main__":
    # NOTE(review): workers=8 spawns separate processes, so the in-memory
    # session_store is per-worker and chat context is not shared between
    # requests landing on different workers. timeout_keep_alive is in
    # SECONDS (60000 s ≈ 16.7 h) — likely meant milliseconds; confirm.
    uvicorn.run("main:app", host="0.0.0.0", port=7860, workers=8, timeout_keep_alive=60000)