from io import BytesIO
import os

import cv2
import numpy as np
import requests
import torch
from fastapi import FastAPI, UploadFile, File, Request, HTTPException
from fastapi.responses import JSONResponse, HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from keras.models import model_from_json
from PIL import Image
from pydantic import BaseModel

from model import Resnet50FER
from config import Resnet50Config

app = FastAPI()

# Serve static assets (e.g. the page's background image) from the "static" folder.
STATIC_DIR = "static"
app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")

templates = Jinja2Templates(directory="templates")

# Configuration for the ResNet-50 facial-expression-recognition model.
config = Resnet50Config(num_classes=10)
html_content = """ |
|
<!DOCTYPE html> |
|
<html lang="en"> |
|
<head> |
|
<meta charset="UTF-8"> |
|
<meta name="viewport" content="width=device-width, initial-scale=1.0"> |
|
<title>FastAPI con Imagen de Fondo</title> |
|
<style> |
|
body { |
|
background-image: url('static/background.png'); /* Ruta a tu imagen de fondo en la carpeta 'static' */ |
|
background-size: cover; |
|
background-repeat: no-repeat; |
|
background-attachment: fixed; |
|
font-family: Arial, sans-serif; |
|
color: #333; |
|
margin: 0; |
|
padding: 0. |
|
} |
|
.container { |
|
max-width: 800px; |
|
margin: 0 auto; |
|
padding: 20px; |
|
background-color: rgba(255, 255, 255, 0.8); /* Fondo semi-transparente para mejor legibilidad */ |
|
border-radius: 10px; |
|
box-shadow: 0 0 10px rgba(0, 0, 0, 0.2); |
|
} |
|
</style> |
|
</head> |
|
<body> |
|
<div class="container"> |
|
<h1>FastAPI con Imagen de Fondo</h1> |
|
<p>Ejemplo de una aplicación FastAPI con una imagen de fondo.</p> |
|
<form id="uploadForm" action="/recognize-face/" method="post" enctype="multipart/form-data" onsubmit="uploadImage(event)"> |
|
<input type="file" name="image" accept="image/*" required> |
|
<button type="submit">Subir Imagen</button> |
|
</form> |
|
</div> |
|
<script> |
|
async function uploadImage(event) { |
|
event.preventDefault(); // Evita el envío tradicional del formulario |
|
const formData = new FormData(); |
|
formData.append('image', document.querySelector('input[name="image"]').files[0]); |
|
try { |
|
const response = await fetch('/recognize-face/', { |
|
method: 'POST', |
|
body: formData |
|
}); |
|
if (!response.ok) { |
|
throw new Error('Error al enviar la imagen'); |
|
} |
|
const result = await response.json(); |
|
console.log(result); |
|
} catch (error) { |
|
console.error('Error:', error); |
|
} |
|
} |
|
</script> |
|
</body> |
|
</html> |
|
""" |


@app.get("/", response_class=HTMLResponse)
async def homepage(request: Request):
    return HTMLResponse(content=html_content)


# Instantiate the ResNet-50 FER model and switch it to inference mode.
resnet_model = Resnet50FER(config)
resnet_model.eval()


def download_model_if_not_exists(model_url, model_path):
    """Download a model file once and cache it on disk."""
    if not os.path.exists(model_path):
        print("Downloading...")
        response = requests.get(model_url, timeout=60)
        response.raise_for_status()  # fail loudly on HTTP errors
        with open(model_path, 'wb') as f:
            f.write(response.content)
        print("Model downloaded successfully.")


def load_fer_model():
    model_url = 'https://github.com/jesusninoc/Realtime-Emotion-Detection/raw/master/fer.h5'
    model_json_url = 'https://github.com/jesusninoc/Realtime-Emotion-Detection/raw/master/fer.json'
    model_path = 'fer.h5'
    model_json_path = 'fer.json'

    download_model_if_not_exists(model_url, model_path)
    download_model_if_not_exists(model_json_url, model_json_path)

    # Rebuild the architecture from the JSON description, then load the weights.
    with open(model_json_path, "r") as f:
        model = model_from_json(f.read())
    model.load_weights(model_path)
    return model


fer_model = load_fer_model()
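
# Optional sanity check (an assumption about the downloaded FER model, not
# part of the original flow): the network should accept 48x48 grayscale input.
#
#   assert fer_model.input_shape[1:] == (48, 48, 1)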


def preprocess_image(image):
    """Resize a face crop to the 48x48 grayscale batch the FER model expects."""
    img = cv2.resize(image, (48, 48))
    # The crop coming from /recognize-face/ is already grayscale; only convert
    # when a 3-channel image is passed in.
    if img.ndim == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = img / 255.0
    img = np.expand_dims(img, axis=-1)  # channel dimension
    img = np.expand_dims(img, axis=0)   # batch dimension
    return img


def recognize_emotions(image, model):
    processed_image = preprocess_image(image)
    predictions = model.predict(processed_image)
    emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
    max_index = np.argmax(predictions[0])
    return emotion_labels[max_index]
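
# Example usage outside the API, e.g. from a REPL ("face.jpg" is a
# hypothetical local test image):
#
#   frame = cv2.imread("face.jpg")
#   print(recognize_emotions(frame, fer_model))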


@app.post("/upload/")
async def upload_image(image: UploadFile = File(...)):
    return {"filename": image.filename}


@app.post("/predict/")
async def predict(item: UploadFile = File(...)):
    image_bytes = await item.read()
    image = Image.open(BytesIO(image_bytes))
    # Placeholder: ResNet-50 inference is not wired up yet.
    return {"message": "Endpoint for predictions with Resnet50FER"}


@app.post("/recognize-face/")
async def recognize_face(image: UploadFile = File(...)):
    try:
        contents = await image.read()
        # PIL decodes to RGB, so convert explicitly and use RGB2GRAY below.
        img = Image.open(BytesIO(contents)).convert("RGB")

        gray = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2GRAY)

        # Detect faces with OpenCV's bundled Haar cascade.
        face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)

        detected_faces = [{"x": int(x), "y": int(y), "w": int(w), "h": int(h)} for (x, y, w, h) in faces]

        if len(faces) > 0:
            # Classify the emotion on the first detected face only.
            (x, y, w, h) = faces[0]
            face_img = gray[y:y+h, x:x+w]
            emotion = recognize_emotions(face_img, fer_model)
            return JSONResponse(content={"detected_faces": detected_faces, "emotion": emotion})

        return JSONResponse(content={"detected_faces": detected_faces})

    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
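
# Quick smoke test from a shell once the server is running ("face.jpg" is a
# hypothetical local image):
#
#   curl -F "image=@face.jpg" http://localhost:8000/recognize-face/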


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)