# NOTE(review): removed non-Python extraction artifacts that preceded the
# imports ("Spaces:" / "Runtime error" x2) — they made the file unparseable.
# Third-party imports: web framework, image decoding, numerics, and the
# Keras loader used by the (currently commented-out) emotion endpoint.
from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.responses import JSONResponse
import cv2
import numpy as np
from tensorflow import keras
import uvicorn

# Application instance; route handlers attach to this object.
app = FastAPI()
async def app_status():
    """Health-check handler: report that the service is running.

    Returns:
        dict: a static status payload, ``{"message": "APP is running"}``.

    NOTE(review): no route decorator is attached (e.g. ``@app.get("/")``),
    so this coroutine is never registered with the FastAPI app — confirm
    whether a decorator was lost when the file was mangled.
    """
    return {"message": "APP is running"}
# model = keras.models.load_model('savedModel/facialemotionmodel.h5')
# def prepare_image(img):
#     img = cv2.cvtColor(np.array(img), cv2.COLOR_BGR2GRAY)  # Convert to grayscale
#     img = cv2.resize(img, (48, 48))  # Resize image to 48x48
#     img = img / 255.0  # Normalize pixel values to 0-1
#     img = img.reshape(1, 48, 48, 1)  # Reshape for the model
#     return img
# def predict_emotion(img):
#     prediction = model.predict(img)
#     return prediction
# def interpret_prediction(prediction):
#     emotions = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
#     max_index = prediction.argmax()
#     return emotions[max_index]
# @app.post("/detect-emotion/")
# async def detect_emotion(file: UploadFile = File(...)):
#     if file.content_type.startswith('image/'):
#         # Read image through file stream
#         image_data = await file.read()
#         image_array = np.frombuffer(image_data, np.uint8)
#         image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
#         if image is None:
#             raise HTTPException(status_code=400, detail="Could not read the image")
#         prepared_image = prepare_image(image)
#         prediction = predict_emotion(prepared_image)
#         emotion = interpret_prediction(prediction[0])
#         return JSONResponse(content={"detected_emotion": emotion})
#     else:
#         raise HTTPException(status_code=400, detail="File format not supported")
# Script entry point: serve the app on all interfaces, port 8000.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)