Spaces:
Runtime error
Runtime error
File size: 1,813 Bytes
982f352 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 |
from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.responses import JSONResponse
import cv2
import numpy as np
from tensorflow import keras
import uvicorn
# Single FastAPI application instance; the route decorators below register onto it.
app = FastAPI()
@app.get("/")
async def app_status():
    """Liveness probe: confirm the service is up.

    Returns:
        dict: a static status payload, serialized to JSON by FastAPI.
    """
    status_payload = {"message": "APP is running"}
    return status_payload
# NOTE(review): the emotion-detection endpoint below is intentionally disabled
# (commented out) — presumably because 'savedModel/facialemotionmodel.h5' is not
# available in this deployment; confirm before re-enabling.
# model = keras.models.load_model('savedModel/facialemotionmodel.h5')
# def prepare_image(img):
# img = cv2.cvtColor(np.array(img), cv2.COLOR_BGR2GRAY) # Convert to grayscale
# img = cv2.resize(img, (48, 48)) # Resize image to 48x48
# img = img / 255.0 # Normalize pixel values to 0-1
# img = img.reshape(1, 48, 48, 1) # Reshape for the model
# return img
# def predict_emotion(img):
# prediction = model.predict(img)
# return prediction
# def interpret_prediction(prediction):
# emotions = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
# max_index = prediction.argmax()
# return emotions[max_index]
# @app.post("/detect-emotion/")
# async def detect_emotion(file: UploadFile = File(...)):
# if file.content_type.startswith('image/'):
# # Read image through file stream
# image_data = await file.read()
# image_array = np.frombuffer(image_data, np.uint8)
# image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
# if image is None:
# raise HTTPException(status_code=400, detail="Could not read the image")
# prepared_image = prepare_image(image)
# prediction = predict_emotion(prepared_image)
# emotion = interpret_prediction(prediction[0])
# return JSONResponse(content={"detected_emotion": emotion})
# else:
# raise HTTPException(status_code=400, detail="File format not supported")
# Script entry point: start the ASGI development server, listening on all
# interfaces (0.0.0.0) at port 8000.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
|