Spaces:
Runtime error
samyakkhatua committed on
Commit • 982f352
1 Parent(s): 06ab76a
app.py
- app.py +50 -0
- requirements.txt +7 -0
app.py
ADDED
@@ -0,0 +1,50 @@
+from fastapi import FastAPI, File, UploadFile, HTTPException
+from fastapi.responses import JSONResponse
+import cv2
+import numpy as np
+from tensorflow import keras
+import uvicorn
+
+app = FastAPI()
+
+@app.get("/")
+async def app_status():
+    return {"message": "APP is running"}
+
+# model = keras.models.load_model('savedModel/facialemotionmodel.h5')
+
+# def prepare_image(img):
+#     img = cv2.cvtColor(np.array(img), cv2.COLOR_BGR2GRAY)  # Convert to grayscale
+#     img = cv2.resize(img, (48, 48))  # Resize image to 48x48
+#     img = img / 255.0  # Normalize pixel values to 0-1
+#     img = img.reshape(1, 48, 48, 1)  # Reshape for the model
+#     return img
+
+# def predict_emotion(img):
+#     prediction = model.predict(img)
+#     return prediction
+
+# def interpret_prediction(prediction):
+#     emotions = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
+#     max_index = prediction.argmax()
+#     return emotions[max_index]
+
+# @app.post("/detect-emotion/")
+# async def detect_emotion(file: UploadFile = File(...)):
+#     if file.content_type.startswith('image/'):
+#         # Read image through file stream
+#         image_data = await file.read()
+#         image_array = np.frombuffer(image_data, np.uint8)
+#         image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
+#         if image is None:
+#             raise HTTPException(status_code=400, detail="Could not read the image")
+
+#         prepared_image = prepare_image(image)
+#         prediction = predict_emotion(prepared_image)
+#         emotion = interpret_prediction(prediction[0])
+#         return JSONResponse(content={"detected_emotion": emotion})
+#     else:
+#         raise HTTPException(status_code=400, detail="File format not supported")
+
+if __name__ == "__main__":
+    uvicorn.run(app, host="0.0.0.0", port=8000)
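For context, a minimal client sketch of how the /detect-emotion/ endpoint would be exercised once the commented-out block above is re-enabled. This is not part of the commit: it assumes the app is running locally on port 8000 (as in the __main__ block), uses a placeholder image path, and relies only on the requests package listed in requirements.txt.

# Hypothetical client call; assumes the server from app.py is running on
# localhost:8000 and that the /detect-emotion/ route has been uncommented.
import requests

# "face.jpg" is a placeholder path standing in for any local test image.
with open("face.jpg", "rb") as f:
    files = {"file": ("face.jpg", f, "image/jpeg")}
    response = requests.post("http://localhost:8000/detect-emotion/", files=files)

print(response.status_code)
print(response.json())  # e.g. {"detected_emotion": "happy"} once the model is loaded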
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+fastapi
+uvicorn
+requests
+python-multipart
+opencv-python-headless
+tensorflow
+numpy
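As a side note on the pins above: opencv-python-headless and numpy are what the commented-out prepare_image helper in app.py needs. A quick sanity-check sketch of the input shape that helper would hand to the model, using a dummy array in place of a decoded upload (the 48x48 grayscale size comes from the commented code, not from any model documentation):

import cv2
import numpy as np

# Dummy BGR image standing in for cv2.imdecode output (values are arbitrary).
dummy = np.random.randint(0, 256, (120, 160, 3), dtype=np.uint8)

gray = cv2.cvtColor(dummy, cv2.COLOR_BGR2GRAY)  # single channel, as in prepare_image
resized = cv2.resize(gray, (48, 48))            # 48x48 target size
normalized = resized / 255.0                    # pixel values scaled to [0, 1]
batched = normalized.reshape(1, 48, 48, 1)      # (batch, height, width, channels)

print(batched.shape)  # -> (1, 48, 48, 1)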