Upload 9 files

- .gitattributes +2 -0
- api.py +122 -0
- prod_models/emo_modelV2.keras +3 -0
- prod_models/emo_modelV2_tf/fingerprint.pb +3 -0
- prod_models/emo_modelV2_tf/keras_metadata.pb +3 -0
- prod_models/emo_modelV2_tf/saved_model.pb +3 -0
- prod_models/emo_modelV2_tf/variables/variables.data-00000-of-00001 +3 -0
- prod_models/emo_modelV2_tf/variables/variables.index +0 -0
- prod_models/emotion_classifier_pipe_lr.pkl +3 -0
- requirements.txt +18 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+prod_models/emo_modelV2_tf/variables/variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
+prod_models/emo_modelV2.keras filter=lfs diff=lfs merge=lfs -text
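The two added patterns route the new model weights through Git LFS. When cloning the Space, those files must be materialized (for example with `git lfs pull`), otherwise the model loading in api.py below fails on pointer stubs. The snippet that follows is a minimal, optional sanity check, not part of the commit; the paths are the ones added here, and the pointer header it looks for is the standard LFS spec line visible in the pointer files further down.

# Minimal sketch: confirm the LFS-tracked model files were materialized.
# A file that is still an LFS pointer begins with the spec line below
# instead of binary model data.
from pathlib import Path

LFS_POINTER_PREFIX = b"version https://git-lfs.github.com/spec/v1"

for path in [
    Path("prod_models/emo_modelV2.keras"),
    Path("prod_models/emotion_classifier_pipe_lr.pkl"),
    Path("prod_models/emo_modelV2_tf/variables/variables.data-00000-of-00001"),
]:
    with path.open("rb") as fh:
        head = fh.read(len(LFS_POINTER_PREFIX))
    if head == LFS_POINTER_PREFIX:
        print(f"{path}: still an LFS pointer -- run `git lfs pull`")
    else:
        print(f"{path}: looks like real model data ({path.stat().st_size} bytes)")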
api.py
ADDED
@@ -0,0 +1,122 @@
# Standard library
import os
import re
from pathlib import Path

# Environment must be configured before keras/tensorflow are imported,
# otherwise these settings have no effect.
os.environ["KERAS_BACKEND"] = "tensorflow"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"

# Third-party dependencies
import joblib
import uvicorn
import keras
import tensorflow as tf

# FastAPI dependencies
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel

# Model paths
lr_model_path = Path('./prod_models/emotion_classifier_pipe_lr.pkl')
keras_model_path = Path('./prod_models/emo_modelV2.keras')

# Request body for the prediction endpoints
class Paragraph(BaseModel):
    input: str

# Emotion labels predicted by the Keras model
classes = ['admiration', 'amusement', 'anger', 'annoyance', 'approval', 'caring', 'confusion', 'curiosity', 'desire', 'disappointment', 'disapproval', 'disgust', 'embarrassment', 'excitement', 'fear', 'gratitude', 'grief', 'joy', 'love', 'nervousness', 'optimism', 'pride', 'realization', 'relief', 'remorse', 'sadness', 'surprise', 'neutral']

# Load the logistic regression pipeline
with open(lr_model_path, 'rb') as f:
    lr_model = joblib.load(f)

# Load the Keras model
keras_model = keras.models.load_model(str(keras_model_path), compile=True)

# Start the app
app = FastAPI()

# CORS policy: allow all origins, methods, and headers
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# APIs
@app.get("/")
async def welcome():
    return {"message": "Hello, this is the API server for Sentimetry. Go to /docs to test the APIs."}

@app.get("/d")
async def docs():
    return RedirectResponse(url="/docs")

@app.post("/logistic-regression")
async def predict_emotions_lr(paragraph: Paragraph):
    # Split the paragraph into individual sentences
    text_list = [text.strip() for text in re.split(r'[.!?;\n]', paragraph.input) if text.strip()]

    # Predict emotion probabilities for each sentence
    predictions_per_text = []
    for text in text_list:
        emotion = [{'label': label, 'score': score} for label, score in zip(lr_model.classes_, lr_model.predict_proba([text])[0])]
        predictions_per_text.append(emotion)

    # Aggregate the scores for each label across sentences
    total = {}
    for prediction in predictions_per_text:
        for emotion_dict in prediction:
            label = emotion_dict['label']
            score = emotion_dict['score']
            total[label] = total.get(label, 0) + score

    # Convert the aggregate back to a list of {label, score} dictionaries
    result = [{"label": label, "score": score} for label, score in total.items()]

    # Sort in descending order of score
    sorted_result = sorted(result, key=lambda x: x['score'], reverse=True)

    return {"predictions": sorted_result}

@app.post("/keras")
async def predict_emotions_keras(paragraph: Paragraph):
    # Split the paragraph into individual sentences
    text_list = [text.strip() for text in re.split(r'[.!?;\n]', paragraph.input) if text.strip()]

    # Predict emotion scores for each sentence
    predictions_per_text = []
    for text in text_list:
        scores = keras_model(tf.constant([text]))['dense_1'][0]
        emotion = [{'label': label, 'score': score} for label, score in zip(classes, scores.numpy())]
        print(emotion)
        predictions_per_text.append(emotion)

    # Aggregate the scores for each label across sentences
    total = {}
    for prediction in predictions_per_text:
        for emotion_dict in prediction:
            label = emotion_dict['label']
            score = emotion_dict['score']
            total[label] = total.get(label, 0) + score

    # Convert the aggregate back to a list of {label, score} dictionaries
    result = [{"label": label, "score": score} for label, score in total.items()]

    # Sort in descending order of score
    sorted_result = sorted(result, key=lambda x: x['score'], reverse=True)

    return {"predictions": sorted_result}

if __name__ == "__main__":
    uvicorn.run("api:app", host="0.0.0.0", port=8000, reload=True)
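Once the server is running (for example via `python api.py`, which serves on port 8000 per the `uvicorn.run` call above), both prediction endpoints accept a JSON body with a single `input` field. The snippet below is a minimal client sketch using only the standard library; the host, port, and sample text are assumptions for illustration, not part of the commit.

# Minimal client sketch for the Sentimetry API (assumes the server is
# reachable at http://localhost:8000, per the uvicorn.run call in api.py).
import json
from urllib import request

def predict(endpoint: str, paragraph: str) -> dict:
    body = json.dumps({"input": paragraph}).encode("utf-8")
    req = request.Request(
        f"http://localhost:8000/{endpoint}",
        data=body,
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with request.urlopen(req) as resp:
        return json.loads(resp.read())

if __name__ == "__main__":
    text = "I got the job today! I was so nervous before the interview."
    print(predict("logistic-regression", text))
    print(predict("keras", text))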
prod_models/emo_modelV2.keras
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:edc135bd8a6f05f7715b1c8ab03c56dd6c2c55da11a0ce123c1ca928462c9e57
size 2466847
prod_models/emo_modelV2_tf/fingerprint.pb
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2f5c9e6db6ba30440ddb2c4287e4d5802f7f540363f176ec3c4b8f11ff374c8f
size 55
prod_models/emo_modelV2_tf/keras_metadata.pb
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0fa77c6e2dfb1a71abe82d35ab88e7672f93acf280ab9a396d77cb3ebfa03465
size 56699
prod_models/emo_modelV2_tf/saved_model.pb
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3bcab7e06a0a250f5f2048e5aa8ee6776fd51a29c022490f282927aa19ace6b1
size 6399018
prod_models/emo_modelV2_tf/variables/variables.data-00000-of-00001
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3fda87c354e10742eeb81fa5f47b09a7d705a3af7922d77b5a9920cfe072a5ec
size 5538785
prod_models/emo_modelV2_tf/variables/variables.index
ADDED
Binary file (4.85 kB)
prod_models/emotion_classifier_pipe_lr.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:19927a0dc7b78159eee283a8b7adc65956ca49c1f9d48476bb4b46ab5cee3216
size 7631333
requirements.txt
ADDED
@@ -0,0 +1,18 @@
absl-py>=2.1.0
fastapi>=0.110.0
joblib>=1.3.2
keras>=3.0.5
matplotlib>=3.8.3
numpy>=1.26.4
pandas>=2.2.1
protobuf>=4.25.3
pydantic>=2.6.4
pydantic_core>=2.16.3
retvec>=1.0.1
scikit-learn>=1.4.1.post1
scipy>=1.12.0
six>=1.16.0
starlette>=0.36.3
tensorboard>=2.16.2
tensorflow>=2.16.1
uvicorn>=0.28.0
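The pins above are minimum versions rather than exact ones. A quick way to compare them against what is actually installed is shown below; this is only a convenience sketch using the standard library, and the package list is a hand-picked subset of the requirements.

# Sketch: print installed versions of the key runtime dependencies so they
# can be compared against the minimums in requirements.txt.
from importlib.metadata import version, PackageNotFoundError

for pkg in ["fastapi", "keras", "tensorflow", "scikit-learn", "joblib", "uvicorn"]:
    try:
        print(f"{pkg}=={version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg} is not installed")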