Spaces: tensorgirl (status: Runtime error)
Commit 9e75a0d · committed by tensorgirl
Parent(s): 6c1f0f7

Upload 4 files

Files changed:
- .gitattributes (+1, -0)
- app.py (+139, -0)
- requirements.txt (+5, -0)
- weights.keras (+3, -0)
- xgb.json (+0, -0)
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+weights.keras filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,139 @@
import gradio as gr
import tensorflow as tf
import keras
import numpy as np
import sklearn
from sklearnex import patch_sklearn, unpatch_sklearn
patch_sklearn()  # accelerate scikit-learn with the Intel extension
import xgboost as xgb

from keras import Model, Sequential
from keras.layers import Layer
import keras.layers as nn


class ChannelAttn(Layer):
    def __init__(self, c=512) -> None:
        super(ChannelAttn, self).__init__()
        self.gap = nn.AveragePooling2D(7)
        self.attention = Sequential([
            nn.Dense(32),
            nn.BatchNormalization(),
            nn.ReLU(),
            nn.Dense(c, activation='sigmoid')
        ])

    def call(self, x):
        x = self.gap(x)
        x = nn.Flatten()(x)
        y = self.attention(x)
        return x * y


class SpatialAttn(Layer):
    def __init__(self, c=512):
        super(SpatialAttn, self).__init__()
        self.conv1x1 = Sequential([
            nn.Conv2D(256, 1),
            nn.BatchNormalization()
        ])
        self.conv_3x3 = Sequential([
            nn.ZeroPadding2D(padding=(1, 1)),
            nn.Conv2D(512, 3, 1),
            nn.BatchNormalization()
        ])
        self.conv_1x3 = Sequential([
            nn.ZeroPadding2D(padding=(0, 1)),
            nn.Conv2D(512, (1, 3)),
            nn.BatchNormalization()
        ])
        self.conv_3x1 = Sequential([
            nn.ZeroPadding2D(padding=(1, 0)),
            nn.Conv2D(512, (3, 1)),
            nn.BatchNormalization()
        ])
        self.norm = nn.ReLU()

    def call(self, x):
        y = self.conv1x1(x)
        y = self.norm(self.conv_3x3(y) + self.conv_1x3(y) + self.conv_3x1(y))
        y = tf.math.reduce_sum(y, axis=1, keepdims=True)
        return x * y


class CrossAttnHead(Layer):
    def __init__(self, c=512):
        super(CrossAttnHead, self).__init__()
        self.sa = SpatialAttn(c)
        self.ca = ChannelAttn(c)

    def call(self, x):
        return self.ca(self.sa(x))
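
# Shape notes for one head (assuming the (7, 7, 512) map produced in
# DAN.call below, after the 1x1 'features' conv projects ResNet50's output):
# SpatialAttn builds an attention map from its 1x1, 3x3, 1x3 and 3x1 conv
# branches and multiplies it back onto the input, preserving (7, 7, 512);
# ChannelAttn then average-pools to (1, 1, 512), flattens, and applies a
# sigmoid channel gate, so each CrossAttnHead yields a 512-d vector per image.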


@keras.saving.register_keras_serializable(package='custom')
class DAN(Model):
    def __init__(self, num_classes=8, trainable=True, dtype='float32'):
        super(DAN, self).__init__()
        # Frozen ImageNet ResNet50 backbone; only the attention heads on top
        # carry trained weights.
        self.mod = keras.applications.ResNet50(
            include_top=False,
            weights="imagenet",
            input_shape=(224, 224, 3)
        )
        self.mod.trainable = False
        self.num_head = 4
        self.hd = []
        for i in range(self.num_head):
            self.hd.append(CrossAttnHead())
        self.features = nn.Conv2D(512, 1, padding='same')
        self.fc = nn.Dense(num_classes)
        self.bn = nn.BatchNormalization()

    def call(self, x):
        x = self.mod(x)
        x = self.features(x)
        heads = []
        for h in self.hd:
            heads.append(h(x))

        heads = tf.transpose(tf.stack(heads), perm=(1, 0, 2))
        heads = keras.ops.log_softmax(heads, axis=1)
        return tf.math.reduce_sum(heads, axis=1)
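
# Note: DAN defines fc and bn but never uses them in call(); the output is
# the 512-d aggregated head feature vector, not class logits. The app
# therefore uses DAN purely as a feature extractor for the XGBoost
# classifier loaded below.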

backbone = DAN()
backbone.load_weights('weights.keras')

xgb_params = {
    'objective': 'binary:logistic',
    'predictor': 'cpu_predictor',
    'disable_default_eval_metric': 1,
}

model_xgb = xgb.XGBClassifier(**xgb_params)

# XGBClassifier has no load_weights(); load_model() restores the saved booster.
model_xgb.load_model('xgb.json')
emotions = ['anger', 'contempt', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']


def fn(image):
    # Coerce any input to a (224, 224, 3) array. Note that np.resize tiles
    # or truncates data rather than resampling the image.
    if len(image.shape) == 2:
        img = np.stack([image, image, image], axis=2)
        img = np.resize(img, (224, 224, 3))
    elif len(image.shape) == 3 and image.shape[2] == 1:
        img = np.stack([image[:, :, 0], image[:, :, 0], image[:, :, 0]], axis=2)
        img = np.resize(img, (224, 224, 3))
    else:
        img = np.resize(image, (224, 224, 3))
    img = np.expand_dims(img, axis=0)
    feats = backbone.predict(img)
    pred = model_xgb.predict(feats)

    # predict() returns an array of labels; index the list with a plain int.
    return emotions[int(pred[0])]


demo = gr.Interface(
    fn, ['image'], "text",
)

if __name__ == "__main__":
    demo.launch()
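
For a quick check outside the Gradio UI, a minimal smoke test could look like the sketch below (assumptions: app.py is importable from the working directory and weights.keras / xgb.json are present; the random input is purely illustrative):

# smoke_test.py -- illustrative sketch, not part of this commit
import numpy as np

from app import fn  # importing app builds DAN and loads both weight files

# Fake 224x224 RGB image; fn() also accepts grayscale HxW or HxWx1 arrays.
dummy = (np.random.rand(224, 224, 3) * 255).astype(np.uint8)
print(fn(dummy))  # prints one of the eight emotion labels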
requirements.txt ADDED
@@ -0,0 +1,5 @@
numpy
tensorflow
scikit-learn-intelex
keras
xgboost
weights.keras ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4d0f8e5ebffc14265654c47cf7a433870bc6110b5d4dfd09a0c002e024c07e2b
size 133529600
xgb.json ADDED
The diff for this file is too large to render. See raw diff.