Update app.py
app.py
CHANGED
@@ -1,18 +1,134 @@
 from huggingface_hub import from_pretrained_fastai
 import gradio as gr
 from fastai.vision.all import *
+from tensorflow.keras.layers import BatchNormalization
+from tensorflow.keras.layers import Conv2D
+from tensorflow.keras.layers import Conv2DTranspose
+from tensorflow.keras.layers import LeakyReLU
+from tensorflow.keras.layers import Activation
+from tensorflow.keras.layers import Flatten
+from tensorflow.keras.layers import Dense
+from tensorflow.keras.layers import Reshape
+from tensorflow.keras.layers import Input
+from tensorflow.keras.models import Model
+from tensorflow.keras.optimizers.legacy import Adam
+from tensorflow.keras.datasets import mnist
+from tensorflow.keras import backend as K
+from sklearn.model_selection import train_test_split
+from google.colab.patches import cv2_imshow
+import matplotlib.pyplot as plt
+import random
+import pickle
+import cv2
+import numpy as np
 
+class ConvAutoencoder:
+    @staticmethod
+    def build(width, height, depth, filters=(32, 64), latentDim=16):
 
+        # initialize the input shape to be "channels last" along with the channels
+        # dimension itself
+        inputShape = (height, width, depth)
+        chanDim = -1
 
-
+        # define the input to the encoder
+        inputs = Input(shape=inputShape)
+        x = inputs
+
+        for f in filters:
+            # apply a CONV => RELU => BN operation
+            x = Conv2D(f, (3, 3), strides=2, padding="same")(x)
+            x = LeakyReLU(alpha=0.2)(x)
+            x = BatchNormalization(axis=chanDim)(x)
+
+        # flatten the network and then construct our latent vector
+        volumeSize = K.int_shape(x)
+        x = Flatten()(x)
+        latent = Dense(latentDim)(x)
+
+        # build the encoder model
+        encoder = Model(inputs, latent, name="encoder")
+
+        # start building the decoder model which will accept the output of the
+        # encoder as its inputs
+        latentInputs = Input(shape=(latentDim,))
+        x = Dense(np.prod(volumeSize[1:]))(latentInputs)
+        x = Reshape((volumeSize[1], volumeSize[2], volumeSize[3]))(x)
+
+        # loop over our number of filters again, but this time in reverse order
+        for f in filters[::-1]:
+            # apply a CONV_TRANSPOSE => RELU => BN operation
+            x = Conv2DTranspose(f, (3, 3), strides=2, padding="same")(x)
+            x = LeakyReLU(alpha=0.2)(x)
+            x = BatchNormalization(axis=chanDim)(x)
+
+        # apply a single CONV_TRANSPOSE layer used to recover the original depth of
+        # the image
+        x = Conv2DTranspose(depth, (3, 3), padding="same")(x)
+        outputs = Activation("sigmoid")(x)
+
+        # build the decoder model
+        decoder = Model(latentInputs, outputs, name="decoder")
+
+        # our autoencoder is the encoder + decoder
+        autoencoder = Model(inputs, decoder(encoder(inputs)), name="autoencoder")
+
+        return (encoder, decoder, autoencoder)
+
+
+def build_unsupervised_dataset(data, labels, validLabel=1, anomalyLabel=3,
+                               contam=0.01, seed=42):
+
+    # grab all indexes of the supplied class label that are *truly* that particular
+    # label, then grab the indexes of the image labels that will serve as "anomalies"
+    validIdxs = np.where(labels == validLabel)[0]
+    anomalyIdxs = np.where(labels == anomalyLabel)[0]
+
+    random.shuffle(validIdxs)
+    random.shuffle(anomalyIdxs)
+
+    # compute the total number of anomaly data points to select
+    i = int(len(validIdxs) * contam)
+    anomalyIdxs = anomalyIdxs[:i]
+
+    # use NumPy array indexing to extract both the valid images and "anomaly" images
+    validImages = data[validIdxs]
+    anomalyImages = data[anomalyIdxs]
+
+    # stack the valid images and anomaly images together to form a single data
+    # matrix and then shuffle the rows
+    images = np.vstack([validImages, anomalyImages])
+    np.random.seed(seed)
+    np.random.shuffle(images)
+
+    return images
+
+
+EPOCHS = 20
+INIT_LR = 1e-3
+BS = 32
+
+
+((trainX, trainY), (testX, testY)) = mnist.load_data()
+
+
+images = build_unsupervised_dataset(trainX, trainY, validLabel=1, anomalyLabel=3,
+                                    contam=0.01)
+
+
+(encoder, decoder, autoencoder) = ConvAutoencoder.build(28, 28, 1)
+opt = Adam(learning_rate=INIT_LR, decay=INIT_LR / EPOCHS)
+autoencoder.compile(loss="mse", optimizer=opt)
+
+
+H = autoencoder.fit(trainX, trainX, validation_data=(testX, testX), epochs=EPOCHS,
+                    batch_size=BS)
 
-learner = from_pretrained_fastai(repo_id)
-labels = learner.dls.vocab
 
 # Define a function that carries out the predictions
 def predict(img):
-
-    pred =
+    img = PILImage.create(img)
+    pred = autoencoder.predict(img)
     mse = np.mean((img - pred) ** 2)
     thresh = 0.02767541486024857
     if mse >= thresh:
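With filters=(32, 64), ConvAutoencoder.build(28, 28, 1) downsamples 28 -> 14 -> 7 in the encoder and the decoder upsamples back to 28, so reconstructions come out at the input resolution with a 16-dimensional latent code. A minimal sanity-check sketch (the dummy batch is hypothetical, not part of app.py):

# Sketch: check the encoder and autoencoder output shapes for a 28x28x1 input.
(encoder, decoder, autoencoder) = ConvAutoencoder.build(28, 28, 1)
dummy = np.zeros((1, 28, 28, 1), dtype="float32")
print(encoder.predict(dummy).shape)      # expected (1, 16) latent vectors
print(autoencoder.predict(dummy).shape)  # expected (1, 28, 28, 1) reconstructions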
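The training call fits the autoencoder on trainX exactly as returned by mnist.load_data(), i.e. uint8 arrays of shape (60000, 28, 28), while the model's Input layer expects batches of shape (N, 28, 28, 1) and its sigmoid output assumes pixel values in [0, 1]. A minimal preprocessing sketch under that assumption (not part of this commit):

# Sketch: add a channel dimension and scale pixels to [0, 1] before autoencoder.fit.
trainX = np.expand_dims(trainX, axis=-1).astype("float32") / 255.0
testX = np.expand_dims(testX, axis=-1).astype("float32") / 255.0

The same reshaping would apply to the contaminated images array returned by build_unsupervised_dataset if that subset, rather than the full trainX, is what the autoencoder is meant to learn from; the already imported train_test_split could then carve a validation split out of it.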
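The hard-coded thresh = 0.02767541486024857 in predict looks like a precomputed value. One common way to derive such a reconstruction-error threshold (an assumption, not shown in the commit) is to take a high quantile of the per-image MSE over the training images:

# Sketch: derive an anomaly threshold from the training reconstruction errors
# (assumes trainX has already been reshaped to (N, 28, 28, 1) and scaled to [0, 1]).
decoded = autoencoder.predict(trainX)
errors = np.mean((trainX - decoded) ** 2, axis=(1, 2, 3))
thresh = np.quantile(errors, 0.999)  # the 0.999 quantile is an illustrative choice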
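In the new predict, the Gradio input is opened with PILImage.create and passed directly to autoencoder.predict. A hedged sketch of the intermediate conversion such a function typically needs, assuming the input is something PILImage.create can open (predict_anomaly and its return labels are hypothetical, not names from the commit):

# Sketch: convert the incoming image to the (1, 28, 28, 1) float array the
# autoencoder expects, then compare the reconstruction MSE to the threshold.
def predict_anomaly(img):
    pil = PILImage.create(img).convert("L").resize((28, 28))
    arr = np.array(pil, dtype="float32").reshape(1, 28, 28, 1) / 255.0
    pred = autoencoder.predict(arr)
    mse = np.mean((arr - pred) ** 2)
    thresh = 0.02767541486024857
    return "Anomaly" if mse >= thresh else "Normal"  # hypothetical labels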