Spaces:
Sleeping
Sleeping
Commit
·
8b8200f
1
Parent(s):
4e1d18b
Update app.py
Browse files
app.py
CHANGED
@@ -13,30 +13,29 @@ from scipy.fftpack import dct
|
|
13 |
|
14 |
|
15 |
|
16 |
-
def crop_image(
|
17 |
#This function will return a value which indicates whether the crop succeeded or not
|
18 |
#and why it failed, if it did. It will also return the cropped image if succeeded, and the original one otherwise.
|
19 |
#-1 indicates failure because of grayscale or non-RGB.
|
20 |
#-2 indicates failure because image is too small to crop
|
21 |
# 1 indicates success.
|
22 |
-
if len(
|
23 |
-
return -1,
|
24 |
-
x_dim, y_dim =
|
25 |
if x_dim < 256 or y_dim < 256:
|
26 |
-
return -2,
|
27 |
-
image = im_lib.fromarray(image_array)
|
28 |
left, upper = randint(0, x_dim-256), randint(0, y_dim-256)
|
29 |
right, lower = 256+left, 256+upper
|
30 |
image = image.crop((left,upper,right,lower))
|
31 |
-
return 1,
|
32 |
|
33 |
def HPF_filter(image):
|
34 |
return im_lib.fromarray(np.asarray(image)-np.asarray(image.filter(ImageFilter.GaussianBlur)))
|
35 |
|
36 |
-
def final_image_array_single(
|
37 |
-
cropped_key,
|
38 |
if cropped_key == 1:
|
39 |
-
image_array = np.asarray(HPF_filter(
|
40 |
return image_array
|
41 |
|
42 |
def create_model_single(model_weights_file = "single_channel_model_best_val_real_precision_weights"):
|
@@ -62,29 +61,25 @@ def create_model_single(model_weights_file = "single_channel_model_best_val_real
|
|
62 |
|
63 |
model_test_single = create_model_single()
|
64 |
|
65 |
-
def real_or_not_single(
|
66 |
-
cropped_key,
|
67 |
if cropped_key == -1:
|
68 |
return "This image cannot be processed as it is not RGB."
|
69 |
elif cropped_key == -2:
|
70 |
return "This image is too small to be processed."
|
71 |
else:
|
72 |
-
image_array = final_image_array_single(
|
73 |
prediction_list = model.predict(image_array.reshape(1,256,256,3))
|
74 |
if np.argmax(prediction_list) == len(prediction_list[0]) - 1:
|
75 |
return "This image is probably real."
|
76 |
else:
|
77 |
-
keywords = ["biggan", "crn", "cyclegan","deepfake","gaugan","imle","progan","san","seeingdark","stargan", "stylegan",
|
78 |
-
"
|
79 |
-
|
80 |
-
i = 0
|
81 |
-
while i < len(prediction_list[0]) and prediction_list[0][i] != np.max(prediction_list):
|
82 |
-
i += 1
|
83 |
|
84 |
-
return f"This image is probably fake and generated by {keywords[
|
85 |
|
86 |
|
87 |
-
#need to change filters and final image
|
88 |
def normalize_image(image,normalizing_factor=255):
|
89 |
if image.mode == 'RGB':
|
90 |
return np.asarray(image).reshape(image.size[0],image.size[1],3)/normalizing_factor
|
@@ -112,10 +107,10 @@ def normalized_gdct(image):
|
|
112 |
|
113 |
|
114 |
def final_image_array_dual(image):
|
115 |
-
cropped_key,
|
116 |
if cropped_key == 1:
|
117 |
-
two_images = [normalized_gdct(
|
118 |
-
return two_images
|
119 |
|
120 |
def create_model_dual(model_weights_file = "dual_channel_model_best_val_real_precision_weights"):
|
121 |
try:
|
@@ -156,32 +151,28 @@ def create_model_dual(model_weights_file = "dual_channel_model_best_val_real_pre
|
|
156 |
model_test_dual = create_model_dual()
|
157 |
|
158 |
|
159 |
-
def real_or_not_dual(
|
160 |
-
cropped_key,
|
161 |
if cropped_key == -1:
|
162 |
return "This image cannot be processed as it is not RGB."
|
163 |
elif cropped_key == -2:
|
164 |
return "This image is too small to be processed."
|
165 |
else:
|
166 |
-
image1, image2 = final_image_array_dual(
|
167 |
prediction_list = model.predict([image1.reshape(1,256,256,1), image2.reshape(1,256,256,3)])
|
168 |
if np.argmax(prediction_list) == len(prediction_list[0]) - 1:
|
169 |
return "This image is probably real."
|
170 |
else:
|
171 |
-
keywords = ["biggan", "crn", "cyclegan","deepfake","gaugan","imle","progan","san","seeingdark","stargan", "stylegan",
|
172 |
-
"
|
173 |
|
174 |
-
i = 0
|
175 |
-
while i < len(prediction_list[0]) and prediction_list[0][i] != np.max(prediction_list):
|
176 |
-
i += 1
|
177 |
-
|
178 |
-
return f"This image is probably fake and generated by {keywords[i]}."
|
179 |
|
|
|
180 |
|
181 |
|
182 |
interface1 = gr.Interface(fn = real_or_not_single,
|
183 |
title = "AI or Real?",
|
184 |
-
inputs = gr.Image(show_label=False),
|
185 |
outputs = "text",
|
186 |
description = "<center>Upload your image and we will determine whether it's <strong> real </strong> or <strong> AI-generated </strong> using a Single Channel Neural Network. <br> Please flag any erroneous output.</center>"
|
187 |
)
|
@@ -189,7 +180,7 @@ interface1 = gr.Interface(fn = real_or_not_single,
|
|
189 |
|
190 |
interface2 = gr.Interface(fn = real_or_not_dual,
|
191 |
title = "AI or Real?",
|
192 |
-
inputs = gr.Image(show_label=False),
|
193 |
outputs = "text",
|
194 |
description = "<center>Upload your image and we will determine whether it's <strong> real </strong> or <strong> AI-generated </strong> using a Dual Channel Neural Network. <br> Please flag any erroneous output.</center>"
|
195 |
)
|
|
|
13 |
|
14 |
|
15 |
|
16 |
+
def crop_image(image):  # Gradio supplies a PIL image (gr.Image(type="pil")).
    """Crop a random 256x256 window out of `image`.

    Returns a (status, image) pair:
      -1 : image is not 3-channel RGB (grayscale, RGBA, ...); original returned.
      -2 : image is smaller than 256x256 in either dimension; original returned.
       1 : success; the 256x256 crop is returned.
    """
    arr = np.asarray(image)
    # Reject anything that is not H x W x 3.  A 2-D array is grayscale, and a
    # last dimension != 3 (e.g. RGBA's 4 channels) is equally unusable by the
    # RGB models downstream, which reshape to (1, 256, 256, 3).  The original
    # check (`ndim != 3` only) let RGBA images through.
    if arr.ndim != 3 or arr.shape[-1] != 3:
        return -1, image
    x_dim, y_dim = image.size
    if x_dim < 256 or y_dim < 256:
        return -2, image
    # Pick a uniformly random top-left corner; randint is inclusive on both
    # ends, so a 256-pixel dimension yields exactly one valid position (0).
    left, upper = randint(0, x_dim - 256), randint(0, y_dim - 256)
    right, lower = 256 + left, 256 + upper
    image = image.crop((left, upper, right, lower))
    return 1, image
|
31 |
|
32 |
def HPF_filter(image):
    """High-pass filter: subtract a Gaussian-blurred copy from the image.

    The subtraction happens on the raw numpy arrays (uint8, so differences
    wrap modulo 256 — preserved from the original implementation, and what
    the models were trained on) before converting back to a PIL image.
    """
    blurred = image.filter(ImageFilter.GaussianBlur)
    residual = np.asarray(image) - np.asarray(blurred)
    return im_lib.fromarray(residual)
|
34 |
|
35 |
+
def final_image_array_single(image):
    """Build the single-channel model input: random 256x256 crop -> high-pass.

    Returns the high-pass-filtered crop as an ndarray on success, or None
    when the crop fails (non-RGB or too-small image).  The original left the
    result unassigned on the failure path; callers already guard by calling
    crop_image themselves first, so the explicit None only makes the failure
    contract visible instead of raising UnboundLocalError.
    """
    cropped_key, image = crop_image(image)
    if cropped_key == 1:
        return np.asarray(HPF_filter(image))
    return None
|
40 |
|
41 |
def create_model_single(model_weights_file = "single_channel_model_best_val_real_precision_weights"):
|
|
|
61 |
|
62 |
# Load the single-channel model once at import time so every request reuses it.
model_test_single = create_model_single()
|
63 |
|
64 |
+
def real_or_not_single(image, model = model_test_single):
    """Classify `image` as real or AI-generated with the single-channel model.

    Returns a human-readable verdict string; images that cannot be processed
    (non-RGB, or smaller than 256x256) get an explanatory message instead.
    """
    cropped_key, cropped_image = crop_image(image)
    if cropped_key == -1:
        return "This image cannot be processed as it is not RGB."
    if cropped_key == -2:
        return "This image is too small to be processed."
    image_array = final_image_array_single(cropped_image)
    prediction_list = model.predict(image_array.reshape(1,256,256,3))
    winner = np.argmax(prediction_list)
    # The last class index means "real"; every other index names a generator.
    if winner == len(prediction_list[0]) - 1:
        return "This image is probably real."
    keywords = ["biggan", "crn", "cyclegan", "deepfake", "gaugan", "imle",
                "progan", "san", "seeingdark", "stargan", "stylegan2",
                "stylegan", "whichfaceisreal"]
    return f"This image is probably fake and generated by {keywords[winner]}."
|
81 |
|
82 |
|
|
|
83 |
def normalize_image(image,normalizing_factor=255):
|
84 |
if image.mode == 'RGB':
|
85 |
return np.asarray(image).reshape(image.size[0],image.size[1],3)/normalizing_factor
|
|
|
107 |
|
108 |
|
109 |
def final_image_array_dual(image):
    """Build the two inputs for the dual-channel network.

    Crops a random 256x256 window, then returns
    [normalized_gdct(crop), highpassrgb(crop)] on success and None when the
    crop fails (non-RGB or too-small image).  The original fell off the end
    of the function on failure; callers guard with crop_image first, so the
    explicit None only documents the failure contract.
    """
    cropped_key, image = crop_image(image)
    if cropped_key == 1:
        return [normalized_gdct(image), highpassrgb(image)]
    return None
|
114 |
|
115 |
def create_model_dual(model_weights_file = "dual_channel_model_best_val_real_precision_weights"):
|
116 |
try:
|
|
|
151 |
# Load the dual-channel model once at import time so every request reuses it.
model_test_dual = create_model_dual()
|
152 |
|
153 |
|
154 |
+
def real_or_not_dual(image, model = model_test_dual):  # must take an array
    """Classify `image` as real or AI-generated with the dual-channel model.

    Returns a human-readable verdict string; images that cannot be processed
    (non-RGB, or smaller than 256x256) get an explanatory message instead.
    """
    cropped_key, cropped_image = crop_image(image)
    if cropped_key == -1:
        return "This image cannot be processed as it is not RGB."
    if cropped_key == -2:
        return "This image is too small to be processed."
    image1, image2 = final_image_array_dual(cropped_image)
    prediction_list = model.predict([image1.reshape(1,256,256,1),
                                     image2.reshape(1,256,256,3)])
    winner = np.argmax(prediction_list)
    # The last class index means "real"; every other index names a generator.
    if winner == len(prediction_list[0]) - 1:
        return "This image is probably real."
    keywords = ["biggan", "crn", "cyclegan", "deepfake", "gaugan", "imle",
                "progan", "san", "seeingdark", "stargan", "stylegan2",
                "stylegan", "whichfaceisreal"]
    return f"This image is probably fake and generated by {keywords[winner]}."
|
171 |
|
172 |
|
173 |
# Gradio UI for the single-channel classifier; type="pil" hands the handler a
# PIL image rather than an ndarray.
interface1 = gr.Interface(
    fn = real_or_not_single,
    title = "AI or Real?",
    inputs = gr.Image(show_label = False, type = "pil"),
    outputs = "text",
    description = "<center>Upload your image and we will determine whether it's <strong> real </strong> or <strong> AI-generated </strong> using a Single Channel Neural Network. <br> Please flag any erroneous output.</center>"
)
|
|
|
180 |
|
181 |
# Gradio UI for the dual-channel classifier; type="pil" hands the handler a
# PIL image rather than an ndarray.
interface2 = gr.Interface(
    fn = real_or_not_dual,
    title = "AI or Real?",
    inputs = gr.Image(show_label = False, type = "pil"),
    outputs = "text",
    description = "<center>Upload your image and we will determine whether it's <strong> real </strong> or <strong> AI-generated </strong> using a Dual Channel Neural Network. <br> Please flag any erroneous output.</center>"
)
|