from PIL import Image
import streamlit as st
import cv2
import numpy as np
import os
import tensorflow as tf
from tensorflow.keras import Input, Model
from IMVIP_Supplementary_Material.scripts import dfutils  # helper methods used for DF-Net
DESCRIPTION = """# DF-Net
The Digital Forensics Network is designed and trained to detect and locate image manipulations.
More information can be found in this [publication](https://zenodo.org/record/8214996).
"""

IMG_SIZE = 256
tf.experimental.numpy.experimental_enable_numpy_behavior()
#np.warnings.filterwarnings('error', category=np.VisibleDeprecationWarning)
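
# check_forgery_df: resize the input to the network resolution, run both DF-Net
# models, fuse their outputs with a pixel-wise maximum, turn the fused
# prediction into a mask (dfutils.create_mask) and scale the mask back to the
# original image size.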
def check_forgery_df(img):
    shape_original = img.shape
    img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
    x = np.expand_dims(img.astype('float32') / 255., axis=0)
    pred1 = model_M1.predict(x, verbose=0)
    pred2 = model_M2.predict(x, verbose=0)
    # TFSMLayer-based models may return a dict of named outputs; keep only the prediction tensor
    if isinstance(pred1, dict):
        pred1 = pred1[next(iter(pred1))]
    if isinstance(pred2, dict):
        pred2 = pred2[next(iter(pred2))]
    pred = np.max([pred1, pred2], axis=0)
    pred = dfutils.create_mask(pred)
    pred = pred.reshape(pred.shape[-3:-1])
    resized_image = cv2.resize(pred, (shape_original[1], shape_original[0]), interpolation=cv2.INTER_LINEAR)
    return resized_image
def evaluate(img):
    pre_t = check_forgery_df(img)
    st.image(pre_t, caption="White area indicates potential image manipulations.")

st.markdown(DESCRIPTION)
uploaded_file = st.file_uploader("Please upload an image", type=["jpeg", "jpg", "png"])
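
# The models are built only after a file has been uploaded. Each exported TF
# SavedModel is wrapped in tf.keras.layers.TFSMLayer (the Keras 3 way of
# reusing a SavedModel as an inference-only layer) and then placed inside a
# Keras Model so it can be called with .predict().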
if uploaded_file is not None:
    # load the two DF-Net models
    model_path1 = "IMVIP_Supplementary_Material/models/model1/"
    model_path2 = "IMVIP_Supplementary_Material/models/model2/"
    tfsm_layer1 = tf.keras.layers.TFSMLayer(model_path1, call_endpoint='serving_default')
    tfsm_layer2 = tf.keras.layers.TFSMLayer(model_path2, call_endpoint='serving_default')
    input_shape = (IMG_SIZE, IMG_SIZE, 3)
    inputs = Input(shape=input_shape)
    # build one Keras model per SavedModel
    outputs1 = tfsm_layer1(inputs)
    model_M1 = Model(inputs, outputs1)
    outputs2 = tfsm_layer2(inputs)
    model_M2 = Model(inputs, outputs2)
    #model_M1 = tf.keras.layers.TFSMLayer("IMVIP_Supplementary_Material/models/model1/")  #tf.keras.models.load_model("IMVIP_Supplementary_Material/models/model1/")
    #model_M2 = tf.keras.models.load_model("IMVIP_Supplementary_Material/models/model2/")

    # Convert the uploaded file to an OpenCV image.
    file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
    opencv_image = cv2.imdecode(file_bytes, 1)
    reversed_image = opencv_image[:, :, ::-1]  # BGR -> RGB
    st.image(reversed_image, caption="Input Image")
    evaluate(reversed_image)
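
# Optional refactor (a sketch only, not wired into the flow above): cache the
# model construction with st.cache_resource so the SavedModels are wrapped once
# per session instead of on every Streamlit rerun. The function name and
# signature below are illustrative assumptions, not part of the original app.
@st.cache_resource
def load_df_models(path1, path2):
    # Wrap both SavedModels exactly as done above, but behind Streamlit's
    # resource cache so repeated reruns reuse the already-built models.
    layer1 = tf.keras.layers.TFSMLayer(path1, call_endpoint='serving_default')
    layer2 = tf.keras.layers.TFSMLayer(path2, call_endpoint='serving_default')
    inp = Input(shape=(IMG_SIZE, IMG_SIZE, 3))
    return Model(inp, layer1(inp)), Model(inp, layer2(inp))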