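"""Gradio demo for landmark-based deepfake video detection.

Two bidirectional-GRU classifiers score a video's facial-landmark sequences and
their frame-to-frame differences; the averaged fake probability decides the label.
"""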
import os

import gradio as gr
import tensorflow as tf
import tensorflow.keras as K
from tensorflow.keras import layers

from extract_landmarks import get_data_for_test, extract_landmark, merge_video_prediction
# Model / preprocessing hyper-parameters.
block_size = 60      # frames per landmark sequence fed to each classifier
DROPOUT_RATE = 0.5
RNN_UNIT = 64

# Silence duplicate-OpenMP errors and TensorFlow log noise.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Allow GPU memory growth when GPUs are available; otherwise run on CPU.
gpus = tf.config.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(device=gpu, enable=True)
device = "CPU" if len(gpus) == 0 else "GPU"
def predict(video):
    # Extract facial landmarks from the uploaded video and build test windows.
    path = extract_landmark(video)
    test_samples, test_samples_diff, _, _, test_sv, test_vc = get_data_for_test(path, 1, block_size)

    # Classifier over raw landmark sequences (136 landmark features per frame).
    model = K.Sequential([
        layers.InputLayer(input_shape=(block_size, 136)),
        layers.Dropout(0.25),
        layers.Bidirectional(layers.GRU(RNN_UNIT)),
        layers.Dropout(DROPOUT_RATE),
        layers.Dense(64, activation='relu'),
        layers.Dropout(DROPOUT_RATE),
        layers.Dense(2, activation='softmax')
    ])

    # Twin classifier over frame-to-frame landmark differences (one fewer timestep).
    model_diff = K.Sequential([
        layers.InputLayer(input_shape=(block_size - 1, 136)),
        layers.Dropout(0.25),
        layers.Bidirectional(layers.GRU(RNN_UNIT)),
        layers.Dropout(DROPOUT_RATE),
        layers.Dense(64, activation='relu'),
        layers.Dropout(DROPOUT_RATE),
        layers.Dense(2, activation='softmax')
    ])

    # Compilation is only needed so the models can run predict(); no training happens here.
    lossFunction = K.losses.SparseCategoricalCrossentropy(from_logits=False)
    optimizer = K.optimizers.Adam(learning_rate=0.001)
    model.compile(optimizer=optimizer, loss=lossFunction, metrics=['accuracy'])
    model_diff.compile(optimizer=optimizer, loss=lossFunction, metrics=['accuracy'])

    # ----Using DeeperForensics-1.0 parameters----#
    model.load_weights('g1.h5')
    model_diff.load_weights('g2.h5')

    # Average the two models' fake-class probabilities for each sample window.
    prediction = model.predict(test_samples)
    prediction_diff = model_diff.predict(test_samples_diff)
    mix_predict = []
    for i in range(len(prediction)):
        mix = prediction[i][1] + prediction_diff[i][1]
        mix_predict.append(mix / 2)

    # Merge per-window scores into a single score per video, then threshold at 0.5.
    prediction_video = merge_video_prediction(mix_predict, test_sv, test_vc)
    video_names = list(test_vc.keys())
    label = "Real"
    for pd in prediction_video:
        label = "Fake" if pd >= 0.5 else "Real"
    return label
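# Gradio UI: a video upload in, a "Fake"/"Real" text label out.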
# Note: gr.inputs / gr.outputs is the legacy (pre-3.x) Gradio API; newer releases
# expose the same components as gr.Video() and gr.Textbox().
inputs = gr.inputs.Video()
outputs = gr.outputs.Textbox()
iface = gr.Interface(fn=predict, inputs=inputs, outputs=outputs,
                     examples=[["sample_fake.mp4"], ["sample_real.mp4"]])
iface.launch()
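# The demo assumes the pre-trained weights ('g1.h5', 'g2.h5') and the example
# clips ('sample_fake.mp4', 'sample_real.mp4') sit next to app.py.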