import os

import tensorflow as tf
import tensorflow.keras as K
from tensorflow.keras import layers
import gradio as gr

from extract_landmarks import get_data_for_test, extract_landmark, merge_video_prediction
from detect_from_videos import test_full_image_network
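
# Hyperparameters for the landmark-sequence models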
block_size = 60
DROPOUT_RATE = 0.5
RNN_UNIT = 64
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
gpus = tf.config.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(device=gpu, enable=True)
device = "CPU" if len(gpus) == 0 else "GPU"
print("using {}".format(device))
model_path = '3_ff_raw.pkl'
output_path = 'output'

def predict(video):
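    # Extract facial landmarks from the input video and build test samples for both branches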
    path = extract_landmark(video)
    test_samples, test_samples_diff, _, _, test_sv, test_vc = get_data_for_test(path, 1, block_size)
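
    # Branch g1: bidirectional GRU over the raw landmark sequences (block_size frames x 136 coordinates)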
    model = K.Sequential([
        layers.InputLayer(input_shape=(block_size, 136)),
        layers.Dropout(0.25),
        layers.Bidirectional(layers.GRU(RNN_UNIT)),
        layers.Dropout(DROPOUT_RATE),
        layers.Dense(64, activation='relu'),
        layers.Dropout(DROPOUT_RATE),
        layers.Dense(2, activation='softmax')
    ])
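
    # Branch g2: the same architecture over frame-to-frame landmark differences (block_size - 1 steps)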
    model_diff = K.Sequential([
        layers.InputLayer(input_shape=(block_size - 1, 136)),
        layers.Dropout(0.25),
        layers.Bidirectional(layers.GRU(RNN_UNIT)),
        layers.Dropout(DROPOUT_RATE),
        layers.Dense(64, activation='relu'),
        layers.Dropout(DROPOUT_RATE),
        layers.Dense(2, activation='softmax')
    ])

    lossFunction = K.losses.SparseCategoricalCrossentropy(from_logits=False)
    optimizer = K.optimizers.Adam(learning_rate=0.001)
    model.compile(optimizer=optimizer,
                  loss=lossFunction,
                  metrics=['accuracy'])
    model_diff.compile(optimizer=optimizer,
                       loss=lossFunction,
                       metrics=['accuracy'])

    # ----Using DeeperForensics-1.0 parameters---- #
    print("*************** loading the model ***************")
    model.load_weights('g1.h5')
    model_diff.load_weights('g2.h5')
    prediction = model.predict(test_samples)
    prediction_diff = model_diff.predict(test_samples_diff)
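
    # Average the "fake" probabilities (index 1) of the two branches for each sample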
    mix_predict = []
    for i in range(len(prediction)):
        mix = prediction[i][1] + prediction_diff[i][1]
        mix_predict.append(mix / 2)
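
    # Merge the per-sample scores into one score per video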
    prediction_video = merge_video_prediction(mix_predict, test_sv, test_vc)

    print("*************** start prediction ***************")
    video_names = []
    for key in test_vc.keys():
        video_names.append(key)
    for i, pd in enumerate(prediction_video):
        if pd >= 0.5:
            label = "Fake"
        else:
            label = "Real"
        print("prediction score: {}".format(pd))
    output_video = test_full_image_network(video, model_path, output_path)
    return label, output_video
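

# Gradio interface: takes a video, returns a Real/Fake label plus the detector's output video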
inputs = gr.inputs.Video()
outputs = [gr.outputs.Textbox(), gr.outputs.Video()]
iface = gr.Interface(fn=predict, inputs=inputs, outputs=outputs,
                     examples=[["sample__real.mp4"], ["sample__fake.mp4"], ["sample__real2.mp4"], ["sample__fake2.mp4"]],
                     theme="grass",
                     title="Face Forgery Detection",
                     description="Input: a video. Output: Real / Fake.")
iface.launch()