File size: 3,461 Bytes
c96f53a
057994c
542d3d4
c7a840d
c894298
 
 
96e5a5e
c894298
 
 
 
 
ad87f99
c894298
 
 
 
 
 
 
96e5a5e
b0df336
c894298
 
 
 
 
 
 
9323d30
 
c894298
 
 
 
 
 
 
 
9323d30
 
c894298
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2e94ee0
c894298
 
 
 
 
 
 
 
 
 
 
2e94ee0
 
c894298
 
 
 
 
 
 
 
b3ae8f7
96e5a5e
b0df336
96e5a5e
c894298
 
9069e90
c894298
575695c
74baaee
c4085a1
828cc69
c5c3656
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
import tensorflow.keras as K
from tensorflow.keras import layers
import keras
import os
import tensorflow as tf
import gradio as gr
from extract_landmarks import get_data_for_test,extract_landmark,merge_video_prediction
from detect_from_videos import test_full_image_network

# Number of consecutive landmark frames grouped into one classifier sample.
block_size = 60
# Dropout rate applied around the recurrent and dense layers.
DROPOUT_RATE = 0.5
# Hidden units per direction in the bidirectional GRU.
RNN_UNIT = 64
# Tolerate duplicate OpenMP runtimes (works around a common MKL/TensorFlow
# clash on some installs); must be set before the conflicting libs load.
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# Expose only the first CUDA device to TensorFlow.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Turn on on-demand GPU memory growth so TF does not reserve all VRAM upfront.
gpus = tf.config.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(device=gpu, enable=True)
device = "CPU" if len(gpus) == 0 else "GPU"
print("using {}".format(device))

# Weights file for the frame-level detector used by test_full_image_network.
model_path = '3_ff_raw.pkl'
# Directory where the annotated output video is written.
output_path= 'output'
def predict(video):
    """Classify a video as Real or Fake from facial-landmark motion.

    Pipeline: extract landmarks from the video, build two bidirectional-GRU
    classifiers (one over raw landmark blocks, one over frame-to-frame
    diffs), load pretrained weights, average the two fake probabilities per
    block, then merge block scores into per-video scores.

    Args:
        video: path to the input video file (as supplied by Gradio).

    Returns:
        tuple: (label, output_video) where label is "Fake" or "Real" and
        output_video is the annotated video produced by
        test_full_image_network.
    """
    path = extract_landmark(video)
    test_samples, test_samples_diff, _, _, test_sv, test_vc = get_data_for_test(path, 1, block_size)

    def _build_classifier(timesteps):
        # Shared architecture: dropout -> BiGRU -> dense head, softmax over
        # {real, fake}. 136 = 68 landmarks x (x, y) coordinates per frame.
        net = K.Sequential([
            layers.InputLayer(input_shape=(timesteps, 136)),
            layers.Dropout(0.25),
            # Public Keras API instead of the private keras.layers.wrappers /
            # keras.layers.recurrent_v2 module paths, which are not stable.
            layers.Bidirectional(layers.GRU(RNN_UNIT)),
            layers.Dropout(DROPOUT_RATE),
            layers.Dense(64, activation='relu'),
            layers.Dropout(DROPOUT_RATE),
            layers.Dense(2, activation='softmax'),
        ])
        # Compile with a per-model optimizer (sharing one Adam instance
        # across models would entangle their optimizer state). Compilation
        # is only needed so Keras accepts predict() after load_weights.
        net.compile(optimizer=K.optimizers.Adam(learning_rate=0.001),
                    loss=K.losses.SparseCategoricalCrossentropy(from_logits=False),
                    metrics=['accuracy'])
        return net

    # Diff samples have one fewer timestep than raw samples.
    model = _build_classifier(block_size)
    model_diff = _build_classifier(block_size - 1)

    # ----Using Deeperforensics 1.0 Parameters---- #
    print("*************** loading the model ***************")
    model.load_weights('g1.h5')
    model_diff.load_weights('g2.h5')

    prediction = model.predict(test_samples)
    prediction_diff = model_diff.predict(test_samples_diff)
    # Average the "fake" (index 1) probabilities of the two classifiers.
    mix_predict = [(p[1] + pd[1]) / 2
                   for p, pd in zip(prediction, prediction_diff)]

    prediction_video = merge_video_prediction(mix_predict, test_sv, test_vc)

    print("*************** start predict ***************")
    # Default to "Real" so an empty prediction list cannot raise
    # UnboundLocalError (the original referenced the loop variable after
    # the loop). With a single uploaded clip there is one score; if there
    # were several, the last one wins, matching the original behavior.
    label = "Real"
    for score in prediction_video:
        label = "Fake" if score >= 0.5 else "Real"
        print("the pd is {}".format(score))

    output_video = test_full_image_network(video, model_path, output_path)
    return label, output_video

# Gradio front end: one video goes in; a text verdict plus the annotated
# detection video come out.
inputs = gr.inputs.Video()
outputs = [gr.outputs.Textbox(), gr.outputs.Video()]
sample_clips = [
    ["sample__real.mp4"],
    ["sample__fake.mp4"],
    ["sample__real2.mp4"],
    ["sample__fake2.mp4"],
]
iface = gr.Interface(
    fn=predict,
    inputs=inputs,
    outputs=outputs,
    examples=sample_clips,
    theme="grass",
    title="人脸伪造检测",
    description="输入:视频         输出:真/假",
)
iface.launch()