Spaces · Runtime error
imseldrith committed · Commit cf06183 · 1 Parent(s): 51a2766
Delete DeepFakeAI
This view is limited to 50 files because it contains too many changes. See raw diff.
- DeepFakeAI/__init__.py +0 -0
- DeepFakeAI/choices.py +0 -26
- DeepFakeAI/common_helper.py +0 -10
- DeepFakeAI/content_analyser.py +0 -103
- DeepFakeAI/core.py +0 -299
- DeepFakeAI/download.py +0 -44
- DeepFakeAI/execution_helper.py +0 -22
- DeepFakeAI/face_analyser.py +0 -347
- DeepFakeAI/face_helper.py +0 -111
- DeepFakeAI/face_masker.py +0 -128
- DeepFakeAI/face_store.py +0 -47
- DeepFakeAI/ffmpeg.py +0 -81
- DeepFakeAI/filesystem.py +0 -91
- DeepFakeAI/globals.py +0 -51
- DeepFakeAI/installer.py +0 -92
- DeepFakeAI/logger.py +0 -39
- DeepFakeAI/metadata.py +0 -13
- DeepFakeAI/normalizer.py +0 -34
- DeepFakeAI/processors/__init__.py +0 -0
- DeepFakeAI/processors/frame/__init__.py +0 -0
- DeepFakeAI/processors/frame/choices.py +0 -13
- DeepFakeAI/processors/frame/core.py +0 -98
- DeepFakeAI/processors/frame/globals.py +0 -10
- DeepFakeAI/processors/frame/modules/__init__.py +0 -0
- DeepFakeAI/processors/frame/modules/face_debugger.py +0 -142
- DeepFakeAI/processors/frame/modules/face_enhancer.py +0 -249
- DeepFakeAI/processors/frame/modules/face_swapper.py +0 -302
- DeepFakeAI/processors/frame/modules/frame_enhancer.py +0 -172
- DeepFakeAI/processors/frame/typings.py +0 -7
- DeepFakeAI/typing.py +0 -51
- DeepFakeAI/uis/__init__.py +0 -0
- DeepFakeAI/uis/assets/fixes.css +0 -7
- DeepFakeAI/uis/assets/overrides.css +0 -44
- DeepFakeAI/uis/choices.py +0 -7
- DeepFakeAI/uis/components/__init__.py +0 -0
- DeepFakeAI/uis/components/about.py +0 -23
- DeepFakeAI/uis/components/benchmark.py +0 -132
- DeepFakeAI/uis/components/benchmark_options.py +0 -29
- DeepFakeAI/uis/components/common_options.py +0 -38
- DeepFakeAI/uis/components/execution.py +0 -34
- DeepFakeAI/uis/components/execution_queue_count.py +0 -28
- DeepFakeAI/uis/components/execution_thread_count.py +0 -29
- DeepFakeAI/uis/components/face_analyser.py +0 -98
- DeepFakeAI/uis/components/face_masker.py +0 -123
- DeepFakeAI/uis/components/face_selector.py +0 -164
- DeepFakeAI/uis/components/frame_processors.py +0 -40
- DeepFakeAI/uis/components/frame_processors_options.py +0 -141
- DeepFakeAI/uis/components/limit_resources.py +0 -27
- DeepFakeAI/uis/components/output.py +0 -62
- DeepFakeAI/uis/components/output_options.py +0 -94
DeepFakeAI/__init__.py
DELETED
File without changes
DeepFakeAI/choices.py
DELETED
@@ -1,26 +0,0 @@
-from typing import List
-
-from DeepFakeAI.typing import FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceMaskType, FaceMaskRegion, TempFrameFormat, OutputVideoEncoder
-from DeepFakeAI.common_helper import create_range
-
-face_analyser_orders : List[FaceAnalyserOrder] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best' ]
-face_analyser_ages : List[FaceAnalyserAge] = [ 'child', 'teen', 'adult', 'senior' ]
-face_analyser_genders : List[FaceAnalyserGender] = [ 'male', 'female' ]
-face_detector_models : List[str] = [ 'retinaface', 'yunet' ]
-face_detector_sizes : List[str] = [ '160x160', '320x320', '480x480', '512x512', '640x640', '768x768', '960x960', '1024x1024' ]
-face_selector_modes : List[FaceSelectorMode] = [ 'reference', 'one', 'many' ]
-face_mask_types : List[FaceMaskType] = [ 'box', 'occlusion', 'region' ]
-face_mask_regions : List[FaceMaskRegion] = [ 'skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 'eye-glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip' ]
-temp_frame_formats : List[TempFrameFormat] = [ 'jpg', 'png' ]
-output_video_encoders : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ]
-
-execution_thread_count_range : List[float] = create_range(1, 128, 1)
-execution_queue_count_range : List[float] = create_range(1, 32, 1)
-max_memory_range : List[float] = create_range(0, 128, 1)
-face_detector_score_range : List[float] = create_range(0.0, 1.0, 0.05)
-face_mask_blur_range : List[float] = create_range(0.0, 1.0, 0.05)
-face_mask_padding_range : List[float] = create_range(0, 100, 1)
-reference_face_distance_range : List[float] = create_range(0.0, 1.5, 0.05)
-temp_frame_quality_range : List[float] = create_range(0, 100, 1)
-output_image_quality_range : List[float] = create_range(0, 100, 1)
-output_video_quality_range : List[float] = create_range(0, 100, 1)
DeepFakeAI/common_helper.py
DELETED
@@ -1,10 +0,0 @@
-from typing import List, Any
-import numpy
-
-
-def create_metavar(ranges : List[Any]) -> str:
-	return '[' + str(ranges[0]) + '-' + str(ranges[-1]) + ']'
-
-
-def create_range(start : float, stop : float, step : float) -> List[float]:
-	return (numpy.around(numpy.arange(start, stop + step, step), decimals = 2)).tolist()
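For context: create_range pads numpy.arange by one extra step so the stop value itself is included, and create_metavar renders a range as a compact help-text hint. A standalone sanity check of both (only numpy required; the threads variable name is illustrative):

import numpy

def create_metavar(ranges):
	return '[' + str(ranges[0]) + '-' + str(ranges[-1]) + ']'

def create_range(start, stop, step):
	# numpy.arange excludes `stop`, so adding one extra step makes the range inclusive
	return (numpy.around(numpy.arange(start, stop + step, step), decimals = 2)).tolist()

threads = create_range(1, 128, 1)
print(threads[0], threads[-1])   # 1 128
print(create_metavar(threads))   # [1-128]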
DeepFakeAI/content_analyser.py
DELETED
@@ -1,103 +0,0 @@
-from typing import Any, Dict
-from functools import lru_cache
-import threading
-import cv2
-import numpy
-import onnxruntime
-from tqdm import tqdm
-
-import DeepFakeAI.globals
-from DeepFakeAI import wording
-from DeepFakeAI.typing import Frame, ModelValue
-from DeepFakeAI.vision import get_video_frame, count_video_frame_total, read_image, detect_fps
-from DeepFakeAI.filesystem import resolve_relative_path
-from DeepFakeAI.download import conditional_download
-
-CONTENT_ANALYSER = None
-THREAD_LOCK : threading.Lock = threading.Lock()
-MODELS : Dict[str, ModelValue] =\
-{
-	'open_nsfw':
-	{
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/open_nsfw.onnx',
-		'path': resolve_relative_path('../.assets/models/open_nsfw.onnx')
-	}
-}
-MAX_PROBABILITY = 0.80
-MAX_RATE = 5
-STREAM_COUNTER = 0
-
-
-def get_content_analyser() -> Any:
-	global CONTENT_ANALYSER
-
-	with THREAD_LOCK:
-		if CONTENT_ANALYSER is None:
-			model_path = MODELS.get('open_nsfw').get('path')
-			CONTENT_ANALYSER = onnxruntime.InferenceSession(model_path, providers = DeepFakeAI.globals.execution_providers)
-	return CONTENT_ANALYSER
-
-
-def clear_content_analyser() -> None:
-	global CONTENT_ANALYSER
-
-	CONTENT_ANALYSER = None
-
-
-def pre_check() -> bool:
-	if not DeepFakeAI.globals.skip_download:
-		download_directory_path = resolve_relative_path('../.assets/models')
-		model_url = MODELS.get('open_nsfw').get('url')
-		conditional_download(download_directory_path, [ model_url ])
-	return True
-
-
-def analyse_stream(frame : Frame, fps : float) -> bool:
-	global STREAM_COUNTER
-
-	STREAM_COUNTER = STREAM_COUNTER + 1
-	if STREAM_COUNTER % int(fps) == 0:
-		return analyse_frame(frame)
-	return False
-
-
-def prepare_frame(frame : Frame) -> Frame:
-	frame = cv2.resize(frame, (224, 224)).astype(numpy.float32)
-	frame -= numpy.array([ 104, 117, 123 ]).astype(numpy.float32)
-	frame = numpy.expand_dims(frame, axis = 0)
-	return frame
-
-
-def analyse_frame(frame : Frame) -> bool:
-	content_analyser = get_content_analyser()
-	frame = prepare_frame(frame)
-	probability = content_analyser.run(None,
-	{
-		'input:0': frame
-	})[0][0][1]
-	return probability > MAX_PROBABILITY
-
-
-@lru_cache(maxsize = None)
-def analyse_image(image_path : str) -> bool:
-	frame = read_image(image_path)
-	return analyse_frame(frame)
-
-
-@lru_cache(maxsize = None)
-def analyse_video(video_path : str, start_frame : int, end_frame : int) -> bool:
-	video_frame_total = count_video_frame_total(video_path)
-	fps = detect_fps(video_path)
-	frame_range = range(start_frame or 0, end_frame or video_frame_total)
-	rate = 0.0
-	counter = 0
-	with tqdm(total = len(frame_range), desc = wording.get('analysing'), unit = 'frame', ascii = ' =', disable = DeepFakeAI.globals.log_level in [ 'warn', 'error' ]) as progress:
-		for frame_number in frame_range:
-			if frame_number % int(fps) == 0:
-				frame = get_video_frame(video_path, frame_number)
-				if analyse_frame(frame):
-					counter += 1
-			rate = counter * int(fps) / len(frame_range) * 100
-			progress.update()
-			progress.set_postfix(rate = rate)
-	return rate > MAX_RATE
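The sampling in analyse_video scores only every int(fps)-th frame (roughly one per second) and converts the hit count back into a percentage of the whole clip, which MAX_RATE then thresholds. A minimal sketch of that arithmetic with a stubbed-out analyser (frame_total, fps and flagged_seconds are made-up inputs, not part of the deleted module):

def sampled_rate(frame_total, fps, flagged_seconds):
	# One sampled frame per second of footage, as analyse_video does.
	sampled = [ n for n in range(frame_total) if n % int(fps) == 0 ]
	counter = min(flagged_seconds, len(sampled))  # pretend these samples were flagged
	# Scale the flagged samples back up by fps, then express as a percentage.
	return counter * int(fps) / frame_total * 100

print(sampled_rate(frame_total = 2500, fps = 25.0, flagged_seconds = 3))  # 3.0 -> below MAX_RATE = 5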
DeepFakeAI/core.py
DELETED
@@ -1,299 +0,0 @@
-import os
-
-os.environ['OMP_NUM_THREADS'] = '1'
-
-import signal
-import ssl
-import sys
-import warnings
-import platform
-import shutil
-import onnxruntime
-from argparse import ArgumentParser, HelpFormatter
-
-import DeepFakeAI.choices
-import DeepFakeAI.globals
-from DeepFakeAI.face_analyser import get_one_face, get_average_face
-from DeepFakeAI.face_store import get_reference_faces, append_reference_face
-from DeepFakeAI.vision import get_video_frame, detect_fps, read_image, read_static_images
-from DeepFakeAI import face_analyser, face_masker, content_analyser, metadata, logger, wording
-from DeepFakeAI.content_analyser import analyse_image, analyse_video
-from DeepFakeAI.processors.frame.core import get_frame_processors_modules, load_frame_processor_module
-from DeepFakeAI.common_helper import create_metavar
-from DeepFakeAI.execution_helper import encode_execution_providers, decode_execution_providers
-from DeepFakeAI.normalizer import normalize_output_path, normalize_padding
-from DeepFakeAI.filesystem import is_image, is_video, list_module_names, get_temp_frame_paths, create_temp, move_temp, clear_temp
-from DeepFakeAI.ffmpeg import extract_frames, compress_image, merge_video, restore_audio
-
-onnxruntime.set_default_logger_severity(3)
-warnings.filterwarnings('ignore', category = UserWarning, module = 'gradio')
-warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision')
-
-if platform.system().lower() == 'darwin':
-	ssl._create_default_https_context = ssl._create_unverified_context
-
-
-def cli() -> None:
-	signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
-	program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120), add_help = False)
-	# general
-	program.add_argument('-s', '--source', action = 'append', help = wording.get('source_help'), dest = 'source_paths')
-	program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path')
-	program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path')
-	program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
-	# misc
-	group_misc = program.add_argument_group('misc')
-	group_misc.add_argument('--skip-download', help = wording.get('skip_download_help'), action = 'store_true')
-	group_misc.add_argument('--headless', help = wording.get('headless_help'), action = 'store_true')
-	group_misc.add_argument('--log-level', help = wording.get('log_level_help'), default = 'info', choices = logger.get_log_levels())
-	# execution
-	execution_providers = encode_execution_providers(onnxruntime.get_available_providers())
-	group_execution = program.add_argument_group('execution')
-	group_execution.add_argument('--execution-providers', help = wording.get('execution_providers_help').format(choices = ', '.join(execution_providers)), default = [ 'cpu' ], choices = execution_providers, nargs = '+', metavar = 'EXECUTION_PROVIDERS')
-	group_execution.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), type = int, default = 4, choices = DeepFakeAI.choices.execution_thread_count_range, metavar = create_metavar(DeepFakeAI.choices.execution_thread_count_range))
-	group_execution.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), type = int, default = 1, choices = DeepFakeAI.choices.execution_queue_count_range, metavar = create_metavar(DeepFakeAI.choices.execution_queue_count_range))
-	group_execution.add_argument('--max-memory', help = wording.get('max_memory_help'), type = int, choices = DeepFakeAI.choices.max_memory_range, metavar = create_metavar(DeepFakeAI.choices.max_memory_range))
-	# face analyser
-	group_face_analyser = program.add_argument_group('face analyser')
-	group_face_analyser.add_argument('--face-analyser-order', help = wording.get('face_analyser_order_help'), default = 'left-right', choices = DeepFakeAI.choices.face_analyser_orders)
-	group_face_analyser.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), choices = DeepFakeAI.choices.face_analyser_ages)
-	group_face_analyser.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), choices = DeepFakeAI.choices.face_analyser_genders)
-	group_face_analyser.add_argument('--face-detector-model', help = wording.get('face_detector_model_help'), default = 'retinaface', choices = DeepFakeAI.choices.face_detector_models)
-	group_face_analyser.add_argument('--face-detector-size', help = wording.get('face_detector_size_help'), default = '640x640', choices = DeepFakeAI.choices.face_detector_sizes)
-	group_face_analyser.add_argument('--face-detector-score', help = wording.get('face_detector_score_help'), type = float, default = 0.5, choices = DeepFakeAI.choices.face_detector_score_range, metavar = create_metavar(DeepFakeAI.choices.face_detector_score_range))
-	# face selector
-	group_face_selector = program.add_argument_group('face selector')
-	group_face_selector.add_argument('--face-selector-mode', help = wording.get('face_selector_mode_help'), default = 'reference', choices = DeepFakeAI.choices.face_selector_modes)
-	group_face_selector.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), type = int, default = 0)
-	group_face_selector.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), type = float, default = 0.6, choices = DeepFakeAI.choices.reference_face_distance_range, metavar = create_metavar(DeepFakeAI.choices.reference_face_distance_range))
-	group_face_selector.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), type = int, default = 0)
-	# face mask
-	group_face_mask = program.add_argument_group('face mask')
-	group_face_mask.add_argument('--face-mask-types', help = wording.get('face_mask_types_help').format(choices = ', '.join(DeepFakeAI.choices.face_mask_types)), default = [ 'box' ], choices = DeepFakeAI.choices.face_mask_types, nargs = '+', metavar = 'FACE_MASK_TYPES')
-	group_face_mask.add_argument('--face-mask-blur', help = wording.get('face_mask_blur_help'), type = float, default = 0.3, choices = DeepFakeAI.choices.face_mask_blur_range, metavar = create_metavar(DeepFakeAI.choices.face_mask_blur_range))
-	group_face_mask.add_argument('--face-mask-padding', help = wording.get('face_mask_padding_help'), type = int, default = [ 0, 0, 0, 0 ], nargs = '+')
-	group_face_mask.add_argument('--face-mask-regions', help = wording.get('face_mask_regions_help').format(choices = ', '.join(DeepFakeAI.choices.face_mask_regions)), default = DeepFakeAI.choices.face_mask_regions, choices = DeepFakeAI.choices.face_mask_regions, nargs = '+', metavar = 'FACE_MASK_REGIONS')
-	# frame extraction
-	group_frame_extraction = program.add_argument_group('frame extraction')
-	group_frame_extraction.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), type = int)
-	group_frame_extraction.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), type = int)
-	group_frame_extraction.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), default = 'jpg', choices = DeepFakeAI.choices.temp_frame_formats)
-	group_frame_extraction.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), type = int, default = 100, choices = DeepFakeAI.choices.temp_frame_quality_range, metavar = create_metavar(DeepFakeAI.choices.temp_frame_quality_range))
-	group_frame_extraction.add_argument('--keep-temp', help = wording.get('keep_temp_help'), action = 'store_true')
-	# output creation
-	group_output_creation = program.add_argument_group('output creation')
-	group_output_creation.add_argument('--output-image-quality', help = wording.get('output_image_quality_help'), type = int, default = 80, choices = DeepFakeAI.choices.output_image_quality_range, metavar = create_metavar(DeepFakeAI.choices.output_image_quality_range))
-	group_output_creation.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), default = 'libx264', choices = DeepFakeAI.choices.output_video_encoders)
-	group_output_creation.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), type = int, default = 80, choices = DeepFakeAI.choices.output_video_quality_range, metavar = create_metavar(DeepFakeAI.choices.output_video_quality_range))
-	group_output_creation.add_argument('--keep-fps', help = wording.get('keep_fps_help'), action = 'store_true')
-	group_output_creation.add_argument('--skip-audio', help = wording.get('skip_audio_help'), action = 'store_true')
-	# frame processors
-	available_frame_processors = list_module_names('DeepFakeAI/processors/frame/modules')
-	program = ArgumentParser(parents = [ program ], formatter_class = program.formatter_class, add_help = True)
-	group_frame_processors = program.add_argument_group('frame processors')
-	group_frame_processors.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(available_frame_processors)), default = [ 'face_swapper' ], nargs = '+')
-	for frame_processor in available_frame_processors:
-		frame_processor_module = load_frame_processor_module(frame_processor)
-		frame_processor_module.register_args(group_frame_processors)
-	# uis
-	group_uis = program.add_argument_group('uis')
-	group_uis.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(list_module_names('DeepFakeAI/uis/layouts'))), default = [ 'default' ], nargs = '+')
-	run(program)
-
-
-def apply_args(program : ArgumentParser) -> None:
-	args = program.parse_args()
-	# general
-	DeepFakeAI.globals.source_paths = args.source_paths
-	DeepFakeAI.globals.target_path = args.target_path
-	DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_paths, DeepFakeAI.globals.target_path, args.output_path)
-	# misc
-	DeepFakeAI.globals.skip_download = args.skip_download
-	DeepFakeAI.globals.headless = args.headless
-	DeepFakeAI.globals.log_level = args.log_level
-	# execution
-	DeepFakeAI.globals.execution_providers = decode_execution_providers(args.execution_providers)
-	DeepFakeAI.globals.execution_thread_count = args.execution_thread_count
-	DeepFakeAI.globals.execution_queue_count = args.execution_queue_count
-	DeepFakeAI.globals.max_memory = args.max_memory
-	# face analyser
-	DeepFakeAI.globals.face_analyser_order = args.face_analyser_order
-	DeepFakeAI.globals.face_analyser_age = args.face_analyser_age
-	DeepFakeAI.globals.face_analyser_gender = args.face_analyser_gender
-	DeepFakeAI.globals.face_detector_model = args.face_detector_model
-	DeepFakeAI.globals.face_detector_size = args.face_detector_size
-	DeepFakeAI.globals.face_detector_score = args.face_detector_score
-	# face selector
-	DeepFakeAI.globals.face_selector_mode = args.face_selector_mode
-	DeepFakeAI.globals.reference_face_position = args.reference_face_position
-	DeepFakeAI.globals.reference_face_distance = args.reference_face_distance
-	DeepFakeAI.globals.reference_frame_number = args.reference_frame_number
-	# face mask
-	DeepFakeAI.globals.face_mask_types = args.face_mask_types
-	DeepFakeAI.globals.face_mask_blur = args.face_mask_blur
-	DeepFakeAI.globals.face_mask_padding = normalize_padding(args.face_mask_padding)
-	DeepFakeAI.globals.face_mask_regions = args.face_mask_regions
-	# frame extraction
-	DeepFakeAI.globals.trim_frame_start = args.trim_frame_start
-	DeepFakeAI.globals.trim_frame_end = args.trim_frame_end
-	DeepFakeAI.globals.temp_frame_format = args.temp_frame_format
-	DeepFakeAI.globals.temp_frame_quality = args.temp_frame_quality
-	DeepFakeAI.globals.keep_temp = args.keep_temp
-	# output creation
-	DeepFakeAI.globals.output_image_quality = args.output_image_quality
-	DeepFakeAI.globals.output_video_encoder = args.output_video_encoder
-	DeepFakeAI.globals.output_video_quality = args.output_video_quality
-	DeepFakeAI.globals.keep_fps = args.keep_fps
-	DeepFakeAI.globals.skip_audio = args.skip_audio
-	# frame processors
-	available_frame_processors = list_module_names('DeepFakeAI/processors/frame/modules')
-	DeepFakeAI.globals.frame_processors = args.frame_processors
-	for frame_processor in available_frame_processors:
-		frame_processor_module = load_frame_processor_module(frame_processor)
-		frame_processor_module.apply_args(program)
-	# uis
-	DeepFakeAI.globals.ui_layouts = args.ui_layouts
-
-
-def run(program : ArgumentParser) -> None:
-	apply_args(program)
-	logger.init(DeepFakeAI.globals.log_level)
-	limit_resources()
-	if not pre_check() or not content_analyser.pre_check() or not face_analyser.pre_check() or not face_masker.pre_check():
-		return
-	for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
-		if not frame_processor_module.pre_check():
-			return
-	if DeepFakeAI.globals.headless:
-		conditional_process()
-	else:
-		import DeepFakeAI.uis.core as ui
-
-		for ui_layout in ui.get_ui_layouts_modules(DeepFakeAI.globals.ui_layouts):
-			if not ui_layout.pre_check():
-				return
-		ui.launch()
-
-
-def destroy() -> None:
-	if DeepFakeAI.globals.target_path:
-		clear_temp(DeepFakeAI.globals.target_path)
-	sys.exit()
-
-
-def limit_resources() -> None:
-	if DeepFakeAI.globals.max_memory:
-		memory = DeepFakeAI.globals.max_memory * 1024 ** 3
-		if platform.system().lower() == 'darwin':
-			memory = DeepFakeAI.globals.max_memory * 1024 ** 6
-		if platform.system().lower() == 'windows':
-			import ctypes
-
-			kernel32 = ctypes.windll.kernel32 # type: ignore[attr-defined]
-			kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
-		else:
-			import resource
-
-			resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
-
-
-def pre_check() -> bool:
-	if sys.version_info < (3, 9):
-		logger.error(wording.get('python_not_supported').format(version = '3.9'), __name__.upper())
-		return False
-	if not shutil.which('ffmpeg'):
-		logger.error(wording.get('ffmpeg_not_installed'), __name__.upper())
-		return False
-	return True
-
-
-def conditional_process() -> None:
-	conditional_append_reference_faces()
-	for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
-		if not frame_processor_module.pre_process('output'):
-			return
-	if is_image(DeepFakeAI.globals.target_path):
-		process_image()
-	if is_video(DeepFakeAI.globals.target_path):
-		process_video()
-
-
-def conditional_append_reference_faces() -> None:
-	if 'reference' in DeepFakeAI.globals.face_selector_mode and not get_reference_faces():
-		source_frames = read_static_images(DeepFakeAI.globals.source_paths)
-		source_face = get_average_face(source_frames)
-		if is_video(DeepFakeAI.globals.target_path):
-			reference_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number)
-		else:
-			reference_frame = read_image(DeepFakeAI.globals.target_path)
-		reference_face = get_one_face(reference_frame, DeepFakeAI.globals.reference_face_position)
-		append_reference_face('origin', reference_face)
-		if source_face and reference_face:
-			for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
-				reference_frame = frame_processor_module.get_reference_frame(source_face, reference_face, reference_frame)
-				reference_face = get_one_face(reference_frame, DeepFakeAI.globals.reference_face_position)
-				append_reference_face(frame_processor_module.__name__, reference_face)
-
-
-def process_image() -> None:
-	if analyse_image(DeepFakeAI.globals.target_path):
-		return
-	shutil.copy2(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
-	# process frame
-	for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
-		logger.info(wording.get('processing'), frame_processor_module.NAME)
-		frame_processor_module.process_image(DeepFakeAI.globals.source_paths, DeepFakeAI.globals.output_path, DeepFakeAI.globals.output_path)
-		frame_processor_module.post_process()
-	# compress image
-	logger.info(wording.get('compressing_image'), __name__.upper())
-	if not compress_image(DeepFakeAI.globals.output_path):
-		logger.error(wording.get('compressing_image_failed'), __name__.upper())
-	# validate image
-	if is_image(DeepFakeAI.globals.output_path):
-		logger.info(wording.get('processing_image_succeed'), __name__.upper())
-	else:
-		logger.error(wording.get('processing_image_failed'), __name__.upper())
-
-
-def process_video() -> None:
-	if analyse_video(DeepFakeAI.globals.target_path, DeepFakeAI.globals.trim_frame_start, DeepFakeAI.globals.trim_frame_end):
-		return
-	fps = detect_fps(DeepFakeAI.globals.target_path) if DeepFakeAI.globals.keep_fps else 25.0
-	# create temp
-	logger.info(wording.get('creating_temp'), __name__.upper())
-	create_temp(DeepFakeAI.globals.target_path)
-	# extract frames
-	logger.info(wording.get('extracting_frames_fps').format(fps = fps), __name__.upper())
-	extract_frames(DeepFakeAI.globals.target_path, fps)
-	# process frame
-	temp_frame_paths = get_temp_frame_paths(DeepFakeAI.globals.target_path)
-	if temp_frame_paths:
-		for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
-			logger.info(wording.get('processing'), frame_processor_module.NAME)
-			frame_processor_module.process_video(DeepFakeAI.globals.source_paths, temp_frame_paths)
-			frame_processor_module.post_process()
-	else:
-		logger.error(wording.get('temp_frames_not_found'), __name__.upper())
-		return
-	# merge video
-	logger.info(wording.get('merging_video_fps').format(fps = fps), __name__.upper())
-	if not merge_video(DeepFakeAI.globals.target_path, fps):
-		logger.error(wording.get('merging_video_failed'), __name__.upper())
-		return
-	# handle audio
-	if DeepFakeAI.globals.skip_audio:
-		logger.info(wording.get('skipping_audio'), __name__.upper())
-		move_temp(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
-	else:
-		logger.info(wording.get('restoring_audio'), __name__.upper())
-		if not restore_audio(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path):
-			logger.warn(wording.get('restoring_audio_skipped'), __name__.upper())
-			move_temp(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
-	# clear temp
-	logger.info(wording.get('clearing_temp'), __name__.upper())
-	clear_temp(DeepFakeAI.globals.target_path)
-	# validate video
-	if is_video(DeepFakeAI.globals.output_path):
-		logger.info(wording.get('processing_video_succeed'), __name__.upper())
-	else:
-		logger.error(wording.get('processing_video_failed'), __name__.upper())
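A pattern worth noting in cli(): the first ArgumentParser is created with add_help = False, and once the dynamically discovered frame-processor options are registered it is wrapped via parents = [ program ] in a second parser with add_help = True, so --help lists both the static and the plugin-provided flags. A minimal sketch of the idea (the option names here are invented for illustration):

from argparse import ArgumentParser

# Stage one: static options, help disabled so the parser can still grow.
program = ArgumentParser(add_help = False)
program.add_argument('--target')

# Options discovered at runtime, e.g. by scanning a plugin directory.
discovered_options = [ '--face-swapper-model' ]  # illustrative

# Stage two: wrap the first parser so --help covers everything.
program = ArgumentParser(parents = [ program ], add_help = True)
for option in discovered_options:
	program.add_argument(option)

args = program.parse_args([ '--target', 'video.mp4', '--face-swapper-model', 'inswapper' ])
print(args.target, args.face_swapper_model)  # video.mp4 inswapper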
DeepFakeAI/download.py
DELETED
@@ -1,44 +0,0 @@
-import os
-import subprocess
-import urllib.request
-from typing import List
-from concurrent.futures import ThreadPoolExecutor
-from functools import lru_cache
-from tqdm import tqdm
-
-import DeepFakeAI.globals
-from DeepFakeAI import wording
-from DeepFakeAI.filesystem import is_file
-
-
-def conditional_download(download_directory_path : str, urls : List[str]) -> None:
-	with ThreadPoolExecutor() as executor:
-		for url in urls:
-			executor.submit(get_download_size, url)
-	for url in urls:
-		download_file_path = os.path.join(download_directory_path, os.path.basename(url))
-		initial = os.path.getsize(download_file_path) if is_file(download_file_path) else 0
-		total = get_download_size(url)
-		if initial < total:
-			with tqdm(total = total, initial = initial, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = DeepFakeAI.globals.log_level in [ 'warn', 'error' ]) as progress:
-				subprocess.Popen([ 'curl', '--create-dirs', '--silent', '--insecure', '--location', '--continue-at', '-', '--output', download_file_path, url ])
-				current = initial
-				while current < total:
-					if is_file(download_file_path):
-						current = os.path.getsize(download_file_path)
-						progress.update(current - progress.n)
-
-
-@lru_cache(maxsize = None)
-def get_download_size(url : str) -> int:
-	try:
-		response = urllib.request.urlopen(url, timeout = 10)
-		return int(response.getheader('Content-Length'))
-	except (OSError, ValueError):
-		return 0
-
-
-def is_download_done(url : str, file_path : str) -> bool:
-	if is_file(file_path):
-		return get_download_size(url) == os.path.getsize(file_path)
-	return False
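conditional_download hands the transfer to curl with --continue-at - (resume from the current byte offset) and reports progress by polling the partial file's size against the Content-Length header. A rough sketch of the same resume-and-poll idea; the URL and path are placeholders, and unlike the original loop it watches the curl process rather than the byte count, so it cannot spin forever if the transfer dies:

import os
import subprocess
import time
import urllib.request

url = 'https://example.com/model.onnx'  # placeholder
path = '/tmp/model.onnx'                # placeholder

# Remote size from the Content-Length header, as get_download_size does.
total = int(urllib.request.urlopen(url, timeout = 10).getheader('Content-Length'))

# '--continue-at -' lets an interrupted download resume instead of restarting.
process = subprocess.Popen([ 'curl', '--create-dirs', '--silent', '--location', '--continue-at', '-', '--output', path, url ])
while process.poll() is None:
	current = os.path.getsize(path) if os.path.isfile(path) else 0
	print(f'{current} / {total} bytes')
	time.sleep(1)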
DeepFakeAI/execution_helper.py
DELETED
@@ -1,22 +0,0 @@
-from typing import List
-import onnxruntime
-
-
-def encode_execution_providers(execution_providers : List[str]) -> List[str]:
-	return [ execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers ]
-
-
-def decode_execution_providers(execution_providers: List[str]) -> List[str]:
-	available_execution_providers = onnxruntime.get_available_providers()
-	encoded_execution_providers = encode_execution_providers(available_execution_providers)
-	return [ execution_provider for execution_provider, encoded_execution_provider in zip(available_execution_providers, encoded_execution_providers) if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers) ]
-
-
-def map_device(execution_providers : List[str]) -> str:
-	if 'CoreMLExecutionProvider' in execution_providers:
-		return 'mps'
-	if 'CUDAExecutionProvider' in execution_providers or 'ROCMExecutionProvider' in execution_providers :
-		return 'cuda'
-	if 'OpenVINOExecutionProvider' in execution_providers:
-		return 'mkl'
-	return 'cpu'
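The encode/decode pair maps ONNX Runtime's verbose provider names ('CUDAExecutionProvider') to short CLI aliases ('cuda') and back. A round-trip demonstration with a hard-coded provider list standing in for onnxruntime.get_available_providers():

from typing import List

available = [ 'CUDAExecutionProvider', 'CPUExecutionProvider' ]  # stand-in list

def encode_execution_providers(execution_providers : List[str]) -> List[str]:
	return [ provider.replace('ExecutionProvider', '').lower() for provider in execution_providers ]

def decode_execution_providers(requested : List[str]) -> List[str]:
	encoded = encode_execution_providers(available)
	return [ full for full, short in zip(available, encoded) if any(alias in short for alias in requested) ]

print(encode_execution_providers(available))   # ['cuda', 'cpu']
print(decode_execution_providers([ 'cuda' ]))  # ['CUDAExecutionProvider']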
DeepFakeAI/face_analyser.py
DELETED
@@ -1,347 +0,0 @@
-from typing import Any, Optional, List, Tuple
-import threading
-import cv2
-import numpy
-import onnxruntime
-
-import DeepFakeAI.globals
-from DeepFakeAI.download import conditional_download
-from DeepFakeAI.face_store import get_static_faces, set_static_faces
-from DeepFakeAI.face_helper import warp_face, create_static_anchors, distance_to_kps, distance_to_bbox, apply_nms
-from DeepFakeAI.filesystem import resolve_relative_path
-from DeepFakeAI.typing import Frame, Face, FaceSet, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, ModelSet, Bbox, Kps, Score, Embedding
-from DeepFakeAI.vision import resize_frame_dimension
-
-FACE_ANALYSER = None
-THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
-THREAD_LOCK : threading.Lock = threading.Lock()
-MODELS : ModelSet =\
-{
-	'face_detector_retinaface':
-	{
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/retinaface_10g.onnx',
-		'path': resolve_relative_path('../.assets/models/retinaface_10g.onnx')
-	},
-	'face_detector_yunet':
-	{
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/yunet_2023mar.onnx',
-		'path': resolve_relative_path('../.assets/models/yunet_2023mar.onnx')
-	},
-	'face_recognizer_arcface_blendswap':
-	{
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/arcface_w600k_r50.onnx',
-		'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx')
-	},
-	'face_recognizer_arcface_inswapper':
-	{
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/arcface_w600k_r50.onnx',
-		'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx')
-	},
-	'face_recognizer_arcface_simswap':
-	{
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/arcface_simswap.onnx',
-		'path': resolve_relative_path('../.assets/models/arcface_simswap.onnx')
-	},
-	'gender_age':
-	{
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/gender_age.onnx',
-		'path': resolve_relative_path('../.assets/models/gender_age.onnx')
-	}
-}
-
-
-def get_face_analyser() -> Any:
-	global FACE_ANALYSER
-
-	with THREAD_LOCK:
-		if FACE_ANALYSER is None:
-			if DeepFakeAI.globals.face_detector_model == 'retinaface':
-				face_detector = onnxruntime.InferenceSession(MODELS.get('face_detector_retinaface').get('path'), providers = DeepFakeAI.globals.execution_providers)
-			if DeepFakeAI.globals.face_detector_model == 'yunet':
-				face_detector = cv2.FaceDetectorYN.create(MODELS.get('face_detector_yunet').get('path'), '', (0, 0))
-			if DeepFakeAI.globals.face_recognizer_model == 'arcface_blendswap':
-				face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_blendswap').get('path'), providers = DeepFakeAI.globals.execution_providers)
-			if DeepFakeAI.globals.face_recognizer_model == 'arcface_inswapper':
-				face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_inswapper').get('path'), providers = DeepFakeAI.globals.execution_providers)
-			if DeepFakeAI.globals.face_recognizer_model == 'arcface_simswap':
-				face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_simswap').get('path'), providers = DeepFakeAI.globals.execution_providers)
-			gender_age = onnxruntime.InferenceSession(MODELS.get('gender_age').get('path'), providers = DeepFakeAI.globals.execution_providers)
-			FACE_ANALYSER =\
-			{
-				'face_detector': face_detector,
-				'face_recognizer': face_recognizer,
-				'gender_age': gender_age
-			}
-	return FACE_ANALYSER
-
-
-def clear_face_analyser() -> Any:
-	global FACE_ANALYSER
-
-	FACE_ANALYSER = None
-
-
-def pre_check() -> bool:
-	if not DeepFakeAI.globals.skip_download:
-		download_directory_path = resolve_relative_path('../.assets/models')
-		model_urls =\
-		[
-			MODELS.get('face_detector_retinaface').get('url'),
-			MODELS.get('face_detector_yunet').get('url'),
-			MODELS.get('face_recognizer_arcface_inswapper').get('url'),
-			MODELS.get('face_recognizer_arcface_simswap').get('url'),
-			MODELS.get('gender_age').get('url')
-		]
-		conditional_download(download_directory_path, model_urls)
-	return True
-
-
-def extract_faces(frame: Frame) -> List[Face]:
-	face_detector_width, face_detector_height = map(int, DeepFakeAI.globals.face_detector_size.split('x'))
-	frame_height, frame_width, _ = frame.shape
-	temp_frame = resize_frame_dimension(frame, face_detector_width, face_detector_height)
-	temp_frame_height, temp_frame_width, _ = temp_frame.shape
-	ratio_height = frame_height / temp_frame_height
-	ratio_width = frame_width / temp_frame_width
-	if DeepFakeAI.globals.face_detector_model == 'retinaface':
-		bbox_list, kps_list, score_list = detect_with_retinaface(temp_frame, temp_frame_height, temp_frame_width, face_detector_height, face_detector_width, ratio_height, ratio_width)
-		return create_faces(frame, bbox_list, kps_list, score_list)
-	elif DeepFakeAI.globals.face_detector_model == 'yunet':
-		bbox_list, kps_list, score_list = detect_with_yunet(temp_frame, temp_frame_height, temp_frame_width, ratio_height, ratio_width)
-		return create_faces(frame, bbox_list, kps_list, score_list)
-	return []
-
-
-def detect_with_retinaface(temp_frame : Frame, temp_frame_height : int, temp_frame_width : int, face_detector_height : int, face_detector_width : int, ratio_height : float, ratio_width : float) -> Tuple[List[Bbox], List[Kps], List[Score]]:
-	face_detector = get_face_analyser().get('face_detector')
-	bbox_list = []
-	kps_list = []
-	score_list = []
-	feature_strides = [ 8, 16, 32 ]
-	feature_map_channel = 3
-	anchor_total = 2
-	prepare_frame = numpy.zeros((face_detector_height, face_detector_width, 3))
-	prepare_frame[:temp_frame_height, :temp_frame_width, :] = temp_frame
-	temp_frame = (prepare_frame - 127.5) / 128.0
-	temp_frame = numpy.expand_dims(temp_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
-	with THREAD_SEMAPHORE:
-		detections = face_detector.run(None,
-		{
-			face_detector.get_inputs()[0].name: temp_frame
-		})
-	for index, feature_stride in enumerate(feature_strides):
-		keep_indices = numpy.where(detections[index] >= DeepFakeAI.globals.face_detector_score)[0]
-		if keep_indices.any():
-			stride_height = face_detector_height // feature_stride
-			stride_width = face_detector_width // feature_stride
-			anchors = create_static_anchors(feature_stride, anchor_total, stride_height, stride_width)
-			bbox_raw = (detections[index + feature_map_channel] * feature_stride)
-			kps_raw = detections[index + feature_map_channel * 2] * feature_stride
-			for bbox in distance_to_bbox(anchors, bbox_raw)[keep_indices]:
-				bbox_list.append(numpy.array(
-				[
-					bbox[0] * ratio_width,
-					bbox[1] * ratio_height,
-					bbox[2] * ratio_width,
-					bbox[3] * ratio_height
-				]))
-			for kps in distance_to_kps(anchors, kps_raw)[keep_indices]:
-				kps_list.append(kps * [ ratio_width, ratio_height ])
-			for score in detections[index][keep_indices]:
-				score_list.append(score[0])
-	return bbox_list, kps_list, score_list
-
-
-def detect_with_yunet(temp_frame : Frame, temp_frame_height : int, temp_frame_width : int, ratio_height : float, ratio_width : float) -> Tuple[List[Bbox], List[Kps], List[Score]]:
-	face_detector = get_face_analyser().get('face_detector')
-	face_detector.setInputSize((temp_frame_width, temp_frame_height))
-	face_detector.setScoreThreshold(DeepFakeAI.globals.face_detector_score)
-	bbox_list = []
-	kps_list = []
-	score_list = []
-	with THREAD_SEMAPHORE:
-		_, detections = face_detector.detect(temp_frame)
-	if detections.any():
-		for detection in detections:
-			bbox_list.append(numpy.array(
-			[
-				detection[0] * ratio_width,
-				detection[1] * ratio_height,
-				(detection[0] + detection[2]) * ratio_width,
-				(detection[1] + detection[3]) * ratio_height
-			]))
-			kps_list.append(detection[4:14].reshape((5, 2)) * [ ratio_width, ratio_height])
-			score_list.append(detection[14])
-	return bbox_list, kps_list, score_list
-
-
-def create_faces(frame : Frame, bbox_list : List[Bbox], kps_list : List[Kps], score_list : List[Score]) -> List[Face]:
-	faces = []
-	if DeepFakeAI.globals.face_detector_score > 0:
-		sort_indices = numpy.argsort(-numpy.array(score_list))
-		bbox_list = [ bbox_list[index] for index in sort_indices ]
-		kps_list = [ kps_list[index] for index in sort_indices ]
-		score_list = [ score_list[index] for index in sort_indices ]
-		keep_indices = apply_nms(bbox_list, 0.4)
-		for index in keep_indices:
-			bbox = bbox_list[index]
-			kps = kps_list[index]
-			score = score_list[index]
-			embedding, normed_embedding = calc_embedding(frame, kps)
-			gender, age = detect_gender_age(frame, kps)
-			faces.append(Face(
-				bbox = bbox,
-				kps = kps,
-				score = score,
-				embedding = embedding,
-				normed_embedding = normed_embedding,
-				gender = gender,
-				age = age
-			))
-	return faces
-
-
-def calc_embedding(temp_frame : Frame, kps : Kps) -> Tuple[Embedding, Embedding]:
-	face_recognizer = get_face_analyser().get('face_recognizer')
-	crop_frame, matrix = warp_face(temp_frame, kps, 'arcface_112_v2', (112, 112))
-	crop_frame = crop_frame.astype(numpy.float32) / 127.5 - 1
-	crop_frame = crop_frame[:, :, ::-1].transpose(2, 0, 1)
-	crop_frame = numpy.expand_dims(crop_frame, axis = 0)
-	embedding = face_recognizer.run(None,
-	{
-		face_recognizer.get_inputs()[0].name: crop_frame
-	})[0]
-	embedding = embedding.ravel()
-	normed_embedding = embedding / numpy.linalg.norm(embedding)
-	return embedding, normed_embedding
-
-
-def detect_gender_age(frame : Frame, kps : Kps) -> Tuple[int, int]:
-	gender_age = get_face_analyser().get('gender_age')
-	crop_frame, affine_matrix = warp_face(frame, kps, 'arcface_112_v2', (96, 96))
-	crop_frame = numpy.expand_dims(crop_frame, axis = 0).transpose(0, 3, 1, 2).astype(numpy.float32)
-	prediction = gender_age.run(None,
-	{
-		gender_age.get_inputs()[0].name: crop_frame
-	})[0][0]
-	gender = int(numpy.argmax(prediction[:2]))
-	age = int(numpy.round(prediction[2] * 100))
-	return gender, age
-
-
-def get_one_face(frame : Frame, position : int = 0) -> Optional[Face]:
-	many_faces = get_many_faces(frame)
-	if many_faces:
-		try:
-			return many_faces[position]
-		except IndexError:
-			return many_faces[-1]
-	return None
-
-
-def get_average_face(frames : List[Frame], position : int = 0) -> Optional[Face]:
-	average_face = None
-	faces = []
-	embedding_list = []
-	normed_embedding_list = []
-	for frame in frames:
-		face = get_one_face(frame, position)
-		if face:
-			faces.append(face)
-			embedding_list.append(face.embedding)
-			normed_embedding_list.append(face.normed_embedding)
-	if faces:
-		average_face = Face(
-			bbox = faces[0].bbox,
-			kps = faces[0].kps,
-			score = faces[0].score,
-			embedding = numpy.mean(embedding_list, axis = 0),
-			normed_embedding = numpy.mean(normed_embedding_list, axis = 0),
-			gender = faces[0].gender,
-			age = faces[0].age
-		)
-	return average_face
-
-
-def get_many_faces(frame : Frame) -> List[Face]:
-	try:
-		faces_cache = get_static_faces(frame)
-		if faces_cache:
-			faces = faces_cache
-		else:
-			faces = extract_faces(frame)
-			set_static_faces(frame, faces)
-		if DeepFakeAI.globals.face_analyser_order:
-			faces = sort_by_order(faces, DeepFakeAI.globals.face_analyser_order)
-		if DeepFakeAI.globals.face_analyser_age:
-			faces = filter_by_age(faces, DeepFakeAI.globals.face_analyser_age)
-		if DeepFakeAI.globals.face_analyser_gender:
-			faces = filter_by_gender(faces, DeepFakeAI.globals.face_analyser_gender)
-		return faces
-	except (AttributeError, ValueError):
-		return []
-
-
-def find_similar_faces(frame : Frame, reference_faces : FaceSet, face_distance : float) -> List[Face]:
-	similar_faces : List[Face] = []
-	many_faces = get_many_faces(frame)
-
-	if reference_faces:
-		for reference_set in reference_faces:
-			if not similar_faces:
-				for reference_face in reference_faces[reference_set]:
-					for face in many_faces:
-						if compare_faces(face, reference_face, face_distance):
-							similar_faces.append(face)
-	return similar_faces
-
-
-def compare_faces(face : Face, reference_face : Face, face_distance : float) -> bool:
-	if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'):
-		current_face_distance = 1 - numpy.dot(face.normed_embedding, reference_face.normed_embedding)
-		return current_face_distance < face_distance
-	return False
-
-
-def sort_by_order(faces : List[Face], order : FaceAnalyserOrder) -> List[Face]:
-	if order == 'left-right':
-		return sorted(faces, key = lambda face: face.bbox[0])
-	if order == 'right-left':
-		return sorted(faces, key = lambda face: face.bbox[0], reverse = True)
-	if order == 'top-bottom':
-		return sorted(faces, key = lambda face: face.bbox[1])
-	if order == 'bottom-top':
-		return sorted(faces, key = lambda face: face.bbox[1], reverse = True)
-	if order == 'small-large':
-		return sorted(faces, key = lambda face: (face.bbox[2] - face.bbox[0]) * (face.bbox[3] - face.bbox[1]))
-	if order == 'large-small':
-		return sorted(faces, key = lambda face: (face.bbox[2] - face.bbox[0]) * (face.bbox[3] - face.bbox[1]), reverse = True)
-	if order == 'best-worst':
-		return sorted(faces, key = lambda face: face.score, reverse = True)
-	if order == 'worst-best':
-		return sorted(faces, key = lambda face: face.score)
-	return faces
-
-
-def filter_by_age(faces : List[Face], age : FaceAnalyserAge) -> List[Face]:
-	filter_faces = []
-	for face in faces:
-		if face.age < 13 and age == 'child':
-			filter_faces.append(face)
-		elif face.age < 19 and age == 'teen':
-			filter_faces.append(face)
-		elif face.age < 60 and age == 'adult':
-			filter_faces.append(face)
-		elif face.age > 59 and age == 'senior':
-			filter_faces.append(face)
-	return filter_faces
-
-
-def filter_by_gender(faces : List[Face], gender : FaceAnalyserGender) -> List[Face]:
-	filter_faces = []
-	for face in faces:
-		if face.gender == 0 and gender == 'female':
-			filter_faces.append(face)
-		if face.gender == 1 and gender == 'male':
-			filter_faces.append(face)
-	return filter_faces
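compare_faces measures cosine distance: one minus the dot product of two L2-normalised embeddings, matched when below the threshold (0.6 by default in core.py). A toy check using 3-d vectors in place of real 512-d ArcFace embeddings:

import numpy

def cosine_distance(a, b):
	a = a / numpy.linalg.norm(a)
	b = b / numpy.linalg.norm(b)
	return 1 - numpy.dot(a, b)

same = numpy.array([ 1.0, 0.0, 0.0 ])
near = numpy.array([ 0.9, 0.1, 0.0 ])
far = numpy.array([ 0.0, 1.0, 0.0 ])

print(round(cosine_distance(same, near), 3))  # 0.006 -> under 0.6, treated as the same person
print(round(cosine_distance(same, far), 3))   # 1.0 -> well over, treated as different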
DeepFakeAI/face_helper.py
DELETED
@@ -1,111 +0,0 @@
-from typing import Any, Dict, Tuple, List
-from cv2.typing import Size
-from functools import lru_cache
-import cv2
-import numpy
-
-from DeepFakeAI.typing import Bbox, Kps, Frame, Mask, Matrix, Template
-
-TEMPLATES : Dict[Template, numpy.ndarray[Any, Any]] =\
-{
-	'arcface_112_v1': numpy.array(
-	[
-		[ 39.7300, 51.1380 ],
-		[ 72.2700, 51.1380 ],
-		[ 56.0000, 68.4930 ],
-		[ 42.4630, 87.0100 ],
-		[ 69.5370, 87.0100 ]
-	]),
-	'arcface_112_v2': numpy.array(
-	[
-		[ 38.2946, 51.6963 ],
-		[ 73.5318, 51.5014 ],
-		[ 56.0252, 71.7366 ],
-		[ 41.5493, 92.3655 ],
-		[ 70.7299, 92.2041 ]
-	]),
-	'arcface_128_v2': numpy.array(
-	[
-		[ 46.2946, 51.6963 ],
-		[ 81.5318, 51.5014 ],
-		[ 64.0252, 71.7366 ],
-		[ 49.5493, 92.3655 ],
-		[ 78.7299, 92.2041 ]
-	]),
-	'ffhq_512': numpy.array(
-	[
-		[ 192.98138, 239.94708 ],
-		[ 318.90277, 240.1936 ],
-		[ 256.63416, 314.01935 ],
-		[ 201.26117, 371.41043 ],
-		[ 313.08905, 371.15118 ]
-	])
-}
-
-
-def warp_face(temp_frame : Frame, kps : Kps, template : Template, size : Size) -> Tuple[Frame, Matrix]:
-	normed_template = TEMPLATES.get(template) * size[1] / size[0]
-	affine_matrix = cv2.estimateAffinePartial2D(kps, normed_template, method = cv2.RANSAC, ransacReprojThreshold = 100)[0]
-	crop_frame = cv2.warpAffine(temp_frame, affine_matrix, (size[1], size[1]), borderMode = cv2.BORDER_REPLICATE)
-	return crop_frame, affine_matrix
-
-
-def paste_back(temp_frame : Frame, crop_frame: Frame, crop_mask : Mask, affine_matrix : Matrix) -> Frame:
-	inverse_matrix = cv2.invertAffineTransform(affine_matrix)
-	temp_frame_size = temp_frame.shape[:2][::-1]
-	inverse_crop_mask = cv2.warpAffine(crop_mask, inverse_matrix, temp_frame_size).clip(0, 1)
-	inverse_crop_frame = cv2.warpAffine(crop_frame, inverse_matrix, temp_frame_size, borderMode = cv2.BORDER_REPLICATE)
-	paste_frame = temp_frame.copy()
-	paste_frame[:, :, 0] = inverse_crop_mask * inverse_crop_frame[:, :, 0] + (1 - inverse_crop_mask) * temp_frame[:, :, 0]
-	paste_frame[:, :, 1] = inverse_crop_mask * inverse_crop_frame[:, :, 1] + (1 - inverse_crop_mask) * temp_frame[:, :, 1]
-	paste_frame[:, :, 2] = inverse_crop_mask * inverse_crop_frame[:, :, 2] + (1 - inverse_crop_mask) * temp_frame[:, :, 2]
-	return paste_frame
-
-
-@lru_cache(maxsize = None)
-def create_static_anchors(feature_stride : int, anchor_total : int, stride_height : int, stride_width : int) -> numpy.ndarray[Any, Any]:
-	y, x = numpy.mgrid[:stride_height, :stride_width][::-1]
-	anchors = numpy.stack((y, x), axis = -1)
-	anchors = (anchors * feature_stride).reshape((-1, 2))
-	anchors = numpy.stack([ anchors ] * anchor_total, axis = 1).reshape((-1, 2))
-	return anchors
-
-
-def distance_to_bbox(points : numpy.ndarray[Any, Any], distance : numpy.ndarray[Any, Any]) -> Bbox:
-	x1 = points[:, 0] - distance[:, 0]
-	y1 = points[:, 1] - distance[:, 1]
-	x2 = points[:, 0] + distance[:, 2]
-	y2 = points[:, 1] + distance[:, 3]
-	bbox = numpy.column_stack([ x1, y1, x2, y2 ])
-	return bbox
-
-
-def distance_to_kps(points : numpy.ndarray[Any, Any], distance : numpy.ndarray[Any, Any]) -> Kps:
-	x = points[:, 0::2] + distance[:, 0::2]
-	y = points[:, 1::2] + distance[:, 1::2]
-	kps = numpy.stack((x, y), axis = -1)
-	return kps
-
-
-def apply_nms(bbox_list : List[Bbox], iou_threshold : float) -> List[int]:
-	keep_indices = []
-	dimension_list = numpy.reshape(bbox_list, (-1, 4))
-	x1 = dimension_list[:, 0]
-	y1 = dimension_list[:, 1]
-	x2 = dimension_list[:, 2]
-	y2 = dimension_list[:, 3]
-	areas = (x2 - x1 + 1) * (y2 - y1 + 1)
-	indices = numpy.arange(len(bbox_list))
-	while indices.size > 0:
-		index = indices[0]
-		remain_indices = indices[1:]
-		keep_indices.append(index)
-		xx1 = numpy.maximum(x1[index], x1[remain_indices])
-		yy1 = numpy.maximum(y1[index], y1[remain_indices])
-		xx2 = numpy.minimum(x2[index], x2[remain_indices])
-		yy2 = numpy.minimum(y2[index], y2[remain_indices])
-		width = numpy.maximum(0, xx2 - xx1 + 1)
-		height = numpy.maximum(0, yy2 - yy1 + 1)
-		iou = width * height / (areas[index] + areas[remain_indices] - width * height)
-		indices = indices[numpy.where(iou <= iou_threshold)[0] + 1]
-	return keep_indices
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
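For orientation, a standalone check of the IoU arithmetic that apply_nms relies on; this is a sketch with invented sample boxes, not part of the deleted module:

def box_iou(box_a, box_b):
    # intersection-over-union with the same +1 pixel convention as apply_nms
    xx1, yy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    xx2, yy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    intersection = max(0, xx2 - xx1 + 1) * max(0, yy2 - yy1 + 1)
    area_a = (box_a[2] - box_a[0] + 1) * (box_a[3] - box_a[1] + 1)
    area_b = (box_b[2] - box_b[0] + 1) * (box_b[3] - box_b[1] + 1)
    return intersection / (area_a + area_b - intersection)

print(box_iou((10, 10, 110, 110), (12, 12, 112, 112)))    # ~0.92, suppressed at iou_threshold 0.4
print(box_iou((10, 10, 110, 110), (300, 300, 400, 400)))  # 0.0, kept
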
DeepFakeAI/face_masker.py
DELETED
@@ -1,128 +0,0 @@
from typing import Any, Dict, List
from cv2.typing import Size
from functools import lru_cache
import threading
import cv2
import numpy
import onnxruntime

import DeepFakeAI.globals
from DeepFakeAI.typing import Frame, Mask, Padding, FaceMaskRegion, ModelSet
from DeepFakeAI.filesystem import resolve_relative_path
from DeepFakeAI.download import conditional_download

FACE_OCCLUDER = None
FACE_PARSER = None
THREAD_LOCK : threading.Lock = threading.Lock()
MODELS : ModelSet =\
{
    'face_occluder':
    {
        'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/face_occluder.onnx',
        'path': resolve_relative_path('../.assets/models/face_occluder.onnx')
    },
    'face_parser':
    {
        'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/face_parser.onnx',
        'path': resolve_relative_path('../.assets/models/face_parser.onnx')
    }
}
FACE_MASK_REGIONS : Dict[FaceMaskRegion, int] =\
{
    'skin': 1,
    'left-eyebrow': 2,
    'right-eyebrow': 3,
    'left-eye': 4,
    'right-eye': 5,
    'eye-glasses': 6,
    'nose': 10,
    'mouth': 11,
    'upper-lip': 12,
    'lower-lip': 13
}


def get_face_occluder() -> Any:
    global FACE_OCCLUDER

    with THREAD_LOCK:
        if FACE_OCCLUDER is None:
            model_path = MODELS.get('face_occluder').get('path')
            FACE_OCCLUDER = onnxruntime.InferenceSession(model_path, providers = DeepFakeAI.globals.execution_providers)
    return FACE_OCCLUDER


def get_face_parser() -> Any:
    global FACE_PARSER

    with THREAD_LOCK:
        if FACE_PARSER is None:
            model_path = MODELS.get('face_parser').get('path')
            FACE_PARSER = onnxruntime.InferenceSession(model_path, providers = DeepFakeAI.globals.execution_providers)
    return FACE_PARSER


def clear_face_occluder() -> None:
    global FACE_OCCLUDER

    FACE_OCCLUDER = None


def clear_face_parser() -> None:
    global FACE_PARSER

    FACE_PARSER = None


def pre_check() -> bool:
    if not DeepFakeAI.globals.skip_download:
        download_directory_path = resolve_relative_path('../.assets/models')
        model_urls =\
        [
            MODELS.get('face_occluder').get('url'),
            MODELS.get('face_parser').get('url'),
        ]
        conditional_download(download_directory_path, model_urls)
    return True


@lru_cache(maxsize = None)
def create_static_box_mask(crop_size : Size, face_mask_blur : float, face_mask_padding : Padding) -> Mask:
    blur_amount = int(crop_size[0] * 0.5 * face_mask_blur)
    blur_area = max(blur_amount // 2, 1)
    box_mask = numpy.ones(crop_size, numpy.float32)
    box_mask[:max(blur_area, int(crop_size[1] * face_mask_padding[0] / 100)), :] = 0
    box_mask[-max(blur_area, int(crop_size[1] * face_mask_padding[2] / 100)):, :] = 0
    box_mask[:, :max(blur_area, int(crop_size[0] * face_mask_padding[3] / 100))] = 0
    box_mask[:, -max(blur_area, int(crop_size[0] * face_mask_padding[1] / 100)):] = 0
    if blur_amount > 0:
        box_mask = cv2.GaussianBlur(box_mask, (0, 0), blur_amount * 0.25)
    return box_mask


def create_occlusion_mask(crop_frame : Frame) -> Mask:
    face_occluder = get_face_occluder()
    prepare_frame = cv2.resize(crop_frame, face_occluder.get_inputs()[0].shape[1:3][::-1])
    prepare_frame = numpy.expand_dims(prepare_frame, axis = 0).astype(numpy.float32) / 255
    prepare_frame = prepare_frame.transpose(0, 1, 2, 3)
    occlusion_mask = face_occluder.run(None,
    {
        face_occluder.get_inputs()[0].name: prepare_frame
    })[0][0]
    occlusion_mask = occlusion_mask.transpose(0, 1, 2).clip(0, 1).astype(numpy.float32)
    occlusion_mask = cv2.resize(occlusion_mask, crop_frame.shape[:2][::-1])
    return occlusion_mask


def create_region_mask(crop_frame : Frame, face_mask_regions : List[FaceMaskRegion]) -> Mask:
    face_parser = get_face_parser()
    prepare_frame = cv2.flip(cv2.resize(crop_frame, (512, 512)), 1)
    prepare_frame = numpy.expand_dims(prepare_frame, axis = 0).astype(numpy.float32)[:, :, ::-1] / 127.5 - 1
    prepare_frame = prepare_frame.transpose(0, 3, 1, 2)
    region_mask = face_parser.run(None,
    {
        face_parser.get_inputs()[0].name: prepare_frame
    })[0][0]
    region_mask = numpy.isin(region_mask.argmax(0), [ FACE_MASK_REGIONS[region] for region in face_mask_regions ])
    region_mask = cv2.resize(region_mask.astype(numpy.float32), crop_frame.shape[:2][::-1])
    return region_mask
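As a worked example of the box-mask arithmetic in create_static_box_mask (values invented): a 512x512 crop with face_mask_blur of 0.3 and zero padding zeroes a 38 pixel border, then feathers it with a Gaussian blur of sigma 19.

crop_size = (512, 512)
face_mask_blur = 0.3
blur_amount = int(crop_size[0] * 0.5 * face_mask_blur)  # 76
blur_area = max(blur_amount // 2, 1)                    # 38 pixels zeroed per edge
sigma = blur_amount * 0.25                              # 19.0 for cv2.GaussianBlur
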
DeepFakeAI/face_store.py
DELETED
@@ -1,47 +0,0 @@
from typing import Optional, List
import hashlib

from DeepFakeAI.typing import Frame, Face, FaceStore, FaceSet

FACE_STORE: FaceStore =\
{
    'static_faces': {},
    'reference_faces': {}
}


def get_static_faces(frame : Frame) -> Optional[List[Face]]:
    frame_hash = create_frame_hash(frame)
    if frame_hash in FACE_STORE['static_faces']:
        return FACE_STORE['static_faces'][frame_hash]
    return None


def set_static_faces(frame : Frame, faces : List[Face]) -> None:
    frame_hash = create_frame_hash(frame)
    if frame_hash:
        FACE_STORE['static_faces'][frame_hash] = faces


def clear_static_faces() -> None:
    FACE_STORE['static_faces'] = {}


def create_frame_hash(frame: Frame) -> Optional[str]:
    return hashlib.sha1(frame.tobytes()).hexdigest() if frame.any() else None


def get_reference_faces() -> Optional[FaceSet]:
    if FACE_STORE['reference_faces']:
        return FACE_STORE['reference_faces']
    return None


def append_reference_face(name : str, face : Face) -> None:
    if name not in FACE_STORE['reference_faces']:
        FACE_STORE['reference_faces'][name] = []
    FACE_STORE['reference_faces'][name].append(face)


def clear_reference_faces() -> None:
    FACE_STORE['reference_faces'] = {}
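The cache key in create_frame_hash is simply a SHA-1 over the raw pixel buffer, so byte-identical frames share one entry and repeated detection on the same frame is skipped. A standalone sketch with a dummy frame:

import hashlib
import numpy

frame = numpy.zeros((4, 4, 3), numpy.uint8)
frame[0, 0] = (255, 0, 0)                          # non-zero so frame.any() is True
print(hashlib.sha1(frame.tobytes()).hexdigest())   # stable key for identical pixel data
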
DeepFakeAI/ffmpeg.py
DELETED
@@ -1,81 +0,0 @@
from typing import List
import subprocess

import DeepFakeAI.globals
from DeepFakeAI import logger
from DeepFakeAI.filesystem import get_temp_frames_pattern, get_temp_output_video_path
from DeepFakeAI.vision import detect_fps


def run_ffmpeg(args : List[str]) -> bool:
    commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ]
    commands.extend(args)
    try:
        subprocess.run(commands, stderr = subprocess.PIPE, check = True)
        return True
    except subprocess.CalledProcessError as exception:
        logger.debug(exception.stderr.decode().strip(), __name__.upper())
        return False


def open_ffmpeg(args : List[str]) -> subprocess.Popen[bytes]:
    commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ]
    commands.extend(args)
    return subprocess.Popen(commands, stdin = subprocess.PIPE)


def extract_frames(target_path : str, fps : float) -> bool:
    temp_frame_compression = round(31 - (DeepFakeAI.globals.temp_frame_quality * 0.31))
    trim_frame_start = DeepFakeAI.globals.trim_frame_start
    trim_frame_end = DeepFakeAI.globals.trim_frame_end
    temp_frames_pattern = get_temp_frames_pattern(target_path, '%04d')
    commands = [ '-hwaccel', 'auto', '-i', target_path, '-q:v', str(temp_frame_compression), '-pix_fmt', 'rgb24' ]
    if trim_frame_start is not None and trim_frame_end is not None:
        commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ':end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ])
    elif trim_frame_start is not None:
        commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ',fps=' + str(fps) ])
    elif trim_frame_end is not None:
        commands.extend([ '-vf', 'trim=end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ])
    else:
        commands.extend([ '-vf', 'fps=' + str(fps) ])
    commands.extend([ '-vsync', '0', temp_frames_pattern ])
    return run_ffmpeg(commands)


def compress_image(output_path : str) -> bool:
    output_image_compression = round(31 - (DeepFakeAI.globals.output_image_quality * 0.31))
    commands = [ '-hwaccel', 'auto', '-i', output_path, '-q:v', str(output_image_compression), '-y', output_path ]
    return run_ffmpeg(commands)


def merge_video(target_path : str, fps : float) -> bool:
    temp_output_video_path = get_temp_output_video_path(target_path)
    temp_frames_pattern = get_temp_frames_pattern(target_path, '%04d')
    commands = [ '-hwaccel', 'auto', '-r', str(fps), '-i', temp_frames_pattern, '-c:v', DeepFakeAI.globals.output_video_encoder ]
    if DeepFakeAI.globals.output_video_encoder in [ 'libx264', 'libx265' ]:
        output_video_compression = round(51 - (DeepFakeAI.globals.output_video_quality * 0.51))
        commands.extend([ '-crf', str(output_video_compression) ])
    if DeepFakeAI.globals.output_video_encoder in [ 'libvpx-vp9' ]:
        output_video_compression = round(63 - (DeepFakeAI.globals.output_video_quality * 0.63))
        commands.extend([ '-crf', str(output_video_compression) ])
    if DeepFakeAI.globals.output_video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]:
        output_video_compression = round(51 - (DeepFakeAI.globals.output_video_quality * 0.51))
        commands.extend([ '-cq', str(output_video_compression) ])
    commands.extend([ '-pix_fmt', 'yuv420p', '-colorspace', 'bt709', '-y', temp_output_video_path ])
    return run_ffmpeg(commands)


def restore_audio(target_path : str, output_path : str) -> bool:
    fps = detect_fps(target_path)
    trim_frame_start = DeepFakeAI.globals.trim_frame_start
    trim_frame_end = DeepFakeAI.globals.trim_frame_end
    temp_output_video_path = get_temp_output_video_path(target_path)
    commands = [ '-hwaccel', 'auto', '-i', temp_output_video_path ]
    if trim_frame_start is not None:
        start_time = trim_frame_start / fps
        commands.extend([ '-ss', str(start_time) ])
    if trim_frame_end is not None:
        end_time = trim_frame_end / fps
        commands.extend([ '-to', str(end_time) ])
    commands.extend([ '-i', target_path, '-c', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-shortest', '-y', output_path ])
    return run_ffmpeg(commands)
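The quality-to-compression mapping in merge_video inverts the 0-100 quality scale onto the encoder range, e.g. CRF 0-51 for libx264/libx265. A quick standalone check with sample values:

for output_video_quality in (0, 50, 80, 100):
    print(output_video_quality, round(51 - (output_video_quality * 0.51)))
# 0 -> CRF 51 (worst), 50 -> 26, 80 -> 10, 100 -> CRF 0 (best)
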
DeepFakeAI/filesystem.py
DELETED
@@ -1,91 +0,0 @@
from typing import List, Optional
import glob
import os
import shutil
import tempfile
import filetype
from pathlib import Path

import DeepFakeAI.globals

TEMP_DIRECTORY_PATH = os.path.join(tempfile.gettempdir(), 'DeepFakeAI')
TEMP_OUTPUT_VIDEO_NAME = 'temp.mp4'


def get_temp_frame_paths(target_path : str) -> List[str]:
    temp_frames_pattern = get_temp_frames_pattern(target_path, '*')
    return sorted(glob.glob(temp_frames_pattern))


def get_temp_frames_pattern(target_path : str, temp_frame_prefix : str) -> str:
    temp_directory_path = get_temp_directory_path(target_path)
    return os.path.join(temp_directory_path, temp_frame_prefix + '.' + DeepFakeAI.globals.temp_frame_format)


def get_temp_directory_path(target_path : str) -> str:
    target_name, _ = os.path.splitext(os.path.basename(target_path))
    return os.path.join(TEMP_DIRECTORY_PATH, target_name)


def get_temp_output_video_path(target_path : str) -> str:
    temp_directory_path = get_temp_directory_path(target_path)
    return os.path.join(temp_directory_path, TEMP_OUTPUT_VIDEO_NAME)


def create_temp(target_path : str) -> None:
    temp_directory_path = get_temp_directory_path(target_path)
    Path(temp_directory_path).mkdir(parents = True, exist_ok = True)


def move_temp(target_path : str, output_path : str) -> None:
    temp_output_video_path = get_temp_output_video_path(target_path)
    if is_file(temp_output_video_path):
        if is_file(output_path):
            os.remove(output_path)
        shutil.move(temp_output_video_path, output_path)


def clear_temp(target_path : str) -> None:
    temp_directory_path = get_temp_directory_path(target_path)
    parent_directory_path = os.path.dirname(temp_directory_path)
    if not DeepFakeAI.globals.keep_temp and is_directory(temp_directory_path):
        shutil.rmtree(temp_directory_path)
    if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path):
        os.rmdir(parent_directory_path)


def is_file(file_path : str) -> bool:
    return bool(file_path and os.path.isfile(file_path))


def is_directory(directory_path : str) -> bool:
    return bool(directory_path and os.path.isdir(directory_path))


def is_image(image_path : str) -> bool:
    if is_file(image_path):
        return filetype.helpers.is_image(image_path)
    return False


def are_images(image_paths : List[str]) -> bool:
    if image_paths:
        return all(is_image(image_path) for image_path in image_paths)
    return False


def is_video(video_path : str) -> bool:
    if is_file(video_path):
        return filetype.helpers.is_video(video_path)
    return False


def resolve_relative_path(path : str) -> str:
    return os.path.abspath(os.path.join(os.path.dirname(__file__), path))


def list_module_names(path : str) -> Optional[List[str]]:
    if os.path.exists(path):
        files = os.listdir(path)
        return [ Path(file).stem for file in files if not Path(file).stem.startswith(('.', '__')) ]
    return None
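The helpers above all hang off one layout: <system temp>/DeepFakeAI/<target name>/. A standalone sketch with an invented target path:

import os
import tempfile

target_path = '/videos/clip.mp4'  # hypothetical input
target_name, _ = os.path.splitext(os.path.basename(target_path))
temp_directory_path = os.path.join(tempfile.gettempdir(), 'DeepFakeAI', target_name)
print(os.path.join(temp_directory_path, '%04d.jpg'))  # e.g. /tmp/DeepFakeAI/clip/%04d.jpg
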
DeepFakeAI/globals.py
DELETED
@@ -1,51 +0,0 @@
from typing import List, Optional

from DeepFakeAI.typing import LogLevel, FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceMaskType, FaceMaskRegion, OutputVideoEncoder, FaceDetectorModel, FaceRecognizerModel, TempFrameFormat, Padding

# general
source_paths : Optional[List[str]] = None
target_path : Optional[str] = None
output_path : Optional[str] = None
# misc
skip_download : Optional[bool] = None
headless : Optional[bool] = None
log_level : Optional[LogLevel] = None
# execution
execution_providers : List[str] = []
execution_thread_count : Optional[int] = None
execution_queue_count : Optional[int] = None
max_memory : Optional[int] = None
# face analyser
face_analyser_order : Optional[FaceAnalyserOrder] = None
face_analyser_age : Optional[FaceAnalyserAge] = None
face_analyser_gender : Optional[FaceAnalyserGender] = None
face_detector_model : Optional[FaceDetectorModel] = None
face_detector_size : Optional[str] = None
face_detector_score : Optional[float] = None
face_recognizer_model : Optional[FaceRecognizerModel] = None
# face selector
face_selector_mode : Optional[FaceSelectorMode] = None
reference_face_position : Optional[int] = None
reference_face_distance : Optional[float] = None
reference_frame_number : Optional[int] = None
# face mask
face_mask_types : Optional[List[FaceMaskType]] = None
face_mask_blur : Optional[float] = None
face_mask_padding : Optional[Padding] = None
face_mask_regions : Optional[List[FaceMaskRegion]] = None
# frame extraction
trim_frame_start : Optional[int] = None
trim_frame_end : Optional[int] = None
temp_frame_format : Optional[TempFrameFormat] = None
temp_frame_quality : Optional[int] = None
keep_temp : Optional[bool] = None
# output creation
output_image_quality : Optional[int] = None
output_video_encoder : Optional[OutputVideoEncoder] = None
output_video_quality : Optional[int] = None
keep_fps : Optional[bool] = None
skip_audio : Optional[bool] = None
# frame processors
frame_processors : List[str] = []
# uis
ui_layouts : List[str] = []
DeepFakeAI/installer.py
DELETED
@@ -1,92 +0,0 @@
from typing import Dict, Tuple
import sys
import os
import platform
import tempfile
import subprocess
from argparse import ArgumentParser, HelpFormatter

subprocess.call([ 'pip', 'install', 'inquirer', '-q' ])

import inquirer

from DeepFakeAI import metadata, wording

TORCH : Dict[str, str] =\
{
    'default': 'default',
    'cpu': 'cpu'
}
ONNXRUNTIMES : Dict[str, Tuple[str, str]] =\
{
    'default': ('onnxruntime', '1.16.3')
}
if platform.system().lower() == 'linux' or platform.system().lower() == 'windows':
    TORCH['cuda'] = 'cu118'
    TORCH['cuda-nightly'] = 'cu121'
    ONNXRUNTIMES['cuda'] = ('onnxruntime-gpu', '1.16.3')
    ONNXRUNTIMES['cuda-nightly'] = ('ort-nightly-gpu', '1.17.0.dev20231205004')
    ONNXRUNTIMES['openvino'] = ('onnxruntime-openvino', '1.16.0')
if platform.system().lower() == 'linux':
    TORCH['rocm'] = 'rocm5.6'
    ONNXRUNTIMES['rocm'] = ('onnxruntime-rocm', '1.16.3')
if platform.system().lower() == 'darwin':
    ONNXRUNTIMES['coreml-legacy'] = ('onnxruntime-coreml', '1.13.1')
    ONNXRUNTIMES['coreml-silicon'] = ('onnxruntime-silicon', '1.16.0')
if platform.system().lower() == 'windows':
    ONNXRUNTIMES['directml'] = ('onnxruntime-directml', '1.16.3')


def cli() -> None:
    program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120))
    program.add_argument('--torch', help = wording.get('install_dependency_help').format(dependency = 'torch'), choices = TORCH.keys())
    program.add_argument('--onnxruntime', help = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), choices = ONNXRUNTIMES.keys())
    program.add_argument('--skip-venv', help = wording.get('skip_venv_help'), action = 'store_true')
    program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
    run(program)


def run(program : ArgumentParser) -> None:
    args = program.parse_args()
    python_id = 'cp' + str(sys.version_info.major) + str(sys.version_info.minor)

    if not args.skip_venv:
        os.environ['PIP_REQUIRE_VIRTUALENV'] = '1'
    if args.torch and args.onnxruntime:
        answers =\
        {
            'torch': args.torch,
            'onnxruntime': args.onnxruntime
        }
    else:
        answers = inquirer.prompt(
        [
            inquirer.List('torch', message = wording.get('install_dependency_help').format(dependency = 'torch'), choices = list(TORCH.keys())),
            inquirer.List('onnxruntime', message = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), choices = list(ONNXRUNTIMES.keys()))
        ])
    if answers:
        torch = answers['torch']
        torch_wheel = TORCH[torch]
        onnxruntime = answers['onnxruntime']
        onnxruntime_name, onnxruntime_version = ONNXRUNTIMES[onnxruntime]

        subprocess.call([ 'pip', 'uninstall', 'torch', '-y', '-q' ])
        if torch_wheel == 'default':
            subprocess.call([ 'pip', 'install', '-r', 'requirements.txt' ])
        else:
            subprocess.call([ 'pip', 'install', '-r', 'requirements.txt', '--extra-index-url', 'https://download.pytorch.org/whl/' + torch_wheel ])
        if onnxruntime == 'rocm':
            if python_id in [ 'cp39', 'cp310', 'cp311' ]:
                wheel_name = 'onnxruntime_training-' + onnxruntime_version + '+rocm56-' + python_id + '-' + python_id + '-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'
                wheel_path = os.path.join(tempfile.gettempdir(), wheel_name)
                wheel_url = 'https://download.onnxruntime.ai/' + wheel_name
                subprocess.call([ 'curl', '--silent', '--location', '--continue-at', '-', '--output', wheel_path, wheel_url ])
                subprocess.call([ 'pip', 'uninstall', wheel_path, '-y', '-q' ])
                subprocess.call([ 'pip', 'install', wheel_path ])
                os.remove(wheel_path)
        else:
            subprocess.call([ 'pip', 'uninstall', 'onnxruntime', onnxruntime_name, '-y', '-q' ])
            if onnxruntime == 'cuda-nightly':
                subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version, '--extra-index-url', 'https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ort-cuda-12-nightly/pypi/simple' ])
            else:
                subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version ])
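The ROCm branch in run keys the wheel file name off the CPython tag. A standalone sketch of that tag derivation:

import sys

python_id = 'cp' + str(sys.version_info.major) + str(sys.version_info.minor)
print(python_id)  # e.g. 'cp310' on Python 3.10, matching the cp39/cp310/cp311 check above
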
DeepFakeAI/logger.py
DELETED
@@ -1,39 +0,0 @@
from typing import Dict
from logging import basicConfig, getLogger, Logger, DEBUG, INFO, WARNING, ERROR

from DeepFakeAI.typing import LogLevel


def init(log_level : LogLevel) -> None:
    basicConfig(format = None)
    get_package_logger().setLevel(get_log_levels()[log_level])


def get_package_logger() -> Logger:
    return getLogger('DeepFakeAI')


def debug(message : str, scope : str) -> None:
    get_package_logger().debug('[' + scope + '] ' + message)


def info(message : str, scope : str) -> None:
    get_package_logger().info('[' + scope + '] ' + message)


def warn(message : str, scope : str) -> None:
    get_package_logger().warning('[' + scope + '] ' + message)


def error(message : str, scope : str) -> None:
    get_package_logger().error('[' + scope + '] ' + message)


def get_log_levels() -> Dict[LogLevel, int]:
    return\
    {
        'error': ERROR,
        'warn': WARNING,
        'info': INFO,
        'debug': DEBUG
    }
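Equivalent wiring in plain stdlib logging terms, to show what the module-level helpers amount to (the scope string below is invented):

import logging

logging.basicConfig(format = None)
package_logger = logging.getLogger('DeepFakeAI')
package_logger.setLevel(logging.INFO)
package_logger.info('[CORE] processing started')  # mirrors info('processing started', 'CORE')
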
DeepFakeAI/metadata.py
DELETED
@@ -1,13 +0,0 @@
METADATA =\
{
    'name': 'FaceFusion',
    'description': 'Next generation face swapper and enhancer',
    'version': '2.1.3',
    'license': 'MIT',
    'author': 'Henry Ruhs',
    'url': 'https://DeepFakeAI.io'
}


def get(key : str) -> str:
    return METADATA[key]
DeepFakeAI/normalizer.py
DELETED
@@ -1,34 +0,0 @@
from typing import List, Optional
import os

from DeepFakeAI.filesystem import is_file, is_directory
from DeepFakeAI.typing import Padding


def normalize_output_path(source_paths : List[str], target_path : str, output_path : str) -> Optional[str]:
    if is_file(target_path) and is_directory(output_path):
        target_name, target_extension = os.path.splitext(os.path.basename(target_path))
        if source_paths and is_file(source_paths[0]):
            source_name, _ = os.path.splitext(os.path.basename(source_paths[0]))
            return os.path.join(output_path, source_name + '-' + target_name + target_extension)
        return os.path.join(output_path, target_name + target_extension)
    if is_file(target_path) and output_path:
        _, target_extension = os.path.splitext(os.path.basename(target_path))
        output_name, output_extension = os.path.splitext(os.path.basename(output_path))
        output_directory_path = os.path.dirname(output_path)
        if is_directory(output_directory_path) and output_extension:
            return os.path.join(output_directory_path, output_name + target_extension)
        return None
    return output_path


def normalize_padding(padding : Optional[List[int]]) -> Optional[Padding]:
    if padding and len(padding) == 1:
        return tuple([ padding[0], padding[0], padding[0], padding[0] ]) # type: ignore[return-value]
    if padding and len(padding) == 2:
        return tuple([ padding[0], padding[1], padding[0], padding[1] ]) # type: ignore[return-value]
    if padding and len(padding) == 3:
        return tuple([ padding[0], padding[1], padding[2], padding[1] ]) # type: ignore[return-value]
    if padding and len(padding) == 4:
        return tuple(padding) # type: ignore[return-value]
    return None
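normalize_padding follows the CSS shorthand convention: one value for all sides, two for vertical/horizontal, three for top/horizontal/bottom, and four taken as-is (top, right, bottom, left). A standalone restatement for checking, with a hypothetical helper name:

def expand_padding(padding):
    # same shorthand expansion as normalize_padding above
    if len(padding) == 1:
        return (padding[0], padding[0], padding[0], padding[0])
    if len(padding) == 2:
        return (padding[0], padding[1], padding[0], padding[1])
    if len(padding) == 3:
        return (padding[0], padding[1], padding[2], padding[1])
    return tuple(padding)

print(expand_padding([ 10 ]))          # (10, 10, 10, 10)
print(expand_padding([ 10, 20 ]))      # (10, 20, 10, 20)
print(expand_padding([ 10, 20, 30 ]))  # (10, 20, 30, 20)
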
DeepFakeAI/processors/__init__.py
DELETED
File without changes

DeepFakeAI/processors/frame/__init__.py
DELETED
File without changes
DeepFakeAI/processors/frame/choices.py
DELETED
@@ -1,13 +0,0 @@
from typing import List
import numpy

from DeepFakeAI.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem

face_swapper_models : List[FaceSwapperModel] = [ 'blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial' ]
face_enhancer_models : List[FaceEnhancerModel] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer' ]
frame_enhancer_models : List[FrameEnhancerModel] = [ 'real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus' ]

face_enhancer_blend_range : List[int] = numpy.arange(0, 101, 1).tolist()
frame_enhancer_blend_range : List[int] = numpy.arange(0, 101, 1).tolist()

face_debugger_items : List[FaceDebuggerItem] = [ 'bbox', 'kps', 'face-mask', 'score' ]
DeepFakeAI/processors/frame/core.py
DELETED
@@ -1,98 +0,0 @@
import sys
import importlib
from concurrent.futures import ThreadPoolExecutor, as_completed
from queue import Queue
from types import ModuleType
from typing import Any, List
from tqdm import tqdm

import DeepFakeAI.globals
from DeepFakeAI.typing import Process_Frames
from DeepFakeAI.execution_helper import encode_execution_providers
from DeepFakeAI import logger, wording

FRAME_PROCESSORS_MODULES : List[ModuleType] = []
FRAME_PROCESSORS_METHODS =\
[
    'get_frame_processor',
    'clear_frame_processor',
    'get_options',
    'set_options',
    'register_args',
    'apply_args',
    'pre_check',
    'pre_process',
    'get_reference_frame',
    'process_frame',
    'process_frames',
    'process_image',
    'process_video',
    'post_process'
]


def load_frame_processor_module(frame_processor : str) -> Any:
    try:
        frame_processor_module = importlib.import_module('DeepFakeAI.processors.frame.modules.' + frame_processor)
        for method_name in FRAME_PROCESSORS_METHODS:
            if not hasattr(frame_processor_module, method_name):
                raise NotImplementedError
    except ModuleNotFoundError as exception:
        logger.debug(exception.msg, __name__.upper())
        sys.exit(wording.get('frame_processor_not_loaded').format(frame_processor = frame_processor))
    except NotImplementedError:
        sys.exit(wording.get('frame_processor_not_implemented').format(frame_processor = frame_processor))
    return frame_processor_module


def get_frame_processors_modules(frame_processors : List[str]) -> List[ModuleType]:
    global FRAME_PROCESSORS_MODULES

    if not FRAME_PROCESSORS_MODULES:
        for frame_processor in frame_processors:
            frame_processor_module = load_frame_processor_module(frame_processor)
            FRAME_PROCESSORS_MODULES.append(frame_processor_module)
    return FRAME_PROCESSORS_MODULES


def clear_frame_processors_modules() -> None:
    global FRAME_PROCESSORS_MODULES

    for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
        frame_processor_module.clear_frame_processor()
    FRAME_PROCESSORS_MODULES = []


def multi_process_frames(source_paths : List[str], temp_frame_paths : List[str], process_frames : Process_Frames) -> None:
    with tqdm(total = len(temp_frame_paths), desc = wording.get('processing'), unit = 'frame', ascii = ' =', disable = DeepFakeAI.globals.log_level in [ 'warn', 'error' ]) as progress:
        progress.set_postfix(
        {
            'execution_providers': encode_execution_providers(DeepFakeAI.globals.execution_providers),
            'execution_thread_count': DeepFakeAI.globals.execution_thread_count,
            'execution_queue_count': DeepFakeAI.globals.execution_queue_count
        })
        with ThreadPoolExecutor(max_workers = DeepFakeAI.globals.execution_thread_count) as executor:
            futures = []
            queue_temp_frame_paths : Queue[str] = create_queue(temp_frame_paths)
            queue_per_future = max(len(temp_frame_paths) // DeepFakeAI.globals.execution_thread_count * DeepFakeAI.globals.execution_queue_count, 1)
            while not queue_temp_frame_paths.empty():
                payload_temp_frame_paths = pick_queue(queue_temp_frame_paths, queue_per_future)
                future = executor.submit(process_frames, source_paths, payload_temp_frame_paths, progress.update)
                futures.append(future)
            for future_done in as_completed(futures):
                future_done.result()


def create_queue(temp_frame_paths : List[str]) -> Queue[str]:
    queue : Queue[str] = Queue()
    for frame_path in temp_frame_paths:
        queue.put(frame_path)
    return queue


def pick_queue(queue : Queue[str], queue_per_future : int) -> List[str]:
    queues = []
    for _ in range(queue_per_future):
        if not queue.empty():
            queues.append(queue.get())
    return queues
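The scheduler in multi_process_frames hands each future a fixed-size slice of the frame queue, sized by queue_per_future = max(frames // threads * queue_count, 1). A standalone sketch of the same drain-in-chunks pattern, with invented file names:

from queue import Queue

def pick_chunk(queue, chunk_size):
    # same idea as pick_queue: take up to chunk_size items per submitted future
    items = []
    for _ in range(chunk_size):
        if not queue.empty():
            items.append(queue.get())
    return items

frame_queue = Queue()
for frame_path in [ '0001.jpg', '0002.jpg', '0003.jpg' ]:
    frame_queue.put(frame_path)
print(pick_chunk(frame_queue, 2))  # ['0001.jpg', '0002.jpg']
print(pick_chunk(frame_queue, 2))  # ['0003.jpg']
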
DeepFakeAI/processors/frame/globals.py
DELETED
@@ -1,10 +0,0 @@
from typing import List, Optional

from DeepFakeAI.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem

face_swapper_model : Optional[FaceSwapperModel] = None
face_enhancer_model : Optional[FaceEnhancerModel] = None
face_enhancer_blend : Optional[int] = None
frame_enhancer_model : Optional[FrameEnhancerModel] = None
frame_enhancer_blend : Optional[int] = None
face_debugger_items : Optional[List[FaceDebuggerItem]] = None
DeepFakeAI/processors/frame/modules/__init__.py
DELETED
File without changes
DeepFakeAI/processors/frame/modules/face_debugger.py
DELETED
@@ -1,142 +0,0 @@
from typing import Any, List, Literal
from argparse import ArgumentParser
import cv2
import numpy

import DeepFakeAI.globals
import DeepFakeAI.processors.frame.core as frame_processors
from DeepFakeAI import wording
from DeepFakeAI.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
from DeepFakeAI.face_store import get_reference_faces
from DeepFakeAI.content_analyser import clear_content_analyser
from DeepFakeAI.typing import Face, FaceSet, Frame, Update_Process, ProcessMode
from DeepFakeAI.vision import read_image, read_static_image, read_static_images, write_image
from DeepFakeAI.face_helper import warp_face
from DeepFakeAI.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from DeepFakeAI.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices

NAME = __name__.upper()


def get_frame_processor() -> None:
    pass


def clear_frame_processor() -> None:
    pass


def get_options(key : Literal['model']) -> None:
    pass


def set_options(key : Literal['model'], value : Any) -> None:
    pass


def register_args(program : ArgumentParser) -> None:
    program.add_argument('--face-debugger-items', help = wording.get('face_debugger_items_help').format(choices = ', '.join(frame_processors_choices.face_debugger_items)), default = [ 'kps', 'face-mask' ], choices = frame_processors_choices.face_debugger_items, nargs = '+', metavar = 'FACE_DEBUGGER_ITEMS')


def apply_args(program : ArgumentParser) -> None:
    args = program.parse_args()
    frame_processors_globals.face_debugger_items = args.face_debugger_items


def pre_check() -> bool:
    return True


def pre_process(mode : ProcessMode) -> bool:
    return True


def post_process() -> None:
    clear_frame_processor()
    clear_face_analyser()
    clear_content_analyser()
    clear_face_occluder()
    clear_face_parser()
    read_static_image.cache_clear()


def debug_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
    primary_color = (0, 0, 255)
    secondary_color = (0, 255, 0)
    bounding_box = target_face.bbox.astype(numpy.int32)
    if 'bbox' in frame_processors_globals.face_debugger_items:
        cv2.rectangle(temp_frame, (bounding_box[0], bounding_box[1]), (bounding_box[2], bounding_box[3]), secondary_color, 2)
    if 'face-mask' in frame_processors_globals.face_debugger_items:
        crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, 'arcface_128_v2', (128, 512))
        inverse_matrix = cv2.invertAffineTransform(affine_matrix)
        temp_frame_size = temp_frame.shape[:2][::-1]
        crop_mask_list = []
        if 'box' in DeepFakeAI.globals.face_mask_types:
            crop_mask_list.append(create_static_box_mask(crop_frame.shape[:2][::-1], 0, DeepFakeAI.globals.face_mask_padding))
        if 'occlusion' in DeepFakeAI.globals.face_mask_types:
            crop_mask_list.append(create_occlusion_mask(crop_frame))
        if 'region' in DeepFakeAI.globals.face_mask_types:
            crop_mask_list.append(create_region_mask(crop_frame, DeepFakeAI.globals.face_mask_regions))
        crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
        crop_mask = (crop_mask * 255).astype(numpy.uint8)
        inverse_mask_frame = cv2.warpAffine(crop_mask, inverse_matrix, temp_frame_size)
        inverse_mask_frame_edges = cv2.threshold(inverse_mask_frame, 100, 255, cv2.THRESH_BINARY)[1]
        inverse_mask_frame_edges[inverse_mask_frame_edges > 0] = 255
        inverse_mask_contours = cv2.findContours(inverse_mask_frame_edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[0]
        cv2.drawContours(temp_frame, inverse_mask_contours, -1, primary_color, 2)
    if bounding_box[3] - bounding_box[1] > 60 and bounding_box[2] - bounding_box[0] > 60:
        if 'kps' in frame_processors_globals.face_debugger_items:
            kps = target_face.kps.astype(numpy.int32)
            for index in range(kps.shape[0]):
                cv2.circle(temp_frame, (kps[index][0], kps[index][1]), 3, primary_color, -1)
        if 'score' in frame_processors_globals.face_debugger_items:
            score_text = str(round(target_face.score, 2))
            score_position = (bounding_box[0] + 10, bounding_box[1] + 20)
            cv2.putText(temp_frame, score_text, score_position, cv2.FONT_HERSHEY_SIMPLEX, 0.5, secondary_color, 2)
    return temp_frame


def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
    pass


def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
    if 'reference' in DeepFakeAI.globals.face_selector_mode:
        similar_faces = find_similar_faces(temp_frame, reference_faces, DeepFakeAI.globals.reference_face_distance)
        if similar_faces:
            for similar_face in similar_faces:
                temp_frame = debug_face(source_face, similar_face, temp_frame)
    if 'one' in DeepFakeAI.globals.face_selector_mode:
        target_face = get_one_face(temp_frame)
        if target_face:
            temp_frame = debug_face(source_face, target_face, temp_frame)
    if 'many' in DeepFakeAI.globals.face_selector_mode:
        many_faces = get_many_faces(temp_frame)
        if many_faces:
            for target_face in many_faces:
                temp_frame = debug_face(source_face, target_face, temp_frame)
    return temp_frame


def process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
    source_frames = read_static_images(source_paths)
    source_face = get_average_face(source_frames)
    reference_faces = get_reference_faces() if 'reference' in DeepFakeAI.globals.face_selector_mode else None
    for temp_frame_path in temp_frame_paths:
        temp_frame = read_image(temp_frame_path)
        result_frame = process_frame(source_face, reference_faces, temp_frame)
        write_image(temp_frame_path, result_frame)
        update_progress()


def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
    source_frames = read_static_images(source_paths)
    source_face = get_average_face(source_frames)
    target_frame = read_static_image(target_path)
    reference_faces = get_reference_faces() if 'reference' in DeepFakeAI.globals.face_selector_mode else None
    result_frame = process_frame(source_face, reference_faces, target_frame)
    write_image(output_path, result_frame)


def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
    frame_processors.multi_process_frames(source_paths, temp_frame_paths, process_frames)
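The overlay primitives in debug_face are plain OpenCV drawing calls. A standalone sketch on a blank frame, with invented coordinates and score:

import cv2
import numpy

frame = numpy.zeros((240, 320, 3), numpy.uint8)
cv2.rectangle(frame, (40, 40), (200, 200), (0, 255, 0), 2)  # bbox, secondary color
for point in [ (80, 100), (160, 100), (120, 140) ]:
    cv2.circle(frame, point, 3, (0, 0, 255), -1)            # kps, primary color
cv2.putText(frame, '0.92', (50, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)  # score
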
DeepFakeAI/processors/frame/modules/face_enhancer.py
DELETED
@@ -1,249 +0,0 @@
|
|
1 |
-
from typing import Any, List, Literal, Optional
|
2 |
-
from argparse import ArgumentParser
|
3 |
-
import cv2
|
4 |
-
import threading
|
5 |
-
import numpy
|
6 |
-
import onnxruntime
|
7 |
-
|
8 |
-
import DeepFakeAI.globals
|
9 |
-
import DeepFakeAI.processors.frame.core as frame_processors
|
10 |
-
from DeepFakeAI import logger, wording
|
11 |
-
from DeepFakeAI.face_analyser import get_many_faces, clear_face_analyser, find_similar_faces, get_one_face
|
12 |
-
from DeepFakeAI.face_helper import warp_face, paste_back
|
13 |
-
from DeepFakeAI.content_analyser import clear_content_analyser
|
14 |
-
from DeepFakeAI.face_store import get_reference_faces
|
15 |
-
from DeepFakeAI.typing import Face, FaceSet, Frame, Update_Process, ProcessMode, ModelSet, OptionsWithModel
|
16 |
-
from DeepFakeAI.common_helper import create_metavar
|
17 |
-
from DeepFakeAI.filesystem import is_file, is_image, is_video, resolve_relative_path
|
18 |
-
from DeepFakeAI.download import conditional_download, is_download_done
|
19 |
-
from DeepFakeAI.vision import read_image, read_static_image, write_image
|
20 |
-
from DeepFakeAI.processors.frame import globals as frame_processors_globals
|
21 |
-
from DeepFakeAI.processors.frame import choices as frame_processors_choices
|
22 |
-
from DeepFakeAI.face_masker import create_static_box_mask, create_occlusion_mask, clear_face_occluder
|
23 |
-
|
24 |
-
FRAME_PROCESSOR = None
|
25 |
-
THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
|
26 |
-
THREAD_LOCK : threading.Lock = threading.Lock()
|
27 |
-
NAME = __name__.upper()
|
28 |
-
MODELS : ModelSet =\
|
29 |
-
{
|
30 |
-
'codeformer':
|
31 |
-
{
|
32 |
-
'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/codeformer.onnx',
|
33 |
-
'path': resolve_relative_path('../.assets/models/codeformer.onnx'),
|
34 |
-
'template': 'ffhq_512',
|
35 |
-
'size': (512, 512)
|
36 |
-
},
|
37 |
-
'gfpgan_1.2':
|
38 |
-
{
|
39 |
-
'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/gfpgan_1.2.onnx',
|
40 |
-
'path': resolve_relative_path('../.assets/models/gfpgan_1.2.onnx'),
|
41 |
-
'template': 'ffhq_512',
|
42 |
-
'size': (512, 512)
|
43 |
-
},
|
44 |
-
'gfpgan_1.3':
|
45 |
-
{
|
46 |
-
'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/gfpgan_1.3.onnx',
|
47 |
-
'path': resolve_relative_path('../.assets/models/gfpgan_1.3.onnx'),
|
48 |
-
'template': 'ffhq_512',
|
49 |
-
'size': (512, 512)
|
50 |
-
},
|
51 |
-
'gfpgan_1.4':
|
52 |
-
{
|
53 |
-
'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/gfpgan_1.4.onnx',
|
54 |
-
'path': resolve_relative_path('../.assets/models/gfpgan_1.4.onnx'),
|
55 |
-
'template': 'ffhq_512',
|
56 |
-
'size': (512, 512)
|
57 |
-
},
|
58 |
-
'gpen_bfr_256':
|
59 |
-
{
|
60 |
-
'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/gpen_bfr_256.onnx',
|
61 |
-
'path': resolve_relative_path('../.assets/models/gpen_bfr_256.onnx'),
|
62 |
-
'template': 'arcface_128_v2',
|
63 |
-
'size': (128, 256)
|
64 |
-
},
|
65 |
-
'gpen_bfr_512':
|
66 |
-
{
|
67 |
-
'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/gpen_bfr_512.onnx',
|
68 |
-
'path': resolve_relative_path('../.assets/models/gpen_bfr_512.onnx'),
|
69 |
-
'template': 'ffhq_512',
|
70 |
-
'size': (512, 512)
|
71 |
-
},
|
72 |
-
'restoreformer':
|
73 |
-
{
|
74 |
-
'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/restoreformer.onnx',
|
75 |
-
'path': resolve_relative_path('../.assets/models/restoreformer.onnx'),
|
76 |
-
'template': 'ffhq_512',
|
77 |
-
'size': (512, 512)
|
78 |
-
}
|
79 |
-
}
|
80 |
-
OPTIONS : Optional[OptionsWithModel] = None
|
81 |
-
|
82 |
-
|
83 |
-
def get_frame_processor() -> Any:
|
84 |
-
global FRAME_PROCESSOR
|
85 |
-
|
86 |
-
with THREAD_LOCK:
|
87 |
-
if FRAME_PROCESSOR is None:
|
88 |
-
model_path = get_options('model').get('path')
|
89 |
-
FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = DeepFakeAI.globals.execution_providers)
|
90 |
-
return FRAME_PROCESSOR
|
91 |
-
|
92 |
-
|
93 |
-
def clear_frame_processor() -> None:
|
94 |
-
global FRAME_PROCESSOR
|
95 |
-
|
96 |
-
FRAME_PROCESSOR = None
|
97 |
-
|
98 |
-
|
99 |
-
def get_options(key : Literal['model']) -> Any:
|
100 |
-
global OPTIONS
|
101 |
-
|
102 |
-
if OPTIONS is None:
|
103 |
-
OPTIONS =\
|
104 |
-
{
|
105 |
-
'model': MODELS[frame_processors_globals.face_enhancer_model]
|
106 |
-
}
|
107 |
-
return OPTIONS.get(key)
|
108 |
-
|
109 |
-
|
110 |
-
def set_options(key : Literal['model'], value : Any) -> None:
|
111 |
-
global OPTIONS
|
112 |
-
|
113 |
-
OPTIONS[key] = value
|
114 |
-
|
115 |
-
|
116 |
-
def register_args(program : ArgumentParser) -> None:
|
117 |
-
program.add_argument('--face-enhancer-model', help = wording.get('frame_processor_model_help'), default = 'gfpgan_1.4', choices = frame_processors_choices.face_enhancer_models)
|
118 |
-
program.add_argument('--face-enhancer-blend', help = wording.get('frame_processor_blend_help'), type = int, default = 80, choices = frame_processors_choices.face_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.face_enhancer_blend_range))
|
119 |
-
|
120 |
-
|
121 |
-
def apply_args(program : ArgumentParser) -> None:
|
122 |
-
args = program.parse_args()
|
123 |
-
frame_processors_globals.face_enhancer_model = args.face_enhancer_model
|
124 |
-
frame_processors_globals.face_enhancer_blend = args.face_enhancer_blend
|
125 |
-
|
126 |
-
|
127 |
-
def pre_check() -> bool:
|
128 |
-
if not DeepFakeAI.globals.skip_download:
|
129 |
-
download_directory_path = resolve_relative_path('../.assets/models')
|
130 |
-
model_url = get_options('model').get('url')
|
131 |
-
conditional_download(download_directory_path, [ model_url ])
|
132 |
-
return True
|
133 |
-
|
134 |
-
|
135 |
-
def pre_process(mode : ProcessMode) -> bool:
|
136 |
-
model_url = get_options('model').get('url')
|
137 |
-
model_path = get_options('model').get('path')
|
138 |
-
if not DeepFakeAI.globals.skip_download and not is_download_done(model_url, model_path):
|
139 |
-
logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
|
140 |
-
return False
|
141 |
-
elif not is_file(model_path):
|
142 |
-
logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
|
143 |
-
return False
|
144 |
-
if mode in [ 'output', 'preview' ] and not is_image(DeepFakeAI.globals.target_path) and not is_video(DeepFakeAI.globals.target_path):
|
145 |
-
logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
|
146 |
-
return False
|
147 |
-
if mode == 'output' and not DeepFakeAI.globals.output_path:
|
148 |
-
logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
|
149 |
-
return False
|
150 |
-
return True
|
151 |
-
|
152 |
-
|
153 |
-
def post_process() -> None:
|
154 |
-
clear_frame_processor()
|
155 |
-
clear_face_analyser()
|
156 |
-
clear_content_analyser()
|
157 |
-
clear_face_occluder()
|
158 |
-
read_static_image.cache_clear()
|
159 |
-
|
160 |
-
|
161 |
-
def enhance_face(target_face: Face, temp_frame: Frame) -> Frame:
|
162 |
-
frame_processor = get_frame_processor()
|
163 |
-
model_template = get_options('model').get('template')
|
164 |
-
model_size = get_options('model').get('size')
|
165 |
-
crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, model_template, model_size)
|
166 |
-
crop_mask_list =\
|
167 |
-
[
|
168 |
-
create_static_box_mask(crop_frame.shape[:2][::-1], DeepFakeAI.globals.face_mask_blur, (0, 0, 0, 0))
|
169 |
-
]
|
170 |
-
if 'occlusion' in DeepFakeAI.globals.face_mask_types:
|
171 |
-
crop_mask_list.append(create_occlusion_mask(crop_frame))
|
172 |
-
crop_frame = prepare_crop_frame(crop_frame)
|
173 |
-
frame_processor_inputs = {}
|
174 |
-
for frame_processor_input in frame_processor.get_inputs():
|
175 |
-
if frame_processor_input.name == 'input':
|
176 |
-
frame_processor_inputs[frame_processor_input.name] = crop_frame
|
177 |
-
        if frame_processor_input.name == 'weight':
            frame_processor_inputs[frame_processor_input.name] = numpy.array([ 1 ], dtype = numpy.double)
    with THREAD_SEMAPHORE:
        crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
    crop_frame = normalize_crop_frame(crop_frame)
    crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
    paste_frame = paste_back(temp_frame, crop_frame, crop_mask, affine_matrix)
    temp_frame = blend_frame(temp_frame, paste_frame)
    return temp_frame


def prepare_crop_frame(crop_frame : Frame) -> Frame:
    crop_frame = crop_frame[:, :, ::-1] / 255.0
    crop_frame = (crop_frame - 0.5) / 0.5
    crop_frame = numpy.expand_dims(crop_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
    return crop_frame


def normalize_crop_frame(crop_frame : Frame) -> Frame:
    crop_frame = numpy.clip(crop_frame, -1, 1)
    crop_frame = (crop_frame + 1) / 2
    crop_frame = crop_frame.transpose(1, 2, 0)
    crop_frame = (crop_frame * 255.0).round()
    crop_frame = crop_frame.astype(numpy.uint8)[:, :, ::-1]
    return crop_frame


def blend_frame(temp_frame : Frame, paste_frame : Frame) -> Frame:
    face_enhancer_blend = 1 - (frame_processors_globals.face_enhancer_blend / 100)
    temp_frame = cv2.addWeighted(temp_frame, face_enhancer_blend, paste_frame, 1 - face_enhancer_blend, 0)
    return temp_frame


def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Optional[Frame]:
    return enhance_face(target_face, temp_frame)


def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
    if 'reference' in DeepFakeAI.globals.face_selector_mode:
        similar_faces = find_similar_faces(temp_frame, reference_faces, DeepFakeAI.globals.reference_face_distance)
        if similar_faces:
            for similar_face in similar_faces:
                temp_frame = enhance_face(similar_face, temp_frame)
    if 'one' in DeepFakeAI.globals.face_selector_mode:
        target_face = get_one_face(temp_frame)
        if target_face:
            temp_frame = enhance_face(target_face, temp_frame)
    if 'many' in DeepFakeAI.globals.face_selector_mode:
        many_faces = get_many_faces(temp_frame)
        if many_faces:
            for target_face in many_faces:
                temp_frame = enhance_face(target_face, temp_frame)
    return temp_frame


def process_frames(source_path : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
    reference_faces = get_reference_faces() if 'reference' in DeepFakeAI.globals.face_selector_mode else None
    for temp_frame_path in temp_frame_paths:
        temp_frame = read_image(temp_frame_path)
        result_frame = process_frame(None, reference_faces, temp_frame)
        write_image(temp_frame_path, result_frame)
        update_progress()


def process_image(source_path : str, target_path : str, output_path : str) -> None:
    reference_faces = get_reference_faces() if 'reference' in DeepFakeAI.globals.face_selector_mode else None
    target_frame = read_static_image(target_path)
    result_frame = process_frame(None, reference_faces, target_frame)
    write_image(output_path, result_frame)


def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
    frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
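
Note on blend_frame above: the slider percentage is inverted before cv2.addWeighted, so a face_enhancer_blend of 100 keeps only the enhanced paste_frame while 0 keeps the original frame untouched. A minimal standalone sketch of the same weighting (the example frames are hypothetical stand-ins, not part of the deleted module):

import cv2
import numpy

def blend(temp_frame, paste_frame, blend_percent):
    # invert the percentage: a higher blend_percent gives more of the enhanced frame
    weight = 1 - (blend_percent / 100)
    return cv2.addWeighted(temp_frame, weight, paste_frame, 1 - weight, 0)

temp_frame = numpy.zeros((4, 4, 3), dtype = numpy.uint8)        # stand-in original frame
paste_frame = numpy.full((4, 4, 3), 255, dtype = numpy.uint8)   # stand-in enhanced frame
print(blend(temp_frame, paste_frame, 80).mean())                # ~204: 80 % enhanced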
DeepFakeAI/processors/frame/modules/face_swapper.py
DELETED
@@ -1,302 +0,0 @@
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import numpy
import onnx
import onnxruntime
from onnx import numpy_helper

import DeepFakeAI.globals
import DeepFakeAI.processors.frame.core as frame_processors
from DeepFakeAI import logger, wording
from DeepFakeAI.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
from DeepFakeAI.face_helper import warp_face, paste_back
from DeepFakeAI.face_store import get_reference_faces
from DeepFakeAI.content_analyser import clear_content_analyser
from DeepFakeAI.typing import Face, FaceSet, Frame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, Embedding
from DeepFakeAI.filesystem import is_file, is_image, are_images, is_video, resolve_relative_path
from DeepFakeAI.download import conditional_download, is_download_done
from DeepFakeAI.vision import read_image, read_static_image, read_static_images, write_image
from DeepFakeAI.processors.frame import globals as frame_processors_globals
from DeepFakeAI.processors.frame import choices as frame_processors_choices
from DeepFakeAI.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser

FRAME_PROCESSOR = None
MODEL_MATRIX = None
THREAD_LOCK : threading.Lock = threading.Lock()
NAME = __name__.upper()
MODELS : ModelSet =\
{
    'blendswap_256':
    {
        'type': 'blendswap',
        'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/blendswap_256.onnx',
        'path': resolve_relative_path('../.assets/models/blendswap_256.onnx'),
        'template': 'ffhq_512',
        'size': (512, 256),
        'mean': [ 0.0, 0.0, 0.0 ],
        'standard_deviation': [ 1.0, 1.0, 1.0 ]
    },
    'inswapper_128':
    {
        'type': 'inswapper',
        'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/inswapper_128.onnx',
        'path': resolve_relative_path('../.assets/models/inswapper_128.onnx'),
        'template': 'arcface_128_v2',
        'size': (128, 128),
        'mean': [ 0.0, 0.0, 0.0 ],
        'standard_deviation': [ 1.0, 1.0, 1.0 ]
    },
    'inswapper_128_fp16':
    {
        'type': 'inswapper',
        'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/inswapper_128_fp16.onnx',
        'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.onnx'),
        'template': 'arcface_128_v2',
        'size': (128, 128),
        'mean': [ 0.0, 0.0, 0.0 ],
        'standard_deviation': [ 1.0, 1.0, 1.0 ]
    },
    'simswap_256':
    {
        'type': 'simswap',
        'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/simswap_256.onnx',
        'path': resolve_relative_path('../.assets/models/simswap_256.onnx'),
        'template': 'arcface_112_v1',
        'size': (112, 256),
        'mean': [ 0.485, 0.456, 0.406 ],
        'standard_deviation': [ 0.229, 0.224, 0.225 ]
    },
    'simswap_512_unofficial':
    {
        'type': 'simswap',
        'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/simswap_512_unofficial.onnx',
        'path': resolve_relative_path('../.assets/models/simswap_512_unofficial.onnx'),
        'template': 'arcface_112_v1',
        'size': (112, 512),
        'mean': [ 0.0, 0.0, 0.0 ],
        'standard_deviation': [ 1.0, 1.0, 1.0 ]
    }
}
OPTIONS : Optional[OptionsWithModel] = None


def get_frame_processor() -> Any:
    global FRAME_PROCESSOR

    with THREAD_LOCK:
        if FRAME_PROCESSOR is None:
            model_path = get_options('model').get('path')
            FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = DeepFakeAI.globals.execution_providers)
    return FRAME_PROCESSOR


def clear_frame_processor() -> None:
    global FRAME_PROCESSOR

    FRAME_PROCESSOR = None


def get_model_matrix() -> Any:
    global MODEL_MATRIX

    with THREAD_LOCK:
        if MODEL_MATRIX is None:
            model_path = get_options('model').get('path')
            model = onnx.load(model_path)
            MODEL_MATRIX = numpy_helper.to_array(model.graph.initializer[-1])
    return MODEL_MATRIX


def clear_model_matrix() -> None:
    global MODEL_MATRIX

    MODEL_MATRIX = None


def get_options(key : Literal['model']) -> Any:
    global OPTIONS

    if OPTIONS is None:
        OPTIONS =\
        {
            'model': MODELS[frame_processors_globals.face_swapper_model]
        }
    return OPTIONS.get(key)


def set_options(key : Literal['model'], value : Any) -> None:
    global OPTIONS

    OPTIONS[key] = value


def register_args(program : ArgumentParser) -> None:
    program.add_argument('--face-swapper-model', help = wording.get('frame_processor_model_help'), default = 'inswapper_128', choices = frame_processors_choices.face_swapper_models)


def apply_args(program : ArgumentParser) -> None:
    args = program.parse_args()
    frame_processors_globals.face_swapper_model = args.face_swapper_model
    if args.face_swapper_model == 'blendswap_256':
        DeepFakeAI.globals.face_recognizer_model = 'arcface_blendswap'
    if args.face_swapper_model == 'inswapper_128' or args.face_swapper_model == 'inswapper_128_fp16':
        DeepFakeAI.globals.face_recognizer_model = 'arcface_inswapper'
    if args.face_swapper_model == 'simswap_256' or args.face_swapper_model == 'simswap_512_unofficial':
        DeepFakeAI.globals.face_recognizer_model = 'arcface_simswap'


def pre_check() -> bool:
    if not DeepFakeAI.globals.skip_download:
        download_directory_path = resolve_relative_path('../.assets/models')
        model_url = get_options('model').get('url')
        conditional_download(download_directory_path, [ model_url ])
    return True


def pre_process(mode : ProcessMode) -> bool:
    model_url = get_options('model').get('url')
    model_path = get_options('model').get('path')
    if not DeepFakeAI.globals.skip_download and not is_download_done(model_url, model_path):
        logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
        return False
    elif not is_file(model_path):
        logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
        return False
    if not are_images(DeepFakeAI.globals.source_paths):
        logger.error(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME)
        return False
    for source_frame in read_static_images(DeepFakeAI.globals.source_paths):
        if not get_one_face(source_frame):
            logger.error(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME)
            return False
    if mode in [ 'output', 'preview' ] and not is_image(DeepFakeAI.globals.target_path) and not is_video(DeepFakeAI.globals.target_path):
        logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
        return False
    if mode == 'output' and not DeepFakeAI.globals.output_path:
        logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
        return False
    return True


def post_process() -> None:
    clear_frame_processor()
    clear_model_matrix()
    clear_face_analyser()
    clear_content_analyser()
    clear_face_occluder()
    clear_face_parser()
    read_static_image.cache_clear()


def swap_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
    frame_processor = get_frame_processor()
    model_template = get_options('model').get('template')
    model_size = get_options('model').get('size')
    model_type = get_options('model').get('type')
    crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, model_template, model_size)
    crop_mask_list = []
    if 'box' in DeepFakeAI.globals.face_mask_types:
        crop_mask_list.append(create_static_box_mask(crop_frame.shape[:2][::-1], DeepFakeAI.globals.face_mask_blur, DeepFakeAI.globals.face_mask_padding))
    if 'occlusion' in DeepFakeAI.globals.face_mask_types:
        crop_mask_list.append(create_occlusion_mask(crop_frame))
    crop_frame = prepare_crop_frame(crop_frame)
    frame_processor_inputs = {}
    for frame_processor_input in frame_processor.get_inputs():
        if frame_processor_input.name == 'source':
            if model_type == 'blendswap':
                frame_processor_inputs[frame_processor_input.name] = prepare_source_frame(source_face)
            else:
                frame_processor_inputs[frame_processor_input.name] = prepare_source_embedding(source_face)
        if frame_processor_input.name == 'target':
            frame_processor_inputs[frame_processor_input.name] = crop_frame
    crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
    crop_frame = normalize_crop_frame(crop_frame)
    if 'region' in DeepFakeAI.globals.face_mask_types:
        crop_mask_list.append(create_region_mask(crop_frame, DeepFakeAI.globals.face_mask_regions))
    crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
    temp_frame = paste_back(temp_frame, crop_frame, crop_mask, affine_matrix)
    return temp_frame


def prepare_source_frame(source_face : Face) -> Frame:
    source_frame = read_static_image(DeepFakeAI.globals.source_paths[0])
    source_frame, _ = warp_face(source_frame, source_face.kps, 'arcface_112_v2', (112, 112))
    source_frame = source_frame[:, :, ::-1] / 255.0
    source_frame = source_frame.transpose(2, 0, 1)
    source_frame = numpy.expand_dims(source_frame, axis = 0).astype(numpy.float32)
    return source_frame


def prepare_source_embedding(source_face : Face) -> Embedding:
    model_type = get_options('model').get('type')
    if model_type == 'inswapper':
        model_matrix = get_model_matrix()
        source_embedding = source_face.embedding.reshape((1, -1))
        source_embedding = numpy.dot(source_embedding, model_matrix) / numpy.linalg.norm(source_embedding)
    else:
        source_embedding = source_face.normed_embedding.reshape(1, -1)
    return source_embedding


def prepare_crop_frame(crop_frame : Frame) -> Frame:
    model_mean = get_options('model').get('mean')
    model_standard_deviation = get_options('model').get('standard_deviation')
    crop_frame = crop_frame[:, :, ::-1] / 255.0
    crop_frame = (crop_frame - model_mean) / model_standard_deviation
    crop_frame = crop_frame.transpose(2, 0, 1)
    crop_frame = numpy.expand_dims(crop_frame, axis = 0).astype(numpy.float32)
    return crop_frame


def normalize_crop_frame(crop_frame : Frame) -> Frame:
    crop_frame = crop_frame.transpose(1, 2, 0)
    crop_frame = (crop_frame * 255.0).round()
    crop_frame = crop_frame[:, :, ::-1].astype(numpy.uint8)
    return crop_frame


def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
    return swap_face(source_face, target_face, temp_frame)


def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
    if 'reference' in DeepFakeAI.globals.face_selector_mode:
        similar_faces = find_similar_faces(temp_frame, reference_faces, DeepFakeAI.globals.reference_face_distance)
        if similar_faces:
            for similar_face in similar_faces:
                temp_frame = swap_face(source_face, similar_face, temp_frame)
    if 'one' in DeepFakeAI.globals.face_selector_mode:
        target_face = get_one_face(temp_frame)
        if target_face:
            temp_frame = swap_face(source_face, target_face, temp_frame)
    if 'many' in DeepFakeAI.globals.face_selector_mode:
        many_faces = get_many_faces(temp_frame)
        if many_faces:
            for target_face in many_faces:
                temp_frame = swap_face(source_face, target_face, temp_frame)
    return temp_frame


def process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
    source_frames = read_static_images(source_paths)
    source_face = get_average_face(source_frames)
    reference_faces = get_reference_faces() if 'reference' in DeepFakeAI.globals.face_selector_mode else None
    for temp_frame_path in temp_frame_paths:
        temp_frame = read_image(temp_frame_path)
        result_frame = process_frame(source_face, reference_faces, temp_frame)
        write_image(temp_frame_path, result_frame)
        update_progress()


def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
    source_frames = read_static_images(source_paths)
    source_face = get_average_face(source_frames)
    reference_faces = get_reference_faces() if 'reference' in DeepFakeAI.globals.face_selector_mode else None
    target_frame = read_static_image(target_path)
    result_frame = process_frame(source_face, reference_faces, target_frame)
    write_image(output_path, result_frame)


def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
    frame_processors.multi_process_frames(source_paths, temp_frame_paths, process_frames)
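
The inswapper branch of prepare_source_embedding above projects the raw ArcFace embedding through the last initializer matrix of the ONNX graph and rescales it by the embedding norm. A minimal sketch of that transform with random stand-ins for the embedding and the model matrix (the 512 dimension is assumed, matching ArcFace):

import numpy

def project_embedding(embedding, model_matrix):
    # reshape to a row vector, project through the model matrix,
    # then divide by the norm of the original embedding
    embedding = embedding.reshape((1, -1))
    return numpy.dot(embedding, model_matrix) / numpy.linalg.norm(embedding)

embedding = numpy.random.rand(512).astype(numpy.float32)           # stand-in ArcFace embedding
model_matrix = numpy.random.rand(512, 512).astype(numpy.float32)   # stand-in for graph.initializer[-1]
print(project_embedding(embedding, model_matrix).shape)            # (1, 512)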
DeepFakeAI/processors/frame/modules/frame_enhancer.py
DELETED
@@ -1,172 +0,0 @@
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import cv2
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

import DeepFakeAI.globals
import DeepFakeAI.processors.frame.core as frame_processors
from DeepFakeAI import logger, wording
from DeepFakeAI.face_analyser import clear_face_analyser
from DeepFakeAI.content_analyser import clear_content_analyser
from DeepFakeAI.typing import Face, FaceSet, Frame, Update_Process, ProcessMode, ModelSet, OptionsWithModel
from DeepFakeAI.common_helper import create_metavar
from DeepFakeAI.execution_helper import map_device
from DeepFakeAI.filesystem import is_file, resolve_relative_path
from DeepFakeAI.download import conditional_download, is_download_done
from DeepFakeAI.vision import read_image, read_static_image, write_image
from DeepFakeAI.processors.frame import globals as frame_processors_globals
from DeepFakeAI.processors.frame import choices as frame_processors_choices

FRAME_PROCESSOR = None
THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
THREAD_LOCK : threading.Lock = threading.Lock()
NAME = __name__.upper()
MODELS : ModelSet =\
{
    'real_esrgan_x2plus':
    {
        'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/real_esrgan_x2plus.pth',
        'path': resolve_relative_path('../.assets/models/real_esrgan_x2plus.pth'),
        'scale': 2
    },
    'real_esrgan_x4plus':
    {
        'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/real_esrgan_x4plus.pth',
        'path': resolve_relative_path('../.assets/models/real_esrgan_x4plus.pth'),
        'scale': 4
    },
    'real_esrnet_x4plus':
    {
        'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/real_esrnet_x4plus.pth',
        'path': resolve_relative_path('../.assets/models/real_esrnet_x4plus.pth'),
        'scale': 4
    }
}
OPTIONS : Optional[OptionsWithModel] = None


def get_frame_processor() -> Any:
    global FRAME_PROCESSOR

    with THREAD_LOCK:
        if FRAME_PROCESSOR is None:
            model_path = get_options('model').get('path')
            model_scale = get_options('model').get('scale')
            FRAME_PROCESSOR = RealESRGANer(
                model_path = model_path,
                model = RRDBNet(
                    num_in_ch = 3,
                    num_out_ch = 3,
                    scale = model_scale
                ),
                device = map_device(DeepFakeAI.globals.execution_providers),
                scale = model_scale
            )
    return FRAME_PROCESSOR


def clear_frame_processor() -> None:
    global FRAME_PROCESSOR

    FRAME_PROCESSOR = None


def get_options(key : Literal['model']) -> Any:
    global OPTIONS

    if OPTIONS is None:
        OPTIONS =\
        {
            'model': MODELS[frame_processors_globals.frame_enhancer_model]
        }
    return OPTIONS.get(key)


def set_options(key : Literal['model'], value : Any) -> None:
    global OPTIONS

    OPTIONS[key] = value


def register_args(program : ArgumentParser) -> None:
    program.add_argument('--frame-enhancer-model', help = wording.get('frame_processor_model_help'), default = 'real_esrgan_x2plus', choices = frame_processors_choices.frame_enhancer_models)
    program.add_argument('--frame-enhancer-blend', help = wording.get('frame_processor_blend_help'), type = int, default = 80, choices = frame_processors_choices.frame_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.frame_enhancer_blend_range))


def apply_args(program : ArgumentParser) -> None:
    args = program.parse_args()
    frame_processors_globals.frame_enhancer_model = args.frame_enhancer_model
    frame_processors_globals.frame_enhancer_blend = args.frame_enhancer_blend


def pre_check() -> bool:
    if not DeepFakeAI.globals.skip_download:
        download_directory_path = resolve_relative_path('../.assets/models')
        model_url = get_options('model').get('url')
        conditional_download(download_directory_path, [ model_url ])
    return True


def pre_process(mode : ProcessMode) -> bool:
    model_url = get_options('model').get('url')
    model_path = get_options('model').get('path')
    if not DeepFakeAI.globals.skip_download and not is_download_done(model_url, model_path):
        logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
        return False
    elif not is_file(model_path):
        logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
        return False
    if mode == 'output' and not DeepFakeAI.globals.output_path:
        logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
        return False
    return True


def post_process() -> None:
    clear_frame_processor()
    clear_face_analyser()
    clear_content_analyser()
    read_static_image.cache_clear()


def enhance_frame(temp_frame : Frame) -> Frame:
    with THREAD_SEMAPHORE:
        paste_frame, _ = get_frame_processor().enhance(temp_frame)
    temp_frame = blend_frame(temp_frame, paste_frame)
    return temp_frame


def blend_frame(temp_frame : Frame, paste_frame : Frame) -> Frame:
    frame_enhancer_blend = 1 - (frame_processors_globals.frame_enhancer_blend / 100)
    paste_frame_height, paste_frame_width = paste_frame.shape[0:2]
    temp_frame = cv2.resize(temp_frame, (paste_frame_width, paste_frame_height))
    temp_frame = cv2.addWeighted(temp_frame, frame_enhancer_blend, paste_frame, 1 - frame_enhancer_blend, 0)
    return temp_frame


def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
    pass


def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
    return enhance_frame(temp_frame)


def process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
    for temp_frame_path in temp_frame_paths:
        temp_frame = read_image(temp_frame_path)
        result_frame = process_frame(None, None, temp_frame)
        write_image(temp_frame_path, result_frame)
        update_progress()


def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
    target_frame = read_static_image(target_path)
    result = process_frame(None, None, target_frame)
    write_image(output_path, result)


def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
    frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
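
Unlike the face enhancer, blend_frame above must first upscale the original frame: RealESRGANer returns a frame 2x or 4x larger, so temp_frame is resized to the enhanced resolution before the weighted blend. A minimal sketch of that step (the example frames are hypothetical stand-ins):

import cv2
import numpy

def blend_upscaled(temp_frame, paste_frame, blend_percent):
    weight = 1 - (blend_percent / 100)
    # bring the original up to the enhanced resolution before blending
    height, width = paste_frame.shape[:2]
    temp_frame = cv2.resize(temp_frame, (width, height))
    return cv2.addWeighted(temp_frame, weight, paste_frame, 1 - weight, 0)

temp_frame = numpy.zeros((8, 8, 3), dtype = numpy.uint8)          # stand-in original frame
paste_frame = numpy.full((16, 16, 3), 200, dtype = numpy.uint8)   # stand-in 2x upscale
print(blend_upscaled(temp_frame, paste_frame, 80).shape)          # (16, 16, 3)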
DeepFakeAI/processors/frame/typings.py
DELETED
@@ -1,7 +0,0 @@
from typing import Literal

FaceSwapperModel = Literal['blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial']
FaceEnhancerModel = Literal['codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer']
FrameEnhancerModel = Literal['real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus']

FaceDebuggerItem = Literal['bbox', 'kps', 'face-mask', 'score']
DeepFakeAI/typing.py
DELETED
@@ -1,51 +0,0 @@
from typing import Any, Literal, Callable, List, Tuple, Dict, TypedDict
from collections import namedtuple
import numpy

Bbox = numpy.ndarray[Any, Any]
Kps = numpy.ndarray[Any, Any]
Score = float
Embedding = numpy.ndarray[Any, Any]
Face = namedtuple('Face',
[
    'bbox',
    'kps',
    'score',
    'embedding',
    'normed_embedding',
    'gender',
    'age'
])
FaceSet = Dict[str, List[Face]]
FaceStore = TypedDict('FaceStore',
{
    'static_faces' : FaceSet,
    'reference_faces': FaceSet
})
Frame = numpy.ndarray[Any, Any]
Mask = numpy.ndarray[Any, Any]
Matrix = numpy.ndarray[Any, Any]
Padding = Tuple[int, int, int, int]

Update_Process = Callable[[], None]
Process_Frames = Callable[[List[str], List[str], Update_Process], None]
LogLevel = Literal['error', 'warn', 'info', 'debug']
Template = Literal['arcface_112_v1', 'arcface_112_v2', 'arcface_128_v2', 'ffhq_512']
ProcessMode = Literal['output', 'preview', 'stream']
FaceSelectorMode = Literal['reference', 'one', 'many']
FaceAnalyserOrder = Literal['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best']
FaceAnalyserAge = Literal['child', 'teen', 'adult', 'senior']
FaceAnalyserGender = Literal['male', 'female']
FaceDetectorModel = Literal['retinaface', 'yunet']
FaceRecognizerModel = Literal['arcface_blendswap', 'arcface_inswapper', 'arcface_simswap']
FaceMaskType = Literal['box', 'occlusion', 'region']
FaceMaskRegion = Literal['skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 'eye-glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip']
TempFrameFormat = Literal['jpg', 'png']
OutputVideoEncoder = Literal['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc']

ModelValue = Dict[str, Any]
ModelSet = Dict[str, ModelValue]
OptionsWithModel = TypedDict('OptionsWithModel',
{
    'model' : ModelValue
})
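
For orientation, a minimal sketch of how these aliases compose in practice (all values below are hypothetical stand-ins, not taken from the deleted code):

from collections import namedtuple
import numpy

Face = namedtuple('Face', [ 'bbox', 'kps', 'score', 'embedding', 'normed_embedding', 'gender', 'age' ])

face = Face(
    bbox = numpy.array([ 0, 0, 128, 128 ]),   # x1, y1, x2, y2
    kps = numpy.zeros((5, 2)),                # five landmark points
    score = 0.99,
    embedding = numpy.zeros(512),
    normed_embedding = numpy.zeros(512),
    gender = 0,
    age = 30
)
face_set = { 'reference': [ face ] }          # a FaceSet maps a key to a list of faces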
DeepFakeAI/uis/__init__.py
DELETED
File without changes
DeepFakeAI/uis/assets/fixes.css
DELETED
@@ -1,7 +0,0 @@
:root:root:root button:not([class])
{
    border-radius: 0.375rem;
    float: left;
    overflow: hidden;
    width: 100%;
}
DeepFakeAI/uis/assets/overrides.css
DELETED
@@ -1,44 +0,0 @@
:root:root:root input[type="number"]
{
    max-width: 6rem;
}

:root:root:root [type="checkbox"],
:root:root:root [type="radio"]
{
    border-radius: 50%;
    height: 1.125rem;
    width: 1.125rem;
}

:root:root:root input[type="range"]
{
    height: 0.5rem;
}

:root:root:root input[type="range"]::-moz-range-thumb,
:root:root:root input[type="range"]::-webkit-slider-thumb
{
    background: var(--neutral-300);
    border: unset;
    border-radius: 50%;
    height: 1.125rem;
    width: 1.125rem;
}

:root:root:root input[type="range"]::-webkit-slider-thumb
{
    margin-top: 0.375rem;
}

:root:root:root .grid-wrap.fixed-height
{
    min-height: unset;
}

:root:root:root .grid-container
{
    grid-auto-rows: minmax(5em, 1fr);
    grid-template-columns: repeat(var(--grid-cols), minmax(5em, 1fr));
    grid-template-rows: repeat(var(--grid-rows), minmax(5em, 1fr));
}
DeepFakeAI/uis/choices.py
DELETED
@@ -1,7 +0,0 @@
from typing import List

from DeepFakeAI.uis.typing import WebcamMode

common_options : List[str] = [ 'keep-fps', 'keep-temp', 'skip-audio', 'skip-download' ]
webcam_modes : List[WebcamMode] = [ 'inline', 'udp', 'v4l2' ]
webcam_resolutions : List[str] = [ '320x240', '640x480', '800x600', '1024x768', '1280x720', '1280x960', '1920x1080', '2560x1440', '3840x2160' ]
DeepFakeAI/uis/components/__init__.py
DELETED
File without changes
DeepFakeAI/uis/components/about.py
DELETED
@@ -1,23 +0,0 @@
from typing import Optional
import gradio

from DeepFakeAI import metadata, wording

ABOUT_BUTTON : Optional[gradio.HTML] = None
DONATE_BUTTON : Optional[gradio.HTML] = None


def render() -> None:
    global ABOUT_BUTTON
    global DONATE_BUTTON

    ABOUT_BUTTON = gradio.Button(
        value = metadata.get('name') + ' ' + metadata.get('version'),
        variant = 'primary',
        link = metadata.get('url')
    )
    DONATE_BUTTON = gradio.Button(
        value = wording.get('donate_button_label'),
        link = 'https://donate.DeepFakeAI.io',
        size = 'sm'
    )
DeepFakeAI/uis/components/benchmark.py
DELETED
@@ -1,132 +0,0 @@
from typing import Any, Optional, List, Dict, Generator
import time
import tempfile
import statistics
import gradio

import DeepFakeAI.globals
from DeepFakeAI import wording
from DeepFakeAI.face_analyser import get_face_analyser
from DeepFakeAI.face_store import clear_static_faces
from DeepFakeAI.processors.frame.core import get_frame_processors_modules
from DeepFakeAI.vision import count_video_frame_total
from DeepFakeAI.core import limit_resources, conditional_process
from DeepFakeAI.normalizer import normalize_output_path
from DeepFakeAI.filesystem import clear_temp
from DeepFakeAI.uis.core import get_ui_component

BENCHMARK_RESULTS_DATAFRAME : Optional[gradio.Dataframe] = None
BENCHMARK_START_BUTTON : Optional[gradio.Button] = None
BENCHMARK_CLEAR_BUTTON : Optional[gradio.Button] = None
BENCHMARKS : Dict[str, str] =\
{
    '240p': '.assets/examples/target-240p.mp4',
    '360p': '.assets/examples/target-360p.mp4',
    '540p': '.assets/examples/target-540p.mp4',
    '720p': '.assets/examples/target-720p.mp4',
    '1080p': '.assets/examples/target-1080p.mp4',
    '1440p': '.assets/examples/target-1440p.mp4',
    '2160p': '.assets/examples/target-2160p.mp4'
}


def render() -> None:
    global BENCHMARK_RESULTS_DATAFRAME
    global BENCHMARK_START_BUTTON
    global BENCHMARK_CLEAR_BUTTON

    BENCHMARK_RESULTS_DATAFRAME = gradio.Dataframe(
        label = wording.get('benchmark_results_dataframe_label'),
        headers =
        [
            'target_path',
            'benchmark_cycles',
            'average_run',
            'fastest_run',
            'slowest_run',
            'relative_fps'
        ],
        datatype =
        [
            'str',
            'number',
            'number',
            'number',
            'number',
            'number'
        ]
    )
    BENCHMARK_START_BUTTON = gradio.Button(
        value = wording.get('start_button_label'),
        variant = 'primary',
        size = 'sm'
    )
    BENCHMARK_CLEAR_BUTTON = gradio.Button(
        value = wording.get('clear_button_label'),
        size = 'sm'
    )


def listen() -> None:
    benchmark_runs_checkbox_group = get_ui_component('benchmark_runs_checkbox_group')
    benchmark_cycles_slider = get_ui_component('benchmark_cycles_slider')
    if benchmark_runs_checkbox_group and benchmark_cycles_slider:
        BENCHMARK_START_BUTTON.click(start, inputs = [ benchmark_runs_checkbox_group, benchmark_cycles_slider ], outputs = BENCHMARK_RESULTS_DATAFRAME)
    BENCHMARK_CLEAR_BUTTON.click(clear, outputs = BENCHMARK_RESULTS_DATAFRAME)


def start(benchmark_runs : List[str], benchmark_cycles : int) -> Generator[List[Any], None, None]:
    DeepFakeAI.globals.source_paths = [ '.assets/examples/source.jpg' ]
    target_paths = [ BENCHMARKS[benchmark_run] for benchmark_run in benchmark_runs if benchmark_run in BENCHMARKS ]
    benchmark_results = []
    if target_paths:
        pre_process()
        for target_path in target_paths:
            benchmark_results.append(benchmark(target_path, benchmark_cycles))
            yield benchmark_results
        post_process()


def pre_process() -> None:
    limit_resources()
    get_face_analyser()
    for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
        frame_processor_module.get_frame_processor()


def post_process() -> None:
    clear_static_faces()


def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]:
    process_times = []
    total_fps = 0.0
    for i in range(benchmark_cycles):
        DeepFakeAI.globals.target_path = target_path
        DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_paths, DeepFakeAI.globals.target_path, tempfile.gettempdir())
        video_frame_total = count_video_frame_total(DeepFakeAI.globals.target_path)
        start_time = time.perf_counter()
        conditional_process()
        end_time = time.perf_counter()
        process_time = end_time - start_time
        total_fps += video_frame_total / process_time
        process_times.append(process_time)
    average_run = round(statistics.mean(process_times), 2)
    fastest_run = round(min(process_times), 2)
    slowest_run = round(max(process_times), 2)
    relative_fps = round(total_fps / benchmark_cycles, 2)
    return\
    [
        DeepFakeAI.globals.target_path,
        benchmark_cycles,
        average_run,
        fastest_run,
        slowest_run,
        relative_fps
    ]


def clear() -> gradio.Dataframe:
    if DeepFakeAI.globals.target_path:
        clear_temp(DeepFakeAI.globals.target_path)
    return gradio.Dataframe(value = None)
DeepFakeAI/uis/components/benchmark_options.py
DELETED
@@ -1,29 +0,0 @@
from typing import Optional
import gradio

from DeepFakeAI import wording
from DeepFakeAI.uis.core import register_ui_component
from DeepFakeAI.uis.components.benchmark import BENCHMARKS

BENCHMARK_RUNS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
BENCHMARK_CYCLES_SLIDER : Optional[gradio.Button] = None


def render() -> None:
    global BENCHMARK_RUNS_CHECKBOX_GROUP
    global BENCHMARK_CYCLES_SLIDER

    BENCHMARK_RUNS_CHECKBOX_GROUP = gradio.CheckboxGroup(
        label = wording.get('benchmark_runs_checkbox_group_label'),
        value = list(BENCHMARKS.keys()),
        choices = list(BENCHMARKS.keys())
    )
    BENCHMARK_CYCLES_SLIDER = gradio.Slider(
        label = wording.get('benchmark_cycles_slider_label'),
        value = 3,
        step = 1,
        minimum = 1,
        maximum = 10
    )
    register_ui_component('benchmark_runs_checkbox_group', BENCHMARK_RUNS_CHECKBOX_GROUP)
    register_ui_component('benchmark_cycles_slider', BENCHMARK_CYCLES_SLIDER)
DeepFakeAI/uis/components/common_options.py
DELETED
@@ -1,38 +0,0 @@
from typing import Optional, List
import gradio

import DeepFakeAI.globals
from DeepFakeAI import wording
from DeepFakeAI.uis import choices as uis_choices

COMMON_OPTIONS_CHECKBOX_GROUP : Optional[gradio.Checkboxgroup] = None


def render() -> None:
    global COMMON_OPTIONS_CHECKBOX_GROUP

    value = []
    if DeepFakeAI.globals.keep_fps:
        value.append('keep-fps')
    if DeepFakeAI.globals.keep_temp:
        value.append('keep-temp')
    if DeepFakeAI.globals.skip_audio:
        value.append('skip-audio')
    if DeepFakeAI.globals.skip_download:
        value.append('skip-download')
    COMMON_OPTIONS_CHECKBOX_GROUP = gradio.Checkboxgroup(
        label = wording.get('common_options_checkbox_group_label'),
        choices = uis_choices.common_options,
        value = value
    )


def listen() -> None:
    COMMON_OPTIONS_CHECKBOX_GROUP.change(update, inputs = COMMON_OPTIONS_CHECKBOX_GROUP)


def update(common_options : List[str]) -> None:
    DeepFakeAI.globals.keep_fps = 'keep-fps' in common_options
    DeepFakeAI.globals.keep_temp = 'keep-temp' in common_options
    DeepFakeAI.globals.skip_audio = 'skip-audio' in common_options
    DeepFakeAI.globals.skip_download = 'skip-download' in common_options
DeepFakeAI/uis/components/execution.py
DELETED
@@ -1,34 +0,0 @@
from typing import List, Optional
import gradio
import onnxruntime

import DeepFakeAI.globals
from DeepFakeAI import wording
from DeepFakeAI.face_analyser import clear_face_analyser
from DeepFakeAI.processors.frame.core import clear_frame_processors_modules
from DeepFakeAI.execution_helper import encode_execution_providers, decode_execution_providers

EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None


def render() -> None:
    global EXECUTION_PROVIDERS_CHECKBOX_GROUP

    EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup(
        label = wording.get('execution_providers_checkbox_group_label'),
        choices = encode_execution_providers(onnxruntime.get_available_providers()),
        value = encode_execution_providers(DeepFakeAI.globals.execution_providers)
    )


def listen() -> None:
    EXECUTION_PROVIDERS_CHECKBOX_GROUP.change(update_execution_providers, inputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP, outputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP)


def update_execution_providers(execution_providers : List[str]) -> gradio.CheckboxGroup:
    clear_face_analyser()
    clear_frame_processors_modules()
    if not execution_providers:
        execution_providers = encode_execution_providers(onnxruntime.get_available_providers())
    DeepFakeAI.globals.execution_providers = decode_execution_providers(execution_providers)
    return gradio.CheckboxGroup(value = execution_providers)
DeepFakeAI/uis/components/execution_queue_count.py
DELETED
@@ -1,28 +0,0 @@
from typing import Optional
import gradio

import DeepFakeAI.globals
import DeepFakeAI.choices
from DeepFakeAI import wording

EXECUTION_QUEUE_COUNT_SLIDER : Optional[gradio.Slider] = None


def render() -> None:
    global EXECUTION_QUEUE_COUNT_SLIDER

    EXECUTION_QUEUE_COUNT_SLIDER = gradio.Slider(
        label = wording.get('execution_queue_count_slider_label'),
        value = DeepFakeAI.globals.execution_queue_count,
        step = DeepFakeAI.choices.execution_queue_count_range[1] - DeepFakeAI.choices.execution_queue_count_range[0],
        minimum = DeepFakeAI.choices.execution_queue_count_range[0],
        maximum = DeepFakeAI.choices.execution_queue_count_range[-1]
    )


def listen() -> None:
    EXECUTION_QUEUE_COUNT_SLIDER.change(update_execution_queue_count, inputs = EXECUTION_QUEUE_COUNT_SLIDER)


def update_execution_queue_count(execution_queue_count : int = 1) -> None:
    DeepFakeAI.globals.execution_queue_count = execution_queue_count
DeepFakeAI/uis/components/execution_thread_count.py
DELETED
@@ -1,29 +0,0 @@
from typing import Optional
import gradio

import DeepFakeAI.globals
import DeepFakeAI.choices
from DeepFakeAI import wording

EXECUTION_THREAD_COUNT_SLIDER : Optional[gradio.Slider] = None


def render() -> None:
    global EXECUTION_THREAD_COUNT_SLIDER

    EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider(
        label = wording.get('execution_thread_count_slider_label'),
        value = DeepFakeAI.globals.execution_thread_count,
        step = DeepFakeAI.choices.execution_thread_count_range[1] - DeepFakeAI.choices.execution_thread_count_range[0],
        minimum = DeepFakeAI.choices.execution_thread_count_range[0],
        maximum = DeepFakeAI.choices.execution_thread_count_range[-1]
    )


def listen() -> None:
    EXECUTION_THREAD_COUNT_SLIDER.change(update_execution_thread_count, inputs = EXECUTION_THREAD_COUNT_SLIDER)


def update_execution_thread_count(execution_thread_count : int = 1) -> None:
    DeepFakeAI.globals.execution_thread_count = execution_thread_count
DeepFakeAI/uis/components/face_analyser.py
DELETED
@@ -1,98 +0,0 @@
from typing import Optional

import gradio

import DeepFakeAI.globals
import DeepFakeAI.choices
from DeepFakeAI import wording
from DeepFakeAI.typing import FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceDetectorModel
from DeepFakeAI.uis.core import register_ui_component

FACE_ANALYSER_ORDER_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_ANALYSER_AGE_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_ANALYSER_GENDER_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_DETECTOR_SIZE_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_DETECTOR_SCORE_SLIDER : Optional[gradio.Slider] = None
FACE_DETECTOR_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None


def render() -> None:
    global FACE_ANALYSER_ORDER_DROPDOWN
    global FACE_ANALYSER_AGE_DROPDOWN
    global FACE_ANALYSER_GENDER_DROPDOWN
    global FACE_DETECTOR_SIZE_DROPDOWN
    global FACE_DETECTOR_SCORE_SLIDER
    global FACE_DETECTOR_MODEL_DROPDOWN

    with gradio.Row():
        FACE_ANALYSER_ORDER_DROPDOWN = gradio.Dropdown(
            label = wording.get('face_analyser_order_dropdown_label'),
            choices = DeepFakeAI.choices.face_analyser_orders,
            value = DeepFakeAI.globals.face_analyser_order
        )
        FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown(
            label = wording.get('face_analyser_age_dropdown_label'),
            choices = [ 'none' ] + DeepFakeAI.choices.face_analyser_ages,
            value = DeepFakeAI.globals.face_analyser_age or 'none'
        )
        FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown(
            label = wording.get('face_analyser_gender_dropdown_label'),
            choices = [ 'none' ] + DeepFakeAI.choices.face_analyser_genders,
            value = DeepFakeAI.globals.face_analyser_gender or 'none'
        )
    FACE_DETECTOR_MODEL_DROPDOWN = gradio.Dropdown(
        label = wording.get('face_detector_model_dropdown_label'),
        choices = DeepFakeAI.choices.face_detector_models,
        value = DeepFakeAI.globals.face_detector_model
    )
    FACE_DETECTOR_SIZE_DROPDOWN = gradio.Dropdown(
        label = wording.get('face_detector_size_dropdown_label'),
        choices = DeepFakeAI.choices.face_detector_sizes,
        value = DeepFakeAI.globals.face_detector_size
    )
    FACE_DETECTOR_SCORE_SLIDER = gradio.Slider(
        label = wording.get('face_detector_score_slider_label'),
        value = DeepFakeAI.globals.face_detector_score,
        step = DeepFakeAI.choices.face_detector_score_range[1] - DeepFakeAI.choices.face_detector_score_range[0],
        minimum = DeepFakeAI.choices.face_detector_score_range[0],
        maximum = DeepFakeAI.choices.face_detector_score_range[-1]
    )
    register_ui_component('face_analyser_order_dropdown', FACE_ANALYSER_ORDER_DROPDOWN)
    register_ui_component('face_analyser_age_dropdown', FACE_ANALYSER_AGE_DROPDOWN)
    register_ui_component('face_analyser_gender_dropdown', FACE_ANALYSER_GENDER_DROPDOWN)
    register_ui_component('face_detector_model_dropdown', FACE_DETECTOR_MODEL_DROPDOWN)
    register_ui_component('face_detector_size_dropdown', FACE_DETECTOR_SIZE_DROPDOWN)
    register_ui_component('face_detector_score_slider', FACE_DETECTOR_SCORE_SLIDER)


def listen() -> None:
    FACE_ANALYSER_ORDER_DROPDOWN.select(update_face_analyser_order, inputs = FACE_ANALYSER_ORDER_DROPDOWN)
    FACE_ANALYSER_AGE_DROPDOWN.select(update_face_analyser_age, inputs = FACE_ANALYSER_AGE_DROPDOWN)
    FACE_ANALYSER_GENDER_DROPDOWN.select(update_face_analyser_gender, inputs = FACE_ANALYSER_GENDER_DROPDOWN)
    FACE_DETECTOR_MODEL_DROPDOWN.change(update_face_detector_model, inputs = FACE_DETECTOR_MODEL_DROPDOWN)
    FACE_DETECTOR_SIZE_DROPDOWN.select(update_face_detector_size, inputs = FACE_DETECTOR_SIZE_DROPDOWN)
    FACE_DETECTOR_SCORE_SLIDER.change(update_face_detector_score, inputs = FACE_DETECTOR_SCORE_SLIDER)


def update_face_analyser_order(face_analyser_order : FaceAnalyserOrder) -> None:
    DeepFakeAI.globals.face_analyser_order = face_analyser_order if face_analyser_order != 'none' else None


def update_face_analyser_age(face_analyser_age : FaceAnalyserAge) -> None:
    DeepFakeAI.globals.face_analyser_age = face_analyser_age if face_analyser_age != 'none' else None


def update_face_analyser_gender(face_analyser_gender : FaceAnalyserGender) -> None:
    DeepFakeAI.globals.face_analyser_gender = face_analyser_gender if face_analyser_gender != 'none' else None


def update_face_detector_model(face_detector_model : FaceDetectorModel) -> None:
    DeepFakeAI.globals.face_detector_model = face_detector_model


def update_face_detector_size(face_detector_size : str) -> None:
    DeepFakeAI.globals.face_detector_size = face_detector_size


def update_face_detector_score(face_detector_score : float) -> None:
    DeepFakeAI.globals.face_detector_score = face_detector_score
DeepFakeAI/uis/components/face_masker.py
DELETED
@@ -1,123 +0,0 @@
|
|
1 |
-
from typing import Optional, Tuple, List
|
2 |
-
import gradio
|
3 |
-
|
4 |
-
import DeepFakeAI.globals
|
5 |
-
import DeepFakeAI.choices
|
6 |
-
from DeepFakeAI import wording
|
7 |
-
from DeepFakeAI.typing import FaceMaskType, FaceMaskRegion
|
8 |
-
from DeepFakeAI.uis.core import register_ui_component
|
9 |
-
|
10 |
-
FACE_MASK_TYPES_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
|
11 |
-
FACE_MASK_BLUR_SLIDER : Optional[gradio.Slider] = None
|
12 |
-
FACE_MASK_BOX_GROUP : Optional[gradio.Group] = None
|
13 |
-
FACE_MASK_REGION_GROUP : Optional[gradio.Group] = None
|
14 |
-
FACE_MASK_PADDING_TOP_SLIDER : Optional[gradio.Slider] = None
|
15 |
-
FACE_MASK_PADDING_RIGHT_SLIDER : Optional[gradio.Slider] = None
|
16 |
-
FACE_MASK_PADDING_BOTTOM_SLIDER : Optional[gradio.Slider] = None
|
17 |
-
FACE_MASK_PADDING_LEFT_SLIDER : Optional[gradio.Slider] = None
|
18 |
-
FACE_MASK_REGION_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
|
19 |
-
|
20 |
-
|
21 |
-
def render() -> None:
|
22 |
-
global FACE_MASK_TYPES_CHECKBOX_GROUP
|
23 |
-
global FACE_MASK_BLUR_SLIDER
|
24 |
-
global FACE_MASK_BOX_GROUP
|
25 |
-
global FACE_MASK_REGION_GROUP
|
26 |
-
global FACE_MASK_PADDING_TOP_SLIDER
|
27 |
-
global FACE_MASK_PADDING_RIGHT_SLIDER
|
28 |
-
global FACE_MASK_PADDING_BOTTOM_SLIDER
|
29 |
-
global FACE_MASK_PADDING_LEFT_SLIDER
|
30 |
-
global FACE_MASK_REGION_CHECKBOX_GROUP
|
31 |
-
|
32 |
-
has_box_mask = 'box' in DeepFakeAI.globals.face_mask_types
|
33 |
-
has_region_mask = 'region' in DeepFakeAI.globals.face_mask_types
|
34 |
-
FACE_MASK_TYPES_CHECKBOX_GROUP = gradio.CheckboxGroup(
|
35 |
-
label = wording.get('face_mask_types_checkbox_group_label'),
|
36 |
-
choices = DeepFakeAI.choices.face_mask_types,
|
37 |
-
value = DeepFakeAI.globals.face_mask_types
|
38 |
-
)
|
39 |
-
with gradio.Group(visible = has_box_mask) as FACE_MASK_BOX_GROUP:
|
40 |
-
FACE_MASK_BLUR_SLIDER = gradio.Slider(
|
41 |
-
label = wording.get('face_mask_blur_slider_label'),
|
42 |
-
step = DeepFakeAI.choices.face_mask_blur_range[1] - DeepFakeAI.choices.face_mask_blur_range[0],
|
43 |
-
minimum = DeepFakeAI.choices.face_mask_blur_range[0],
|
44 |
-
			maximum = DeepFakeAI.choices.face_mask_blur_range[-1],
			value = DeepFakeAI.globals.face_mask_blur
		)
		with gradio.Row():
			FACE_MASK_PADDING_TOP_SLIDER = gradio.Slider(
				label = wording.get('face_mask_padding_top_slider_label'),
				step = DeepFakeAI.choices.face_mask_padding_range[1] - DeepFakeAI.choices.face_mask_padding_range[0],
				minimum = DeepFakeAI.choices.face_mask_padding_range[0],
				maximum = DeepFakeAI.choices.face_mask_padding_range[-1],
				value = DeepFakeAI.globals.face_mask_padding[0]
			)
			FACE_MASK_PADDING_RIGHT_SLIDER = gradio.Slider(
				label = wording.get('face_mask_padding_right_slider_label'),
				step = DeepFakeAI.choices.face_mask_padding_range[1] - DeepFakeAI.choices.face_mask_padding_range[0],
				minimum = DeepFakeAI.choices.face_mask_padding_range[0],
				maximum = DeepFakeAI.choices.face_mask_padding_range[-1],
				value = DeepFakeAI.globals.face_mask_padding[1]
			)
		with gradio.Row():
			FACE_MASK_PADDING_BOTTOM_SLIDER = gradio.Slider(
				label = wording.get('face_mask_padding_bottom_slider_label'),
				step = DeepFakeAI.choices.face_mask_padding_range[1] - DeepFakeAI.choices.face_mask_padding_range[0],
				minimum = DeepFakeAI.choices.face_mask_padding_range[0],
				maximum = DeepFakeAI.choices.face_mask_padding_range[-1],
				value = DeepFakeAI.globals.face_mask_padding[2]
			)
			FACE_MASK_PADDING_LEFT_SLIDER = gradio.Slider(
				label = wording.get('face_mask_padding_left_slider_label'),
				step = DeepFakeAI.choices.face_mask_padding_range[1] - DeepFakeAI.choices.face_mask_padding_range[0],
				minimum = DeepFakeAI.choices.face_mask_padding_range[0],
				maximum = DeepFakeAI.choices.face_mask_padding_range[-1],
				value = DeepFakeAI.globals.face_mask_padding[3]
			)
	with gradio.Row():
		FACE_MASK_REGION_CHECKBOX_GROUP = gradio.CheckboxGroup(
			label = wording.get('face_mask_region_checkbox_group_label'),
			choices = DeepFakeAI.choices.face_mask_regions,
			value = DeepFakeAI.globals.face_mask_regions,
			visible = has_region_mask
		)
	register_ui_component('face_mask_types_checkbox_group', FACE_MASK_TYPES_CHECKBOX_GROUP)
	register_ui_component('face_mask_blur_slider', FACE_MASK_BLUR_SLIDER)
	register_ui_component('face_mask_padding_top_slider', FACE_MASK_PADDING_TOP_SLIDER)
	register_ui_component('face_mask_padding_right_slider', FACE_MASK_PADDING_RIGHT_SLIDER)
	register_ui_component('face_mask_padding_bottom_slider', FACE_MASK_PADDING_BOTTOM_SLIDER)
	register_ui_component('face_mask_padding_left_slider', FACE_MASK_PADDING_LEFT_SLIDER)
	register_ui_component('face_mask_region_checkbox_group', FACE_MASK_REGION_CHECKBOX_GROUP)


def listen() -> None:
	FACE_MASK_TYPES_CHECKBOX_GROUP.change(update_face_mask_type, inputs = FACE_MASK_TYPES_CHECKBOX_GROUP, outputs = [ FACE_MASK_TYPES_CHECKBOX_GROUP, FACE_MASK_BOX_GROUP, FACE_MASK_REGION_CHECKBOX_GROUP ])
	FACE_MASK_BLUR_SLIDER.change(update_face_mask_blur, inputs = FACE_MASK_BLUR_SLIDER)
	FACE_MASK_REGION_CHECKBOX_GROUP.change(update_face_mask_regions, inputs = FACE_MASK_REGION_CHECKBOX_GROUP, outputs = FACE_MASK_REGION_CHECKBOX_GROUP)
	face_mask_padding_sliders = [ FACE_MASK_PADDING_TOP_SLIDER, FACE_MASK_PADDING_RIGHT_SLIDER, FACE_MASK_PADDING_BOTTOM_SLIDER, FACE_MASK_PADDING_LEFT_SLIDER ]
	for face_mask_padding_slider in face_mask_padding_sliders:
		face_mask_padding_slider.change(update_face_mask_padding, inputs = face_mask_padding_sliders)


def update_face_mask_type(face_mask_types : List[FaceMaskType]) -> Tuple[gradio.CheckboxGroup, gradio.Group, gradio.CheckboxGroup]:
	if not face_mask_types:
		face_mask_types = DeepFakeAI.choices.face_mask_types
	DeepFakeAI.globals.face_mask_types = face_mask_types
	has_box_mask = 'box' in face_mask_types
	has_region_mask = 'region' in face_mask_types
	return gradio.CheckboxGroup(value = face_mask_types), gradio.Group(visible = has_box_mask), gradio.CheckboxGroup(visible = has_region_mask)


def update_face_mask_blur(face_mask_blur : float) -> None:
	DeepFakeAI.globals.face_mask_blur = face_mask_blur


def update_face_mask_padding(face_mask_padding_top : int, face_mask_padding_right : int, face_mask_padding_bottom : int, face_mask_padding_left : int) -> None:
	DeepFakeAI.globals.face_mask_padding = (face_mask_padding_top, face_mask_padding_right, face_mask_padding_bottom, face_mask_padding_left)


def update_face_mask_regions(face_mask_regions : List[FaceMaskRegion]) -> gradio.CheckboxGroup:
	if not face_mask_regions:
		face_mask_regions = DeepFakeAI.choices.face_mask_regions
	DeepFakeAI.globals.face_mask_regions = face_mask_regions
	return gradio.CheckboxGroup(value = face_mask_regions)
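Like every module under DeepFakeAI/uis/components, this file follows a render/listen/update contract: render() builds the gradio widgets and registers them by name, listen() wires their events, and the update_* callbacks write straight into DeepFakeAI.globals. A minimal sketch of how a layout could mount such a module; the Blocks wrapper and launch call are illustrative, not taken from this commit:

	import gradio

	from DeepFakeAI.uis.components import face_masker

	with gradio.Blocks() as ui:
		face_masker.render()	# instantiate widgets and register_ui_component(...) them
		face_masker.listen()	# event wiring must happen inside the Blocks context
	ui.launch()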
DeepFakeAI/uis/components/face_selector.py
DELETED
@@ -1,164 +0,0 @@
from typing import List, Optional, Tuple, Any, Dict

import gradio

import DeepFakeAI.globals
import DeepFakeAI.choices
from DeepFakeAI import wording
from DeepFakeAI.face_store import clear_static_faces, clear_reference_faces
from DeepFakeAI.vision import get_video_frame, read_static_image, normalize_frame_color
from DeepFakeAI.face_analyser import get_many_faces
from DeepFakeAI.typing import Frame, FaceSelectorMode
from DeepFakeAI.filesystem import is_image, is_video
from DeepFakeAI.uis.core import get_ui_component, register_ui_component
from DeepFakeAI.uis.typing import ComponentName

FACE_SELECTOR_MODE_DROPDOWN : Optional[gradio.Dropdown] = None
REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None
REFERENCE_FACE_DISTANCE_SLIDER : Optional[gradio.Slider] = None


def render() -> None:
	global FACE_SELECTOR_MODE_DROPDOWN
	global REFERENCE_FACE_POSITION_GALLERY
	global REFERENCE_FACE_DISTANCE_SLIDER

	reference_face_gallery_args : Dict[str, Any] =\
	{
		'label': wording.get('reference_face_gallery_label'),
		'object_fit': 'cover',
		'columns': 8,
		'allow_preview': False,
		'visible': 'reference' in DeepFakeAI.globals.face_selector_mode
	}
	if is_image(DeepFakeAI.globals.target_path):
		reference_frame = read_static_image(DeepFakeAI.globals.target_path)
		reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
	if is_video(DeepFakeAI.globals.target_path):
		reference_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number)
		reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
	FACE_SELECTOR_MODE_DROPDOWN = gradio.Dropdown(
		label = wording.get('face_selector_mode_dropdown_label'),
		choices = DeepFakeAI.choices.face_selector_modes,
		value = DeepFakeAI.globals.face_selector_mode
	)
	REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args)
	REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider(
		label = wording.get('reference_face_distance_slider_label'),
		value = DeepFakeAI.globals.reference_face_distance,
		step = DeepFakeAI.choices.reference_face_distance_range[1] - DeepFakeAI.choices.reference_face_distance_range[0],
		minimum = DeepFakeAI.choices.reference_face_distance_range[0],
		maximum = DeepFakeAI.choices.reference_face_distance_range[-1],
		visible = 'reference' in DeepFakeAI.globals.face_selector_mode
	)
	register_ui_component('face_selector_mode_dropdown', FACE_SELECTOR_MODE_DROPDOWN)
	register_ui_component('reference_face_position_gallery', REFERENCE_FACE_POSITION_GALLERY)
	register_ui_component('reference_face_distance_slider', REFERENCE_FACE_DISTANCE_SLIDER)


def listen() -> None:
	FACE_SELECTOR_MODE_DROPDOWN.select(update_face_selector_mode, inputs = FACE_SELECTOR_MODE_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ])
	REFERENCE_FACE_POSITION_GALLERY.select(clear_and_update_reference_face_position)
	REFERENCE_FACE_DISTANCE_SLIDER.change(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER)
	multi_component_names : List[ComponentName] =\
	[
		'target_image',
		'target_video'
	]
	for component_name in multi_component_names:
		component = get_ui_component(component_name)
		if component:
			for method in [ 'upload', 'change', 'clear' ]:
				getattr(component, method)(update_reference_face_position)
				getattr(component, method)(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
	change_one_component_names : List[ComponentName] =\
	[
		'face_analyser_order_dropdown',
		'face_analyser_age_dropdown',
		'face_analyser_gender_dropdown'
	]
	for component_name in change_one_component_names:
		component = get_ui_component(component_name)
		if component:
			component.change(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
	change_two_component_names : List[ComponentName] =\
	[
		'face_detector_model_dropdown',
		'face_detector_size_dropdown',
		'face_detector_score_slider'
	]
	for component_name in change_two_component_names:
		component = get_ui_component(component_name)
		if component:
			component.change(clear_and_update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
	preview_frame_slider = get_ui_component('preview_frame_slider')
	if preview_frame_slider:
		preview_frame_slider.change(update_reference_frame_number, inputs = preview_frame_slider)
		preview_frame_slider.release(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)


def update_face_selector_mode(face_selector_mode : FaceSelectorMode) -> Tuple[gradio.Gallery, gradio.Slider]:
	if face_selector_mode == 'reference':
		DeepFakeAI.globals.face_selector_mode = face_selector_mode
		return gradio.Gallery(visible = True), gradio.Slider(visible = True)
	if face_selector_mode == 'one':
		DeepFakeAI.globals.face_selector_mode = face_selector_mode
		return gradio.Gallery(visible = False), gradio.Slider(visible = False)
	if face_selector_mode == 'many':
		DeepFakeAI.globals.face_selector_mode = face_selector_mode
		return gradio.Gallery(visible = False), gradio.Slider(visible = False)


def clear_and_update_reference_face_position(event : gradio.SelectData) -> gradio.Gallery:
	clear_reference_faces()
	clear_static_faces()
	update_reference_face_position(event.index)
	return update_reference_position_gallery()


def update_reference_face_position(reference_face_position : int = 0) -> None:
	DeepFakeAI.globals.reference_face_position = reference_face_position


def update_reference_face_distance(reference_face_distance : float) -> None:
	DeepFakeAI.globals.reference_face_distance = reference_face_distance


def update_reference_frame_number(reference_frame_number : int) -> None:
	DeepFakeAI.globals.reference_frame_number = reference_frame_number


def clear_and_update_reference_position_gallery() -> gradio.Gallery:
	clear_reference_faces()
	clear_static_faces()
	return update_reference_position_gallery()


def update_reference_position_gallery() -> gradio.Gallery:
	gallery_frames = []
	if is_image(DeepFakeAI.globals.target_path):
		reference_frame = read_static_image(DeepFakeAI.globals.target_path)
		gallery_frames = extract_gallery_frames(reference_frame)
	if is_video(DeepFakeAI.globals.target_path):
		reference_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number)
		gallery_frames = extract_gallery_frames(reference_frame)
	if gallery_frames:
		return gradio.Gallery(value = gallery_frames)
	return gradio.Gallery(value = None)


def extract_gallery_frames(reference_frame : Frame) -> List[Frame]:
	crop_frames = []
	faces = get_many_faces(reference_frame)
	for face in faces:
		start_x, start_y, end_x, end_y = map(int, face.bbox)
		padding_x = int((end_x - start_x) * 0.25)
		padding_y = int((end_y - start_y) * 0.25)
		start_x = max(0, start_x - padding_x)
		start_y = max(0, start_y - padding_y)
		end_x = max(0, end_x + padding_x)
		end_y = max(0, end_y + padding_y)
		crop_frame = reference_frame[start_y:end_y, start_x:end_x]
		crop_frame = normalize_frame_color(crop_frame)
		crop_frames.append(crop_frame)
	return crop_frames
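For intuition, extract_gallery_frames pads each detected bounding box by 25 % of its width and height per side before cropping the gallery thumbnail. A worked example with made-up coordinates:

	# hypothetical detection: 100 px wide, 120 px high
	start_x, start_y, end_x, end_y = 100, 100, 200, 220
	padding_x = int((200 - 100) * 0.25)	# 25
	padding_y = int((220 - 100) * 0.25)	# 30
	# padded crop window: x 75..225, y 70..250
	# NumPy slicing clips end indices that overrun the frame shape,
	# so only the start coordinates need the max(0, ...) clamp.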
DeepFakeAI/uis/components/frame_processors.py
DELETED
@@ -1,40 +0,0 @@
from typing import List, Optional
import gradio

import DeepFakeAI.globals
from DeepFakeAI import wording
from DeepFakeAI.processors.frame.core import load_frame_processor_module, clear_frame_processors_modules
from DeepFakeAI.filesystem import list_module_names
from DeepFakeAI.uis.core import register_ui_component

FRAME_PROCESSORS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None


def render() -> None:
	global FRAME_PROCESSORS_CHECKBOX_GROUP

	FRAME_PROCESSORS_CHECKBOX_GROUP = gradio.CheckboxGroup(
		label = wording.get('frame_processors_checkbox_group_label'),
		choices = sort_frame_processors(DeepFakeAI.globals.frame_processors),
		value = DeepFakeAI.globals.frame_processors
	)
	register_ui_component('frame_processors_checkbox_group', FRAME_PROCESSORS_CHECKBOX_GROUP)


def listen() -> None:
	FRAME_PROCESSORS_CHECKBOX_GROUP.change(update_frame_processors, inputs = FRAME_PROCESSORS_CHECKBOX_GROUP, outputs = FRAME_PROCESSORS_CHECKBOX_GROUP)


def update_frame_processors(frame_processors : List[str]) -> gradio.CheckboxGroup:
	DeepFakeAI.globals.frame_processors = frame_processors
	clear_frame_processors_modules()
	for frame_processor in frame_processors:
		frame_processor_module = load_frame_processor_module(frame_processor)
		if not frame_processor_module.pre_check():
			return gradio.CheckboxGroup()
	return gradio.CheckboxGroup(value = frame_processors, choices = sort_frame_processors(frame_processors))


def sort_frame_processors(frame_processors : List[str]) -> List[str]:
	available_frame_processors = list_module_names('DeepFakeAI/processors/frame/modules')
	return sorted(available_frame_processors, key = lambda frame_processor : frame_processors.index(frame_processor) if frame_processor in frame_processors else len(frame_processors))
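The sort key in sort_frame_processors keeps the user-selected processors in their chosen order and pushes everything else to the back; sorted() is stable, so unselected modules keep their listing order. Illustrative behaviour, assuming list_module_names returns the four modules from this commit alphabetically:

	frame_processors = [ 'frame_enhancer', 'face_swapper' ]
	# selected names sort by their position in the list (keys 0 and 1),
	# unselected names all receive the key len(frame_processors) == 2:
	# -> [ 'frame_enhancer', 'face_swapper', 'face_debugger', 'face_enhancer' ]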
DeepFakeAI/uis/components/frame_processors_options.py
DELETED
@@ -1,141 +0,0 @@
from typing import List, Optional, Tuple
import gradio

import DeepFakeAI.globals
from DeepFakeAI import wording
from DeepFakeAI.processors.frame.core import load_frame_processor_module
from DeepFakeAI.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices
from DeepFakeAI.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem
from DeepFakeAI.uis.core import get_ui_component, register_ui_component

FACE_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FACE_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None
FRAME_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
FRAME_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None
FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None


def render() -> None:
	global FACE_SWAPPER_MODEL_DROPDOWN
	global FACE_ENHANCER_MODEL_DROPDOWN
	global FACE_ENHANCER_BLEND_SLIDER
	global FRAME_ENHANCER_MODEL_DROPDOWN
	global FRAME_ENHANCER_BLEND_SLIDER
	global FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP

	FACE_SWAPPER_MODEL_DROPDOWN = gradio.Dropdown(
		label = wording.get('face_swapper_model_dropdown_label'),
		choices = frame_processors_choices.face_swapper_models,
		value = frame_processors_globals.face_swapper_model,
		visible = 'face_swapper' in DeepFakeAI.globals.frame_processors
	)
	FACE_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown(
		label = wording.get('face_enhancer_model_dropdown_label'),
		choices = frame_processors_choices.face_enhancer_models,
		value = frame_processors_globals.face_enhancer_model,
		visible = 'face_enhancer' in DeepFakeAI.globals.frame_processors
	)
	FACE_ENHANCER_BLEND_SLIDER = gradio.Slider(
		label = wording.get('face_enhancer_blend_slider_label'),
		value = frame_processors_globals.face_enhancer_blend,
		step = frame_processors_choices.face_enhancer_blend_range[1] - frame_processors_choices.face_enhancer_blend_range[0],
		minimum = frame_processors_choices.face_enhancer_blend_range[0],
		maximum = frame_processors_choices.face_enhancer_blend_range[-1],
		visible = 'face_enhancer' in DeepFakeAI.globals.frame_processors
	)
	FRAME_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown(
		label = wording.get('frame_enhancer_model_dropdown_label'),
		choices = frame_processors_choices.frame_enhancer_models,
		value = frame_processors_globals.frame_enhancer_model,
		visible = 'frame_enhancer' in DeepFakeAI.globals.frame_processors
	)
	FRAME_ENHANCER_BLEND_SLIDER = gradio.Slider(
		label = wording.get('frame_enhancer_blend_slider_label'),
		value = frame_processors_globals.frame_enhancer_blend,
		step = frame_processors_choices.frame_enhancer_blend_range[1] - frame_processors_choices.frame_enhancer_blend_range[0],
		minimum = frame_processors_choices.frame_enhancer_blend_range[0],
		maximum = frame_processors_choices.frame_enhancer_blend_range[-1],
		visible = 'frame_enhancer' in DeepFakeAI.globals.frame_processors
	)
	FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP = gradio.CheckboxGroup(
		label = wording.get('face_debugger_items_checkbox_group_label'),
		choices = frame_processors_choices.face_debugger_items,
		value = frame_processors_globals.face_debugger_items,
		visible = 'face_debugger' in DeepFakeAI.globals.frame_processors
	)

	register_ui_component('face_swapper_model_dropdown', FACE_SWAPPER_MODEL_DROPDOWN)
	register_ui_component('face_enhancer_model_dropdown', FACE_ENHANCER_MODEL_DROPDOWN)
	register_ui_component('face_enhancer_blend_slider', FACE_ENHANCER_BLEND_SLIDER)
	register_ui_component('frame_enhancer_model_dropdown', FRAME_ENHANCER_MODEL_DROPDOWN)
	register_ui_component('frame_enhancer_blend_slider', FRAME_ENHANCER_BLEND_SLIDER)
	register_ui_component('face_debugger_items_checkbox_group', FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP)


def listen() -> None:
	FACE_SWAPPER_MODEL_DROPDOWN.change(update_face_swapper_model, inputs = FACE_SWAPPER_MODEL_DROPDOWN, outputs = FACE_SWAPPER_MODEL_DROPDOWN)
	FACE_ENHANCER_MODEL_DROPDOWN.change(update_face_enhancer_model, inputs = FACE_ENHANCER_MODEL_DROPDOWN, outputs = FACE_ENHANCER_MODEL_DROPDOWN)
	FACE_ENHANCER_BLEND_SLIDER.change(update_face_enhancer_blend, inputs = FACE_ENHANCER_BLEND_SLIDER)
	FRAME_ENHANCER_MODEL_DROPDOWN.change(update_frame_enhancer_model, inputs = FRAME_ENHANCER_MODEL_DROPDOWN, outputs = FRAME_ENHANCER_MODEL_DROPDOWN)
	FRAME_ENHANCER_BLEND_SLIDER.change(update_frame_enhancer_blend, inputs = FRAME_ENHANCER_BLEND_SLIDER)
	FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP.change(update_face_debugger_items, inputs = FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP)
	frame_processors_checkbox_group = get_ui_component('frame_processors_checkbox_group')
	if frame_processors_checkbox_group:
		frame_processors_checkbox_group.change(toggle_face_swapper_model, inputs = frame_processors_checkbox_group, outputs = [ FACE_SWAPPER_MODEL_DROPDOWN, FACE_ENHANCER_MODEL_DROPDOWN, FACE_ENHANCER_BLEND_SLIDER, FRAME_ENHANCER_MODEL_DROPDOWN, FRAME_ENHANCER_BLEND_SLIDER, FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP ])


def update_face_swapper_model(face_swapper_model : FaceSwapperModel) -> gradio.Dropdown:
	frame_processors_globals.face_swapper_model = face_swapper_model
	if face_swapper_model == 'blendswap_256':
		DeepFakeAI.globals.face_recognizer_model = 'arcface_blendswap'
	if face_swapper_model == 'inswapper_128' or face_swapper_model == 'inswapper_128_fp16':
		DeepFakeAI.globals.face_recognizer_model = 'arcface_inswapper'
	if face_swapper_model == 'simswap_256' or face_swapper_model == 'simswap_512_unofficial':
		DeepFakeAI.globals.face_recognizer_model = 'arcface_simswap'
	face_swapper_module = load_frame_processor_module('face_swapper')
	face_swapper_module.clear_frame_processor()
	face_swapper_module.set_options('model', face_swapper_module.MODELS[face_swapper_model])
	if not face_swapper_module.pre_check():
		return gradio.Dropdown()
	return gradio.Dropdown(value = face_swapper_model)


def update_face_enhancer_model(face_enhancer_model : FaceEnhancerModel) -> gradio.Dropdown:
	frame_processors_globals.face_enhancer_model = face_enhancer_model
	face_enhancer_module = load_frame_processor_module('face_enhancer')
	face_enhancer_module.clear_frame_processor()
	face_enhancer_module.set_options('model', face_enhancer_module.MODELS[face_enhancer_model])
	if not face_enhancer_module.pre_check():
		return gradio.Dropdown()
	return gradio.Dropdown(value = face_enhancer_model)


def update_face_enhancer_blend(face_enhancer_blend : int) -> None:
	frame_processors_globals.face_enhancer_blend = face_enhancer_blend


def update_frame_enhancer_model(frame_enhancer_model : FrameEnhancerModel) -> gradio.Dropdown:
	frame_processors_globals.frame_enhancer_model = frame_enhancer_model
	frame_enhancer_module = load_frame_processor_module('frame_enhancer')
	frame_enhancer_module.clear_frame_processor()
	frame_enhancer_module.set_options('model', frame_enhancer_module.MODELS[frame_enhancer_model])
	if not frame_enhancer_module.pre_check():
		return gradio.Dropdown()
	return gradio.Dropdown(value = frame_enhancer_model)


def update_frame_enhancer_blend(frame_enhancer_blend : int) -> None:
	frame_processors_globals.frame_enhancer_blend = frame_enhancer_blend


def update_face_debugger_items(face_debugger_items : List[FaceDebuggerItem]) -> None:
	frame_processors_globals.face_debugger_items = face_debugger_items


def toggle_face_swapper_model(frame_processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Slider, gradio.CheckboxGroup]:
	has_face_swapper = 'face_swapper' in frame_processors
	has_face_enhancer = 'face_enhancer' in frame_processors
	has_frame_enhancer = 'frame_enhancer' in frame_processors
	has_face_debugger = 'face_debugger' in frame_processors
	return gradio.Dropdown(visible = has_face_swapper), gradio.Dropdown(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer), gradio.Dropdown(visible = has_frame_enhancer), gradio.Slider(visible = has_frame_enhancer), gradio.CheckboxGroup(visible = has_face_debugger)
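update_face_swapper_model has a side effect worth calling out: each swapper checkpoint expects embeddings from a matching ArcFace variant, so the face recognizer is switched in lock-step with the swapper. The pairing, spelled out as a plain mapping (a restatement of the branches above, not code from this commit):

	FACE_RECOGNIZER_BY_SWAPPER = \
	{
		'blendswap_256': 'arcface_blendswap',
		'inswapper_128': 'arcface_inswapper',
		'inswapper_128_fp16': 'arcface_inswapper',
		'simswap_256': 'arcface_simswap',
		'simswap_512_unofficial': 'arcface_simswap'
	}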
DeepFakeAI/uis/components/limit_resources.py
DELETED
@@ -1,27 +0,0 @@
from typing import Optional
import gradio

import DeepFakeAI.globals
import DeepFakeAI.choices
from DeepFakeAI import wording

MAX_MEMORY_SLIDER : Optional[gradio.Slider] = None


def render() -> None:
	global MAX_MEMORY_SLIDER

	MAX_MEMORY_SLIDER = gradio.Slider(
		label = wording.get('max_memory_slider_label'),
		step = DeepFakeAI.choices.max_memory_range[1] - DeepFakeAI.choices.max_memory_range[0],
		minimum = DeepFakeAI.choices.max_memory_range[0],
		maximum = DeepFakeAI.choices.max_memory_range[-1]
	)


def listen() -> None:
	MAX_MEMORY_SLIDER.change(update_max_memory, inputs = MAX_MEMORY_SLIDER)


def update_max_memory(max_memory : int) -> None:
	DeepFakeAI.globals.max_memory = max_memory if max_memory > 0 else None
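Two conventions here recur across these components: a slider's step, minimum and maximum are derived from an evenly spaced choices list, and a value of 0 is mapped to None, which the pipeline reads as "no memory cap". A sketch with hypothetical range values (the real ones live in DeepFakeAI.choices):

	max_memory_range = list(range(0, 129))	# e.g. 0..128, illustrative only
	step = max_memory_range[1] - max_memory_range[0]	# 1
	minimum, maximum = max_memory_range[0], max_memory_range[-1]	# 0, 128
	update_max_memory(0)	# -> DeepFakeAI.globals.max_memory = None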
DeepFakeAI/uis/components/output.py
DELETED
@@ -1,62 +0,0 @@
from typing import Tuple, Optional
import gradio

import DeepFakeAI.globals
from DeepFakeAI import wording
from DeepFakeAI.core import limit_resources, conditional_process
from DeepFakeAI.uis.core import get_ui_component
from DeepFakeAI.normalizer import normalize_output_path
from DeepFakeAI.filesystem import is_image, is_video, clear_temp

OUTPUT_IMAGE : Optional[gradio.Image] = None
OUTPUT_VIDEO : Optional[gradio.Video] = None
OUTPUT_START_BUTTON : Optional[gradio.Button] = None
OUTPUT_CLEAR_BUTTON : Optional[gradio.Button] = None


def render() -> None:
	global OUTPUT_IMAGE
	global OUTPUT_VIDEO
	global OUTPUT_START_BUTTON
	global OUTPUT_CLEAR_BUTTON

	OUTPUT_IMAGE = gradio.Image(
		label = wording.get('output_image_or_video_label'),
		visible = False
	)
	OUTPUT_VIDEO = gradio.Video(
		label = wording.get('output_image_or_video_label')
	)
	OUTPUT_START_BUTTON = gradio.Button(
		value = wording.get('start_button_label'),
		variant = 'primary',
		size = 'sm'
	)
	OUTPUT_CLEAR_BUTTON = gradio.Button(
		value = wording.get('clear_button_label'),
		size = 'sm'
	)


def listen() -> None:
	output_path_textbox = get_ui_component('output_path_textbox')
	if output_path_textbox:
		OUTPUT_START_BUTTON.click(start, inputs = output_path_textbox, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ])
	OUTPUT_CLEAR_BUTTON.click(clear, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ])


def start(output_path : str) -> Tuple[gradio.Image, gradio.Video]:
	DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_paths, DeepFakeAI.globals.target_path, output_path)
	limit_resources()
	conditional_process()
	if is_image(DeepFakeAI.globals.output_path):
		return gradio.Image(value = DeepFakeAI.globals.output_path, visible = True), gradio.Video(value = None, visible = False)
	if is_video(DeepFakeAI.globals.output_path):
		return gradio.Image(value = None, visible = False), gradio.Video(value = DeepFakeAI.globals.output_path, visible = True)
	return gradio.Image(), gradio.Video()


def clear() -> Tuple[gradio.Image, gradio.Video]:
	if DeepFakeAI.globals.target_path:
		clear_temp(DeepFakeAI.globals.target_path)
	return gradio.Image(value = None), gradio.Video(value = None)
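The start handler is a thin wrapper over the processing entry points in DeepFakeAI.core: it resolves the final output file name, applies the memory cap, runs the selected processors, then shows whichever preview matches the produced media. Roughly the headless equivalent of pressing the start button (a sketch; the output directory is illustrative):

	import DeepFakeAI.globals
	from DeepFakeAI.core import limit_resources, conditional_process
	from DeepFakeAI.normalizer import normalize_output_path

	DeepFakeAI.globals.output_path = normalize_output_path(
		DeepFakeAI.globals.source_paths,
		DeepFakeAI.globals.target_path,
		'/tmp'	# directory or explicit file path
	)
	limit_resources()
	conditional_process()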
DeepFakeAI/uis/components/output_options.py
DELETED
@@ -1,94 +0,0 @@
from typing import Optional, Tuple, List
import tempfile
import gradio

import DeepFakeAI.globals
import DeepFakeAI.choices
from DeepFakeAI import wording
from DeepFakeAI.typing import OutputVideoEncoder
from DeepFakeAI.filesystem import is_image, is_video
from DeepFakeAI.uis.typing import ComponentName
from DeepFakeAI.uis.core import get_ui_component, register_ui_component

OUTPUT_PATH_TEXTBOX : Optional[gradio.Textbox] = None
OUTPUT_IMAGE_QUALITY_SLIDER : Optional[gradio.Slider] = None
OUTPUT_VIDEO_ENCODER_DROPDOWN : Optional[gradio.Dropdown] = None
OUTPUT_VIDEO_QUALITY_SLIDER : Optional[gradio.Slider] = None


def render() -> None:
	global OUTPUT_PATH_TEXTBOX
	global OUTPUT_IMAGE_QUALITY_SLIDER
	global OUTPUT_VIDEO_ENCODER_DROPDOWN
	global OUTPUT_VIDEO_QUALITY_SLIDER

	OUTPUT_PATH_TEXTBOX = gradio.Textbox(
		label = wording.get('output_path_textbox_label'),
		value = DeepFakeAI.globals.output_path or tempfile.gettempdir(),
		max_lines = 1
	)
	OUTPUT_IMAGE_QUALITY_SLIDER = gradio.Slider(
		label = wording.get('output_image_quality_slider_label'),
		value = DeepFakeAI.globals.output_image_quality,
		step = DeepFakeAI.choices.output_image_quality_range[1] - DeepFakeAI.choices.output_image_quality_range[0],
		minimum = DeepFakeAI.choices.output_image_quality_range[0],
		maximum = DeepFakeAI.choices.output_image_quality_range[-1],
		visible = is_image(DeepFakeAI.globals.target_path)
	)
	OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown(
		label = wording.get('output_video_encoder_dropdown_label'),
		choices = DeepFakeAI.choices.output_video_encoders,
		value = DeepFakeAI.globals.output_video_encoder,
		visible = is_video(DeepFakeAI.globals.target_path)
	)
	OUTPUT_VIDEO_QUALITY_SLIDER = gradio.Slider(
		label = wording.get('output_video_quality_slider_label'),
		value = DeepFakeAI.globals.output_video_quality,
		step = DeepFakeAI.choices.output_video_quality_range[1] - DeepFakeAI.choices.output_video_quality_range[0],
		minimum = DeepFakeAI.choices.output_video_quality_range[0],
		maximum = DeepFakeAI.choices.output_video_quality_range[-1],
		visible = is_video(DeepFakeAI.globals.target_path)
	)
	register_ui_component('output_path_textbox', OUTPUT_PATH_TEXTBOX)


def listen() -> None:
	OUTPUT_PATH_TEXTBOX.change(update_output_path, inputs = OUTPUT_PATH_TEXTBOX)
	OUTPUT_IMAGE_QUALITY_SLIDER.change(update_output_image_quality, inputs = OUTPUT_IMAGE_QUALITY_SLIDER)
	OUTPUT_VIDEO_ENCODER_DROPDOWN.select(update_output_video_encoder, inputs = OUTPUT_VIDEO_ENCODER_DROPDOWN)
	OUTPUT_VIDEO_QUALITY_SLIDER.change(update_output_video_quality, inputs = OUTPUT_VIDEO_QUALITY_SLIDER)
	multi_component_names : List[ComponentName] =\
	[
		'source_image',
		'target_image',
		'target_video'
	]
	for component_name in multi_component_names:
		component = get_ui_component(component_name)
		if component:
			for method in [ 'upload', 'change', 'clear' ]:
				getattr(component, method)(remote_update, outputs = [ OUTPUT_IMAGE_QUALITY_SLIDER, OUTPUT_VIDEO_ENCODER_DROPDOWN, OUTPUT_VIDEO_QUALITY_SLIDER ])


def remote_update() -> Tuple[gradio.Slider, gradio.Dropdown, gradio.Slider]:
	if is_image(DeepFakeAI.globals.target_path):
		return gradio.Slider(visible = True), gradio.Dropdown(visible = False), gradio.Slider(visible = False)
	if is_video(DeepFakeAI.globals.target_path):
		return gradio.Slider(visible = False), gradio.Dropdown(visible = True), gradio.Slider(visible = True)
	return gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False)


def update_output_path(output_path : str) -> None:
	DeepFakeAI.globals.output_path = output_path


def update_output_image_quality(output_image_quality : int) -> None:
	DeepFakeAI.globals.output_image_quality = output_image_quality


def update_output_video_encoder(output_video_encoder : OutputVideoEncoder) -> None:
	DeepFakeAI.globals.output_video_encoder = output_video_encoder


def update_output_video_quality(output_video_quality : int) -> None:
	DeepFakeAI.globals.output_video_quality = output_video_quality
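remote_update toggles the three output controls strictly by target type: an image target shows only the image quality slider, a video target shows the encoder dropdown and the video quality slider, and no target hides all three. Setting the same options programmatically (a sketch; the quality values are illustrative and 'libx264' is assumed to be among DeepFakeAI.choices.output_video_encoders):

	import tempfile
	import DeepFakeAI.globals

	DeepFakeAI.globals.output_path = tempfile.gettempdir()	# default shown in the textbox
	DeepFakeAI.globals.output_image_quality = 90	# used for image targets
	DeepFakeAI.globals.output_video_encoder = 'libx264'	# used for video targets
	DeepFakeAI.globals.output_video_quality = 90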