diff --git a/DeepFakeAI/__init__.py b/DeepFakeAI/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/DeepFakeAI/choices.py b/DeepFakeAI/choices.py deleted file mode 100644 index 167229d40c07ad56727e90a2e2c5b5d04f7c449c..0000000000000000000000000000000000000000 --- a/DeepFakeAI/choices.py +++ /dev/null @@ -1,26 +0,0 @@ -from typing import List - -from DeepFakeAI.typing import FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceMaskType, FaceMaskRegion, TempFrameFormat, OutputVideoEncoder -from DeepFakeAI.common_helper import create_range - -face_analyser_orders : List[FaceAnalyserOrder] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best' ] -face_analyser_ages : List[FaceAnalyserAge] = [ 'child', 'teen', 'adult', 'senior' ] -face_analyser_genders : List[FaceAnalyserGender] = [ 'male', 'female' ] -face_detector_models : List[str] = [ 'retinaface', 'yunet' ] -face_detector_sizes : List[str] = [ '160x160', '320x320', '480x480', '512x512', '640x640', '768x768', '960x960', '1024x1024' ] -face_selector_modes : List[FaceSelectorMode] = [ 'reference', 'one', 'many' ] -face_mask_types : List[FaceMaskType] = [ 'box', 'occlusion', 'region' ] -face_mask_regions : List[FaceMaskRegion] = [ 'skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 'eye-glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip' ] -temp_frame_formats : List[TempFrameFormat] = [ 'jpg', 'png' ] -output_video_encoders : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ] - -execution_thread_count_range : List[float] = create_range(1, 128, 1) -execution_queue_count_range : List[float] = create_range(1, 32, 1) -max_memory_range : List[float] = create_range(0, 128, 1) -face_detector_score_range : List[float] = create_range(0.0, 1.0, 0.05) -face_mask_blur_range : List[float] = create_range(0.0, 1.0, 0.05) -face_mask_padding_range : List[float] = create_range(0, 100, 1) -reference_face_distance_range : List[float] = create_range(0.0, 1.5, 0.05) -temp_frame_quality_range : List[float] = create_range(0, 100, 1) -output_image_quality_range : List[float] = create_range(0, 100, 1) -output_video_quality_range : List[float] = create_range(0, 100, 1) diff --git a/DeepFakeAI/common_helper.py b/DeepFakeAI/common_helper.py deleted file mode 100644 index 8ddcad8d379909b269033777e1f640ddbde5bc9b..0000000000000000000000000000000000000000 --- a/DeepFakeAI/common_helper.py +++ /dev/null @@ -1,10 +0,0 @@ -from typing import List, Any -import numpy - - -def create_metavar(ranges : List[Any]) -> str: - return '[' + str(ranges[0]) + '-' + str(ranges[-1]) + ']' - - -def create_range(start : float, stop : float, step : float) -> List[float]: - return (numpy.around(numpy.arange(start, stop + step, step), decimals = 2)).tolist() diff --git a/DeepFakeAI/content_analyser.py b/DeepFakeAI/content_analyser.py deleted file mode 100644 index d2e9e479b232e2c221621215f89fc78c087273c0..0000000000000000000000000000000000000000 --- a/DeepFakeAI/content_analyser.py +++ /dev/null @@ -1,103 +0,0 @@ -from typing import Any, Dict -from functools import lru_cache -import threading -import cv2 -import numpy -import onnxruntime -from tqdm import tqdm - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.typing import Frame, ModelValue -from DeepFakeAI.vision import get_video_frame, count_video_frame_total, read_image, detect_fps -from 
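# [editor's note] A self-contained sketch of how common_helper.create_range and
# create_metavar (deleted above) work together: create_range builds an inclusive,
# rounded numeric range and create_metavar renders it for argparse help text.
from typing import Any, List
import numpy

def create_range(start: float, stop: float, step: float) -> List[float]:
    # numpy.arange excludes the stop value, so stop + step makes the range inclusive;
    # rounding to 2 decimals suppresses float artifacts such as 0.30000000000000004
    return numpy.around(numpy.arange(start, stop + step, step), decimals = 2).tolist()

def create_metavar(ranges: List[Any]) -> str:
    return '[' + str(ranges[0]) + '-' + str(ranges[-1]) + ']'

face_detector_score_range = create_range(0.0, 1.0, 0.05)
print(create_metavar(face_detector_score_range))  # [0.0-1.0]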
DeepFakeAI.filesystem import resolve_relative_path -from DeepFakeAI.download import conditional_download - -CONTENT_ANALYSER = None -THREAD_LOCK : threading.Lock = threading.Lock() -MODELS : Dict[str, ModelValue] =\ -{ - 'open_nsfw': - { - 'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/open_nsfw.onnx', - 'path': resolve_relative_path('../.assets/models/open_nsfw.onnx') - } -} -MAX_PROBABILITY = 0.80 -MAX_RATE = 5 -STREAM_COUNTER = 0 - - -def get_content_analyser() -> Any: - global CONTENT_ANALYSER - - with THREAD_LOCK: - if CONTENT_ANALYSER is None: - model_path = MODELS.get('open_nsfw').get('path') - CONTENT_ANALYSER = onnxruntime.InferenceSession(model_path, providers = DeepFakeAI.globals.execution_providers) - return CONTENT_ANALYSER - - -def clear_content_analyser() -> None: - global CONTENT_ANALYSER - - CONTENT_ANALYSER = None - - -def pre_check() -> bool: - if not DeepFakeAI.globals.skip_download: - download_directory_path = resolve_relative_path('../.assets/models') - model_url = MODELS.get('open_nsfw').get('url') - conditional_download(download_directory_path, [ model_url ]) - return True - - -def analyse_stream(frame : Frame, fps : float) -> bool: - global STREAM_COUNTER - - STREAM_COUNTER = STREAM_COUNTER + 1 - if STREAM_COUNTER % int(fps) == 0: - return analyse_frame(frame) - return False - - -def prepare_frame(frame : Frame) -> Frame: - frame = cv2.resize(frame, (224, 224)).astype(numpy.float32) - frame -= numpy.array([ 104, 117, 123 ]).astype(numpy.float32) - frame = numpy.expand_dims(frame, axis = 0) - return frame - - -def analyse_frame(frame : Frame) -> bool: - content_analyser = get_content_analyser() - frame = prepare_frame(frame) - probability = content_analyser.run(None, - { - 'input:0': frame - })[0][0][1] - return probability > MAX_PROBABILITY - - -@lru_cache(maxsize = None) -def analyse_image(image_path : str) -> bool: - frame = read_image(image_path) - return analyse_frame(frame) - - -@lru_cache(maxsize = None) -def analyse_video(video_path : str, start_frame : int, end_frame : int) -> bool: - video_frame_total = count_video_frame_total(video_path) - fps = detect_fps(video_path) - frame_range = range(start_frame or 0, end_frame or video_frame_total) - rate = 0.0 - counter = 0 - with tqdm(total = len(frame_range), desc = wording.get('analysing'), unit = 'frame', ascii = ' =', disable = DeepFakeAI.globals.log_level in [ 'warn', 'error' ]) as progress: - for frame_number in frame_range: - if frame_number % int(fps) == 0: - frame = get_video_frame(video_path, frame_number) - if analyse_frame(frame): - counter += 1 - rate = counter * int(fps) / len(frame_range) * 100 - progress.update() - progress.set_postfix(rate = rate) - return rate > MAX_RATE diff --git a/DeepFakeAI/core.py b/DeepFakeAI/core.py deleted file mode 100644 index 95a7066d70b1c470b421a86a5bbabb392e9f4455..0000000000000000000000000000000000000000 --- a/DeepFakeAI/core.py +++ /dev/null @@ -1,299 +0,0 @@ -import os - -os.environ['OMP_NUM_THREADS'] = '1' - -import signal -import ssl -import sys -import warnings -import platform -import shutil -import onnxruntime -from argparse import ArgumentParser, HelpFormatter - -import DeepFakeAI.choices -import DeepFakeAI.globals -from DeepFakeAI.face_analyser import get_one_face, get_average_face -from DeepFakeAI.face_store import get_reference_faces, append_reference_face -from DeepFakeAI.vision import get_video_frame, detect_fps, read_image, read_static_images -from DeepFakeAI import face_analyser, face_masker, content_analyser, 
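# [editor's note] content_analyser keeps one lazily built InferenceSession behind a
# module-level global and a threading.Lock, and analyse_stream() samples roughly one
# frame per second via STREAM_COUNTER % int(fps). A generic sketch of that singleton
# pattern (loader is a stand-in for the onnxruntime.InferenceSession construction):
import threading
from typing import Any, Callable, Optional

_INSTANCE: Optional[Any] = None
_LOCK = threading.Lock()

def get_instance(loader: Callable[[], Any]) -> Any:
    global _INSTANCE
    with _LOCK:  # serialise first use so two threads cannot both build the session
        if _INSTANCE is None:
            _INSTANCE = loader()
    return _INSTANCE

print(get_instance(dict) is get_instance(dict))  # True: constructed exactly once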
metadata, logger, wording -from DeepFakeAI.content_analyser import analyse_image, analyse_video -from DeepFakeAI.processors.frame.core import get_frame_processors_modules, load_frame_processor_module -from DeepFakeAI.common_helper import create_metavar -from DeepFakeAI.execution_helper import encode_execution_providers, decode_execution_providers -from DeepFakeAI.normalizer import normalize_output_path, normalize_padding -from DeepFakeAI.filesystem import is_image, is_video, list_module_names, get_temp_frame_paths, create_temp, move_temp, clear_temp -from DeepFakeAI.ffmpeg import extract_frames, compress_image, merge_video, restore_audio - -onnxruntime.set_default_logger_severity(3) -warnings.filterwarnings('ignore', category = UserWarning, module = 'gradio') -warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision') - -if platform.system().lower() == 'darwin': - ssl._create_default_https_context = ssl._create_unverified_context - - -def cli() -> None: - signal.signal(signal.SIGINT, lambda signal_number, frame: destroy()) - program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120), add_help = False) - # general - program.add_argument('-s', '--source', action = 'append', help = wording.get('source_help'), dest = 'source_paths') - program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path') - program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path') - program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version') - # misc - group_misc = program.add_argument_group('misc') - group_misc.add_argument('--skip-download', help = wording.get('skip_download_help'), action = 'store_true') - group_misc.add_argument('--headless', help = wording.get('headless_help'), action = 'store_true') - group_misc.add_argument('--log-level', help = wording.get('log_level_help'), default = 'info', choices = logger.get_log_levels()) - # execution - execution_providers = encode_execution_providers(onnxruntime.get_available_providers()) - group_execution = program.add_argument_group('execution') - group_execution.add_argument('--execution-providers', help = wording.get('execution_providers_help').format(choices = ', '.join(execution_providers)), default = [ 'cpu' ], choices = execution_providers, nargs = '+', metavar = 'EXECUTION_PROVIDERS') - group_execution.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), type = int, default = 4, choices = DeepFakeAI.choices.execution_thread_count_range, metavar = create_metavar(DeepFakeAI.choices.execution_thread_count_range)) - group_execution.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), type = int, default = 1, choices = DeepFakeAI.choices.execution_queue_count_range, metavar = create_metavar(DeepFakeAI.choices.execution_queue_count_range)) - group_execution.add_argument('--max-memory', help = wording.get('max_memory_help'), type = int, choices = DeepFakeAI.choices.max_memory_range, metavar = create_metavar(DeepFakeAI.choices.max_memory_range)) - # face analyser - group_face_analyser = program.add_argument_group('face analyser') - group_face_analyser.add_argument('--face-analyser-order', help = wording.get('face_analyser_order_help'), default = 'left-right', choices = DeepFakeAI.choices.face_analyser_orders) - group_face_analyser.add_argument('--face-analyser-age', help = 
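# [editor's note] cli() repeatedly pairs `choices = <range list>` with a compact
# `metavar = '[min-max]'` so argparse validates the value without printing hundreds
# of choices in --help. Minimal reproduction of that idiom:
from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument('--execution-thread-count', type = int, default = 4,
    choices = list(range(1, 129)), metavar = '[1-128]')
print(parser.parse_args([ '--execution-thread-count', '8' ]).execution_thread_count)  # 8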
wording.get('face_analyser_age_help'), choices = DeepFakeAI.choices.face_analyser_ages) - group_face_analyser.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), choices = DeepFakeAI.choices.face_analyser_genders) - group_face_analyser.add_argument('--face-detector-model', help = wording.get('face_detector_model_help'), default = 'retinaface', choices = DeepFakeAI.choices.face_detector_models) - group_face_analyser.add_argument('--face-detector-size', help = wording.get('face_detector_size_help'), default = '640x640', choices = DeepFakeAI.choices.face_detector_sizes) - group_face_analyser.add_argument('--face-detector-score', help = wording.get('face_detector_score_help'), type = float, default = 0.5, choices = DeepFakeAI.choices.face_detector_score_range, metavar = create_metavar(DeepFakeAI.choices.face_detector_score_range)) - # face selector - group_face_selector = program.add_argument_group('face selector') - group_face_selector.add_argument('--face-selector-mode', help = wording.get('face_selector_mode_help'), default = 'reference', choices = DeepFakeAI.choices.face_selector_modes) - group_face_selector.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), type = int, default = 0) - group_face_selector.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), type = float, default = 0.6, choices = DeepFakeAI.choices.reference_face_distance_range, metavar = create_metavar(DeepFakeAI.choices.reference_face_distance_range)) - group_face_selector.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), type = int, default = 0) - # face mask - group_face_mask = program.add_argument_group('face mask') - group_face_mask.add_argument('--face-mask-types', help = wording.get('face_mask_types_help').format(choices = ', '.join(DeepFakeAI.choices.face_mask_types)), default = [ 'box' ], choices = DeepFakeAI.choices.face_mask_types, nargs = '+', metavar = 'FACE_MASK_TYPES') - group_face_mask.add_argument('--face-mask-blur', help = wording.get('face_mask_blur_help'), type = float, default = 0.3, choices = DeepFakeAI.choices.face_mask_blur_range, metavar = create_metavar(DeepFakeAI.choices.face_mask_blur_range)) - group_face_mask.add_argument('--face-mask-padding', help = wording.get('face_mask_padding_help'), type = int, default = [ 0, 0, 0, 0 ], nargs = '+') - group_face_mask.add_argument('--face-mask-regions', help = wording.get('face_mask_regions_help').format(choices = ', '.join(DeepFakeAI.choices.face_mask_regions)), default = DeepFakeAI.choices.face_mask_regions, choices = DeepFakeAI.choices.face_mask_regions, nargs = '+', metavar = 'FACE_MASK_REGIONS') - # frame extraction - group_frame_extraction = program.add_argument_group('frame extraction') - group_frame_extraction.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), type = int) - group_frame_extraction.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), type = int) - group_frame_extraction.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), default = 'jpg', choices = DeepFakeAI.choices.temp_frame_formats) - group_frame_extraction.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), type = int, default = 100, choices = DeepFakeAI.choices.temp_frame_quality_range, metavar = create_metavar(DeepFakeAI.choices.temp_frame_quality_range)) - group_frame_extraction.add_argument('--keep-temp', 
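# [editor's note] --face-mask-padding accepts one to four integers (percent values,
# top/right/bottom/left) which apply_args later feeds through normalize_padding from
# normalizer.py; that file is not part of this hunk, so the CSS-shorthand expansion
# below is an assumption about its behaviour, not the original implementation:
from typing import List, Optional, Tuple

def normalize_padding(padding: Optional[List[int]]) -> Optional[Tuple[int, int, int, int]]:
    if padding and len(padding) == 1:
        return (padding[0], padding[0], padding[0], padding[0])
    if padding and len(padding) == 2:
        return (padding[0], padding[1], padding[0], padding[1])
    if padding and len(padding) == 3:
        return (padding[0], padding[1], padding[2], padding[1])
    if padding and len(padding) == 4:
        return (padding[0], padding[1], padding[2], padding[3])
    return None

print(normalize_padding([ 10 ]))  # (10, 10, 10, 10)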
help = wording.get('keep_temp_help'), action = 'store_true') - # output creation - group_output_creation = program.add_argument_group('output creation') - group_output_creation.add_argument('--output-image-quality', help = wording.get('output_image_quality_help'), type = int, default = 80, choices = DeepFakeAI.choices.output_image_quality_range, metavar = create_metavar(DeepFakeAI.choices.output_image_quality_range)) - group_output_creation.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), default = 'libx264', choices = DeepFakeAI.choices.output_video_encoders) - group_output_creation.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), type = int, default = 80, choices = DeepFakeAI.choices.output_video_quality_range, metavar = create_metavar(DeepFakeAI.choices.output_video_quality_range)) - group_output_creation.add_argument('--keep-fps', help = wording.get('keep_fps_help'), action = 'store_true') - group_output_creation.add_argument('--skip-audio', help = wording.get('skip_audio_help'), action = 'store_true') - # frame processors - available_frame_processors = list_module_names('DeepFakeAI/processors/frame/modules') - program = ArgumentParser(parents = [ program ], formatter_class = program.formatter_class, add_help = True) - group_frame_processors = program.add_argument_group('frame processors') - group_frame_processors.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(available_frame_processors)), default = [ 'face_swapper' ], nargs = '+') - for frame_processor in available_frame_processors: - frame_processor_module = load_frame_processor_module(frame_processor) - frame_processor_module.register_args(group_frame_processors) - # uis - group_uis = program.add_argument_group('uis') - group_uis.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(list_module_names('DeepFakeAI/uis/layouts'))), default = [ 'default' ], nargs = '+') - run(program) - - -def apply_args(program : ArgumentParser) -> None: - args = program.parse_args() - # general - DeepFakeAI.globals.source_paths = args.source_paths - DeepFakeAI.globals.target_path = args.target_path - DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_paths, DeepFakeAI.globals.target_path, args.output_path) - # misc - DeepFakeAI.globals.skip_download = args.skip_download - DeepFakeAI.globals.headless = args.headless - DeepFakeAI.globals.log_level = args.log_level - # execution - DeepFakeAI.globals.execution_providers = decode_execution_providers(args.execution_providers) - DeepFakeAI.globals.execution_thread_count = args.execution_thread_count - DeepFakeAI.globals.execution_queue_count = args.execution_queue_count - DeepFakeAI.globals.max_memory = args.max_memory - # face analyser - DeepFakeAI.globals.face_analyser_order = args.face_analyser_order - DeepFakeAI.globals.face_analyser_age = args.face_analyser_age - DeepFakeAI.globals.face_analyser_gender = args.face_analyser_gender - DeepFakeAI.globals.face_detector_model = args.face_detector_model - DeepFakeAI.globals.face_detector_size = args.face_detector_size - DeepFakeAI.globals.face_detector_score = args.face_detector_score - # face selector - DeepFakeAI.globals.face_selector_mode = args.face_selector_mode - DeepFakeAI.globals.reference_face_position = args.reference_face_position - DeepFakeAI.globals.reference_face_distance = args.reference_face_distance - 
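# [editor's note] cli() first builds the parser with add_help = False, then rebuilds
# it with `parents = [ program ]` so dynamically discovered frame-processor modules
# can register their own flags before --help is finalised. The pattern in miniature:
from argparse import ArgumentParser

base = ArgumentParser(add_help = False)
base.add_argument('--target')
program = ArgumentParser(parents = [ base ], add_help = True)
program.add_argument('--frame-processors', nargs = '+', default = [ 'face_swapper' ])
print(program.parse_args([ '--target', 'video.mp4' ]).frame_processors)  # ['face_swapper']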
DeepFakeAI.globals.reference_frame_number = args.reference_frame_number - # face mask - DeepFakeAI.globals.face_mask_types = args.face_mask_types - DeepFakeAI.globals.face_mask_blur = args.face_mask_blur - DeepFakeAI.globals.face_mask_padding = normalize_padding(args.face_mask_padding) - DeepFakeAI.globals.face_mask_regions = args.face_mask_regions - # frame extraction - DeepFakeAI.globals.trim_frame_start = args.trim_frame_start - DeepFakeAI.globals.trim_frame_end = args.trim_frame_end - DeepFakeAI.globals.temp_frame_format = args.temp_frame_format - DeepFakeAI.globals.temp_frame_quality = args.temp_frame_quality - DeepFakeAI.globals.keep_temp = args.keep_temp - # output creation - DeepFakeAI.globals.output_image_quality = args.output_image_quality - DeepFakeAI.globals.output_video_encoder = args.output_video_encoder - DeepFakeAI.globals.output_video_quality = args.output_video_quality - DeepFakeAI.globals.keep_fps = args.keep_fps - DeepFakeAI.globals.skip_audio = args.skip_audio - # frame processors - available_frame_processors = list_module_names('DeepFakeAI/processors/frame/modules') - DeepFakeAI.globals.frame_processors = args.frame_processors - for frame_processor in available_frame_processors: - frame_processor_module = load_frame_processor_module(frame_processor) - frame_processor_module.apply_args(program) - # uis - DeepFakeAI.globals.ui_layouts = args.ui_layouts - - -def run(program : ArgumentParser) -> None: - apply_args(program) - logger.init(DeepFakeAI.globals.log_level) - limit_resources() - if not pre_check() or not content_analyser.pre_check() or not face_analyser.pre_check() or not face_masker.pre_check(): - return - for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors): - if not frame_processor_module.pre_check(): - return - if DeepFakeAI.globals.headless: - conditional_process() - else: - import DeepFakeAI.uis.core as ui - - for ui_layout in ui.get_ui_layouts_modules(DeepFakeAI.globals.ui_layouts): - if not ui_layout.pre_check(): - return - ui.launch() - - -def destroy() -> None: - if DeepFakeAI.globals.target_path: - clear_temp(DeepFakeAI.globals.target_path) - sys.exit() - - -def limit_resources() -> None: - if DeepFakeAI.globals.max_memory: - memory = DeepFakeAI.globals.max_memory * 1024 ** 3 - if platform.system().lower() == 'darwin': - memory = DeepFakeAI.globals.max_memory * 1024 ** 6 - if platform.system().lower() == 'windows': - import ctypes - - kernel32 = ctypes.windll.kernel32 # type: ignore[attr-defined] - kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory)) - else: - import resource - - resource.setrlimit(resource.RLIMIT_DATA, (memory, memory)) - - -def pre_check() -> bool: - if sys.version_info < (3, 9): - logger.error(wording.get('python_not_supported').format(version = '3.9'), __name__.upper()) - return False - if not shutil.which('ffmpeg'): - logger.error(wording.get('ffmpeg_not_installed'), __name__.upper()) - return False - return True - - -def conditional_process() -> None: - conditional_append_reference_faces() - for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors): - if not frame_processor_module.pre_process('output'): - return - if is_image(DeepFakeAI.globals.target_path): - process_image() - if is_video(DeepFakeAI.globals.target_path): - process_video() - - -def conditional_append_reference_faces() -> None: - if 'reference' in DeepFakeAI.globals.face_selector_mode and not get_reference_faces(): - source_frames = 
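# [editor's note] limit_resources() converts --max-memory from gibibytes to bytes and
# applies it via SetProcessWorkingSetSize on Windows or RLIMIT_DATA elsewhere (note
# the darwin branch above multiplies by 1024 ** 6, far beyond gibibytes, which in
# practice leaves the cap unreachable there). POSIX-only sketch of the same idea:
import platform

def limit_memory(gigabytes: int) -> None:
    memory = gigabytes * 1024 ** 3
    if platform.system().lower() != 'windows':
        import resource  # unavailable on Windows, hence the late import
        resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))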
read_static_images(DeepFakeAI.globals.source_paths) - source_face = get_average_face(source_frames) - if is_video(DeepFakeAI.globals.target_path): - reference_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) - else: - reference_frame = read_image(DeepFakeAI.globals.target_path) - reference_face = get_one_face(reference_frame, DeepFakeAI.globals.reference_face_position) - append_reference_face('origin', reference_face) - if source_face and reference_face: - for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors): - reference_frame = frame_processor_module.get_reference_frame(source_face, reference_face, reference_frame) - reference_face = get_one_face(reference_frame, DeepFakeAI.globals.reference_face_position) - append_reference_face(frame_processor_module.__name__, reference_face) - - -def process_image() -> None: - if analyse_image(DeepFakeAI.globals.target_path): - return - shutil.copy2(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path) - # process frame - for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors): - logger.info(wording.get('processing'), frame_processor_module.NAME) - frame_processor_module.process_image(DeepFakeAI.globals.source_paths, DeepFakeAI.globals.output_path, DeepFakeAI.globals.output_path) - frame_processor_module.post_process() - # compress image - logger.info(wording.get('compressing_image'), __name__.upper()) - if not compress_image(DeepFakeAI.globals.output_path): - logger.error(wording.get('compressing_image_failed'), __name__.upper()) - # validate image - if is_image(DeepFakeAI.globals.output_path): - logger.info(wording.get('processing_image_succeed'), __name__.upper()) - else: - logger.error(wording.get('processing_image_failed'), __name__.upper()) - - -def process_video() -> None: - if analyse_video(DeepFakeAI.globals.target_path, DeepFakeAI.globals.trim_frame_start, DeepFakeAI.globals.trim_frame_end): - return - fps = detect_fps(DeepFakeAI.globals.target_path) if DeepFakeAI.globals.keep_fps else 25.0 - # create temp - logger.info(wording.get('creating_temp'), __name__.upper()) - create_temp(DeepFakeAI.globals.target_path) - # extract frames - logger.info(wording.get('extracting_frames_fps').format(fps = fps), __name__.upper()) - extract_frames(DeepFakeAI.globals.target_path, fps) - # process frame - temp_frame_paths = get_temp_frame_paths(DeepFakeAI.globals.target_path) - if temp_frame_paths: - for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors): - logger.info(wording.get('processing'), frame_processor_module.NAME) - frame_processor_module.process_video(DeepFakeAI.globals.source_paths, temp_frame_paths) - frame_processor_module.post_process() - else: - logger.error(wording.get('temp_frames_not_found'), __name__.upper()) - return - # merge video - logger.info(wording.get('merging_video_fps').format(fps = fps), __name__.upper()) - if not merge_video(DeepFakeAI.globals.target_path, fps): - logger.error(wording.get('merging_video_failed'), __name__.upper()) - return - # handle audio - if DeepFakeAI.globals.skip_audio: - logger.info(wording.get('skipping_audio'), __name__.upper()) - move_temp(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path) - else: - logger.info(wording.get('restoring_audio'), __name__.upper()) - if not restore_audio(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path): - logger.warn(wording.get('restoring_audio_skipped'), 
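# [editor's note] process_video() above is a straight-line pipeline with early
# returns: analyse -> create temp -> extract -> process -> merge -> audio -> clear
# temp -> validate. Its control flow reduced to a skeleton, with illustrative stage
# names standing in for the real steps:
from typing import Callable, List, Tuple

def run_stages(stages: List[Tuple[str, Callable[[], bool]]]) -> bool:
    for name, stage in stages:
        if not stage():
            print('failed at: ' + name)
            return False
    return True

print(run_stages([ ('extract', lambda: True), ('merge', lambda: True) ]))  # True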
__name__.upper()) - move_temp(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path) - # clear temp - logger.info(wording.get('clearing_temp'), __name__.upper()) - clear_temp(DeepFakeAI.globals.target_path) - # validate video - if is_video(DeepFakeAI.globals.output_path): - logger.info(wording.get('processing_video_succeed'), __name__.upper()) - else: - logger.error(wording.get('processing_video_failed'), __name__.upper()) diff --git a/DeepFakeAI/download.py b/DeepFakeAI/download.py deleted file mode 100644 index e8b9d6ff37959049d09fe63b2498a81e36f3bc21..0000000000000000000000000000000000000000 --- a/DeepFakeAI/download.py +++ /dev/null @@ -1,44 +0,0 @@ -import os -import subprocess -import urllib.request -from typing import List -from concurrent.futures import ThreadPoolExecutor -from functools import lru_cache -from tqdm import tqdm - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.filesystem import is_file - - -def conditional_download(download_directory_path : str, urls : List[str]) -> None: - with ThreadPoolExecutor() as executor: - for url in urls: - executor.submit(get_download_size, url) - for url in urls: - download_file_path = os.path.join(download_directory_path, os.path.basename(url)) - initial = os.path.getsize(download_file_path) if is_file(download_file_path) else 0 - total = get_download_size(url) - if initial < total: - with tqdm(total = total, initial = initial, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = DeepFakeAI.globals.log_level in [ 'warn', 'error' ]) as progress: - subprocess.Popen([ 'curl', '--create-dirs', '--silent', '--insecure', '--location', '--continue-at', '-', '--output', download_file_path, url ]) - current = initial - while current < total: - if is_file(download_file_path): - current = os.path.getsize(download_file_path) - progress.update(current - progress.n) - - -@lru_cache(maxsize = None) -def get_download_size(url : str) -> int: - try: - response = urllib.request.urlopen(url, timeout = 10) - return int(response.getheader('Content-Length')) - except (OSError, ValueError): - return 0 - - -def is_download_done(url : str, file_path : str) -> bool: - if is_file(file_path): - return get_download_size(url) == os.path.getsize(file_path) - return False diff --git a/DeepFakeAI/execution_helper.py b/DeepFakeAI/execution_helper.py deleted file mode 100644 index 9c66865a84c6dc8fa7893e6c2f099a62daaed85e..0000000000000000000000000000000000000000 --- a/DeepFakeAI/execution_helper.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import List -import onnxruntime - - -def encode_execution_providers(execution_providers : List[str]) -> List[str]: - return [ execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers ] - - -def decode_execution_providers(execution_providers: List[str]) -> List[str]: - available_execution_providers = onnxruntime.get_available_providers() - encoded_execution_providers = encode_execution_providers(available_execution_providers) - return [ execution_provider for execution_provider, encoded_execution_provider in zip(available_execution_providers, encoded_execution_providers) if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers) ] - - -def map_device(execution_providers : List[str]) -> str: - if 'CoreMLExecutionProvider' in execution_providers: - return 'mps' - if 'CUDAExecutionProvider' in execution_providers or 'ROCMExecutionProvider' in 
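# [editor's note] conditional_download() resumes partial downloads: it compares the
# local file size against the remote Content-Length and only re-invokes curl with
# --continue-at when the file is incomplete. The decision in isolation:
import os

def needs_download(file_path: str, total_size: int) -> bool:
    current_size = os.path.getsize(file_path) if os.path.isfile(file_path) else 0
    return current_size < total_size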
execution_providers : - return 'cuda' - if 'OpenVINOExecutionProvider' in execution_providers: - return 'mkl' - return 'cpu' diff --git a/DeepFakeAI/face_analyser.py b/DeepFakeAI/face_analyser.py deleted file mode 100644 index 611fc6aee2458dfa24fa4e6d713c3a6a6be03e85..0000000000000000000000000000000000000000 --- a/DeepFakeAI/face_analyser.py +++ /dev/null @@ -1,347 +0,0 @@ -from typing import Any, Optional, List, Tuple -import threading -import cv2 -import numpy -import onnxruntime - -import DeepFakeAI.globals -from DeepFakeAI.download import conditional_download -from DeepFakeAI.face_store import get_static_faces, set_static_faces -from DeepFakeAI.face_helper import warp_face, create_static_anchors, distance_to_kps, distance_to_bbox, apply_nms -from DeepFakeAI.filesystem import resolve_relative_path -from DeepFakeAI.typing import Frame, Face, FaceSet, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, ModelSet, Bbox, Kps, Score, Embedding -from DeepFakeAI.vision import resize_frame_dimension - -FACE_ANALYSER = None -THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore() -THREAD_LOCK : threading.Lock = threading.Lock() -MODELS : ModelSet =\ -{ - 'face_detector_retinaface': - { - 'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/retinaface_10g.onnx', - 'path': resolve_relative_path('../.assets/models/retinaface_10g.onnx') - }, - 'face_detector_yunet': - { - 'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/yunet_2023mar.onnx', - 'path': resolve_relative_path('../.assets/models/yunet_2023mar.onnx') - }, - 'face_recognizer_arcface_blendswap': - { - 'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/arcface_w600k_r50.onnx', - 'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx') - }, - 'face_recognizer_arcface_inswapper': - { - 'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/arcface_w600k_r50.onnx', - 'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx') - }, - 'face_recognizer_arcface_simswap': - { - 'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/arcface_simswap.onnx', - 'path': resolve_relative_path('../.assets/models/arcface_simswap.onnx') - }, - 'gender_age': - { - 'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/gender_age.onnx', - 'path': resolve_relative_path('../.assets/models/gender_age.onnx') - } -} - - -def get_face_analyser() -> Any: - global FACE_ANALYSER - - with THREAD_LOCK: - if FACE_ANALYSER is None: - if DeepFakeAI.globals.face_detector_model == 'retinaface': - face_detector = onnxruntime.InferenceSession(MODELS.get('face_detector_retinaface').get('path'), providers = DeepFakeAI.globals.execution_providers) - if DeepFakeAI.globals.face_detector_model == 'yunet': - face_detector = cv2.FaceDetectorYN.create(MODELS.get('face_detector_yunet').get('path'), '', (0, 0)) - if DeepFakeAI.globals.face_recognizer_model == 'arcface_blendswap': - face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_blendswap').get('path'), providers = DeepFakeAI.globals.execution_providers) - if DeepFakeAI.globals.face_recognizer_model == 'arcface_inswapper': - face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_inswapper').get('path'), providers = DeepFakeAI.globals.execution_providers) - if DeepFakeAI.globals.face_recognizer_model == 'arcface_simswap': - face_recognizer = 
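# [editor's note] encode_execution_providers() turns onnxruntime provider names into
# short CLI tokens ('CUDAExecutionProvider' -> 'cuda'); decode_execution_providers()
# then matches user tokens back against whatever this install actually offers. The
# encoding step on its own:
providers = [ 'CPUExecutionProvider', 'CUDAExecutionProvider' ]
encoded = [ provider.replace('ExecutionProvider', '').lower() for provider in providers ]
print(encoded)  # ['cpu', 'cuda']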
onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_simswap').get('path'), providers = DeepFakeAI.globals.execution_providers) - gender_age = onnxruntime.InferenceSession(MODELS.get('gender_age').get('path'), providers = DeepFakeAI.globals.execution_providers) - FACE_ANALYSER =\ - { - 'face_detector': face_detector, - 'face_recognizer': face_recognizer, - 'gender_age': gender_age - } - return FACE_ANALYSER - - -def clear_face_analyser() -> Any: - global FACE_ANALYSER - - FACE_ANALYSER = None - - -def pre_check() -> bool: - if not DeepFakeAI.globals.skip_download: - download_directory_path = resolve_relative_path('../.assets/models') - model_urls =\ - [ - MODELS.get('face_detector_retinaface').get('url'), - MODELS.get('face_detector_yunet').get('url'), - MODELS.get('face_recognizer_arcface_inswapper').get('url'), - MODELS.get('face_recognizer_arcface_simswap').get('url'), - MODELS.get('gender_age').get('url') - ] - conditional_download(download_directory_path, model_urls) - return True - - -def extract_faces(frame: Frame) -> List[Face]: - face_detector_width, face_detector_height = map(int, DeepFakeAI.globals.face_detector_size.split('x')) - frame_height, frame_width, _ = frame.shape - temp_frame = resize_frame_dimension(frame, face_detector_width, face_detector_height) - temp_frame_height, temp_frame_width, _ = temp_frame.shape - ratio_height = frame_height / temp_frame_height - ratio_width = frame_width / temp_frame_width - if DeepFakeAI.globals.face_detector_model == 'retinaface': - bbox_list, kps_list, score_list = detect_with_retinaface(temp_frame, temp_frame_height, temp_frame_width, face_detector_height, face_detector_width, ratio_height, ratio_width) - return create_faces(frame, bbox_list, kps_list, score_list) - elif DeepFakeAI.globals.face_detector_model == 'yunet': - bbox_list, kps_list, score_list = detect_with_yunet(temp_frame, temp_frame_height, temp_frame_width, ratio_height, ratio_width) - return create_faces(frame, bbox_list, kps_list, score_list) - return [] - - -def detect_with_retinaface(temp_frame : Frame, temp_frame_height : int, temp_frame_width : int, face_detector_height : int, face_detector_width : int, ratio_height : float, ratio_width : float) -> Tuple[List[Bbox], List[Kps], List[Score]]: - face_detector = get_face_analyser().get('face_detector') - bbox_list = [] - kps_list = [] - score_list = [] - feature_strides = [ 8, 16, 32 ] - feature_map_channel = 3 - anchor_total = 2 - prepare_frame = numpy.zeros((face_detector_height, face_detector_width, 3)) - prepare_frame[:temp_frame_height, :temp_frame_width, :] = temp_frame - temp_frame = (prepare_frame - 127.5) / 128.0 - temp_frame = numpy.expand_dims(temp_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32) - with THREAD_SEMAPHORE: - detections = face_detector.run(None, - { - face_detector.get_inputs()[0].name: temp_frame - }) - for index, feature_stride in enumerate(feature_strides): - keep_indices = numpy.where(detections[index] >= DeepFakeAI.globals.face_detector_score)[0] - if keep_indices.any(): - stride_height = face_detector_height // feature_stride - stride_width = face_detector_width // feature_stride - anchors = create_static_anchors(feature_stride, anchor_total, stride_height, stride_width) - bbox_raw = (detections[index + feature_map_channel] * feature_stride) - kps_raw = detections[index + feature_map_channel * 2] * feature_stride - for bbox in distance_to_bbox(anchors, bbox_raw)[keep_indices]: - bbox_list.append(numpy.array( - [ - bbox[0] * ratio_width, - bbox[1] * ratio_height, - 
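# [editor's note] extract_faces() runs detection on a resized copy of the frame and
# uses ratio_width / ratio_height to map results back to the original resolution.
# Worked example for a 1920x1080 frame detected at 640x360:
frame_width, frame_height = 1920, 1080
temp_width, temp_height = 640, 360
ratio_width = frame_width / temp_width     # 3.0
ratio_height = frame_height / temp_height  # 3.0
bbox_in_temp = (100.0, 50.0, 200.0, 150.0)
ratios = (ratio_width, ratio_height, ratio_width, ratio_height)
bbox_in_frame = tuple(value * ratio for value, ratio in zip(bbox_in_temp, ratios))
print(bbox_in_frame)  # (300.0, 150.0, 600.0, 450.0)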
bbox[2] * ratio_width, - bbox[3] * ratio_height - ])) - for kps in distance_to_kps(anchors, kps_raw)[keep_indices]: - kps_list.append(kps * [ ratio_width, ratio_height ]) - for score in detections[index][keep_indices]: - score_list.append(score[0]) - return bbox_list, kps_list, score_list - - -def detect_with_yunet(temp_frame : Frame, temp_frame_height : int, temp_frame_width : int, ratio_height : float, ratio_width : float) -> Tuple[List[Bbox], List[Kps], List[Score]]: - face_detector = get_face_analyser().get('face_detector') - face_detector.setInputSize((temp_frame_width, temp_frame_height)) - face_detector.setScoreThreshold(DeepFakeAI.globals.face_detector_score) - bbox_list = [] - kps_list = [] - score_list = [] - with THREAD_SEMAPHORE: - _, detections = face_detector.detect(temp_frame) - if detections.any(): - for detection in detections: - bbox_list.append(numpy.array( - [ - detection[0] * ratio_width, - detection[1] * ratio_height, - (detection[0] + detection[2]) * ratio_width, - (detection[1] + detection[3]) * ratio_height - ])) - kps_list.append(detection[4:14].reshape((5, 2)) * [ ratio_width, ratio_height]) - score_list.append(detection[14]) - return bbox_list, kps_list, score_list - - -def create_faces(frame : Frame, bbox_list : List[Bbox], kps_list : List[Kps], score_list : List[Score]) -> List[Face]: - faces = [] - if DeepFakeAI.globals.face_detector_score > 0: - sort_indices = numpy.argsort(-numpy.array(score_list)) - bbox_list = [ bbox_list[index] for index in sort_indices ] - kps_list = [ kps_list[index] for index in sort_indices ] - score_list = [ score_list[index] for index in sort_indices ] - keep_indices = apply_nms(bbox_list, 0.4) - for index in keep_indices: - bbox = bbox_list[index] - kps = kps_list[index] - score = score_list[index] - embedding, normed_embedding = calc_embedding(frame, kps) - gender, age = detect_gender_age(frame, kps) - faces.append(Face( - bbox = bbox, - kps = kps, - score = score, - embedding = embedding, - normed_embedding = normed_embedding, - gender = gender, - age = age - )) - return faces - - -def calc_embedding(temp_frame : Frame, kps : Kps) -> Tuple[Embedding, Embedding]: - face_recognizer = get_face_analyser().get('face_recognizer') - crop_frame, matrix = warp_face(temp_frame, kps, 'arcface_112_v2', (112, 112)) - crop_frame = crop_frame.astype(numpy.float32) / 127.5 - 1 - crop_frame = crop_frame[:, :, ::-1].transpose(2, 0, 1) - crop_frame = numpy.expand_dims(crop_frame, axis = 0) - embedding = face_recognizer.run(None, - { - face_recognizer.get_inputs()[0].name: crop_frame - })[0] - embedding = embedding.ravel() - normed_embedding = embedding / numpy.linalg.norm(embedding) - return embedding, normed_embedding - - -def detect_gender_age(frame : Frame, kps : Kps) -> Tuple[int, int]: - gender_age = get_face_analyser().get('gender_age') - crop_frame, affine_matrix = warp_face(frame, kps, 'arcface_112_v2', (96, 96)) - crop_frame = numpy.expand_dims(crop_frame, axis = 0).transpose(0, 3, 1, 2).astype(numpy.float32) - prediction = gender_age.run(None, - { - gender_age.get_inputs()[0].name: crop_frame - })[0][0] - gender = int(numpy.argmax(prediction[:2])) - age = int(numpy.round(prediction[2] * 100)) - return gender, age - - -def get_one_face(frame : Frame, position : int = 0) -> Optional[Face]: - many_faces = get_many_faces(frame) - if many_faces: - try: - return many_faces[position] - except IndexError: - return many_faces[-1] - return None - - -def get_average_face(frames : List[Frame], position : int = 0) -> Optional[Face]: - average_face = 
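# [editor's note] calc_embedding() L2-normalises the ArcFace embedding so that
# compare_faces() further down can use 1 - dot(a, b) as a cosine distance. Worked
# example with a toy 2-d embedding:
import numpy

embedding = numpy.array([ 3.0, 4.0 ])
normed_embedding = embedding / numpy.linalg.norm(embedding)  # [0.6, 0.8]
reference = numpy.array([ 0.6, 0.8 ])
distance = 1 - numpy.dot(normed_embedding, reference)
print(distance < 0.6)  # True: same direction -> distance 0.0, i.e. a match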
None - faces = [] - embedding_list = [] - normed_embedding_list = [] - for frame in frames: - face = get_one_face(frame, position) - if face: - faces.append(face) - embedding_list.append(face.embedding) - normed_embedding_list.append(face.normed_embedding) - if faces: - average_face = Face( - bbox = faces[0].bbox, - kps = faces[0].kps, - score = faces[0].score, - embedding = numpy.mean(embedding_list, axis = 0), - normed_embedding = numpy.mean(normed_embedding_list, axis = 0), - gender = faces[0].gender, - age = faces[0].age - ) - return average_face - - -def get_many_faces(frame : Frame) -> List[Face]: - try: - faces_cache = get_static_faces(frame) - if faces_cache: - faces = faces_cache - else: - faces = extract_faces(frame) - set_static_faces(frame, faces) - if DeepFakeAI.globals.face_analyser_order: - faces = sort_by_order(faces, DeepFakeAI.globals.face_analyser_order) - if DeepFakeAI.globals.face_analyser_age: - faces = filter_by_age(faces, DeepFakeAI.globals.face_analyser_age) - if DeepFakeAI.globals.face_analyser_gender: - faces = filter_by_gender(faces, DeepFakeAI.globals.face_analyser_gender) - return faces - except (AttributeError, ValueError): - return [] - - -def find_similar_faces(frame : Frame, reference_faces : FaceSet, face_distance : float) -> List[Face]: - similar_faces : List[Face] = [] - many_faces = get_many_faces(frame) - - if reference_faces: - for reference_set in reference_faces: - if not similar_faces: - for reference_face in reference_faces[reference_set]: - for face in many_faces: - if compare_faces(face, reference_face, face_distance): - similar_faces.append(face) - return similar_faces - - -def compare_faces(face : Face, reference_face : Face, face_distance : float) -> bool: - if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'): - current_face_distance = 1 - numpy.dot(face.normed_embedding, reference_face.normed_embedding) - return current_face_distance < face_distance - return False - - -def sort_by_order(faces : List[Face], order : FaceAnalyserOrder) -> List[Face]: - if order == 'left-right': - return sorted(faces, key = lambda face: face.bbox[0]) - if order == 'right-left': - return sorted(faces, key = lambda face: face.bbox[0], reverse = True) - if order == 'top-bottom': - return sorted(faces, key = lambda face: face.bbox[1]) - if order == 'bottom-top': - return sorted(faces, key = lambda face: face.bbox[1], reverse = True) - if order == 'small-large': - return sorted(faces, key = lambda face: (face.bbox[2] - face.bbox[0]) * (face.bbox[3] - face.bbox[1])) - if order == 'large-small': - return sorted(faces, key = lambda face: (face.bbox[2] - face.bbox[0]) * (face.bbox[3] - face.bbox[1]), reverse = True) - if order == 'best-worst': - return sorted(faces, key = lambda face: face.score, reverse = True) - if order == 'worst-best': - return sorted(faces, key = lambda face: face.score) - return faces - - -def filter_by_age(faces : List[Face], age : FaceAnalyserAge) -> List[Face]: - filter_faces = [] - for face in faces: - if face.age < 13 and age == 'child': - filter_faces.append(face) - elif face.age < 19 and age == 'teen': - filter_faces.append(face) - elif face.age < 60 and age == 'adult': - filter_faces.append(face) - elif face.age > 59 and age == 'senior': - filter_faces.append(face) - return filter_faces - - -def filter_by_gender(faces : List[Face], gender : FaceAnalyserGender) -> List[Face]: - filter_faces = [] - for face in faces: - if face.gender == 0 and gender == 'female': - filter_faces.append(face) - if face.gender == 
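# [editor's note] sort_by_order() ranks faces purely by bbox geometry or score. With
# plain (x1, y1, x2, y2) tuples standing in for Face.bbox:
bboxes = [ (200.0, 10.0, 260.0, 80.0), (20.0, 15.0, 90.0, 95.0) ]
left_right = sorted(bboxes, key = lambda bbox: bbox[0])
large_small = sorted(bboxes, key = lambda bbox: (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]), reverse = True)
print(left_right[0][0], large_small[0][0])  # 20.0 20.0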
1 and gender == 'male': - filter_faces.append(face) - return filter_faces diff --git a/DeepFakeAI/face_helper.py b/DeepFakeAI/face_helper.py deleted file mode 100644 index 54488bae1a8c4dd4abe255c3a2812cf087b99810..0000000000000000000000000000000000000000 --- a/DeepFakeAI/face_helper.py +++ /dev/null @@ -1,111 +0,0 @@ -from typing import Any, Dict, Tuple, List -from cv2.typing import Size -from functools import lru_cache -import cv2 -import numpy - -from DeepFakeAI.typing import Bbox, Kps, Frame, Mask, Matrix, Template - -TEMPLATES : Dict[Template, numpy.ndarray[Any, Any]] =\ -{ - 'arcface_112_v1': numpy.array( - [ - [ 39.7300, 51.1380 ], - [ 72.2700, 51.1380 ], - [ 56.0000, 68.4930 ], - [ 42.4630, 87.0100 ], - [ 69.5370, 87.0100 ] - ]), - 'arcface_112_v2': numpy.array( - [ - [ 38.2946, 51.6963 ], - [ 73.5318, 51.5014 ], - [ 56.0252, 71.7366 ], - [ 41.5493, 92.3655 ], - [ 70.7299, 92.2041 ] - ]), - 'arcface_128_v2': numpy.array( - [ - [ 46.2946, 51.6963 ], - [ 81.5318, 51.5014 ], - [ 64.0252, 71.7366 ], - [ 49.5493, 92.3655 ], - [ 78.7299, 92.2041 ] - ]), - 'ffhq_512': numpy.array( - [ - [ 192.98138, 239.94708 ], - [ 318.90277, 240.1936 ], - [ 256.63416, 314.01935 ], - [ 201.26117, 371.41043 ], - [ 313.08905, 371.15118 ] - ]) -} - - -def warp_face(temp_frame : Frame, kps : Kps, template : Template, size : Size) -> Tuple[Frame, Matrix]: - normed_template = TEMPLATES.get(template) * size[1] / size[0] - affine_matrix = cv2.estimateAffinePartial2D(kps, normed_template, method = cv2.RANSAC, ransacReprojThreshold = 100)[0] - crop_frame = cv2.warpAffine(temp_frame, affine_matrix, (size[1], size[1]), borderMode = cv2.BORDER_REPLICATE) - return crop_frame, affine_matrix - - -def paste_back(temp_frame : Frame, crop_frame: Frame, crop_mask : Mask, affine_matrix : Matrix) -> Frame: - inverse_matrix = cv2.invertAffineTransform(affine_matrix) - temp_frame_size = temp_frame.shape[:2][::-1] - inverse_crop_mask = cv2.warpAffine(crop_mask, inverse_matrix, temp_frame_size).clip(0, 1) - inverse_crop_frame = cv2.warpAffine(crop_frame, inverse_matrix, temp_frame_size, borderMode = cv2.BORDER_REPLICATE) - paste_frame = temp_frame.copy() - paste_frame[:, :, 0] = inverse_crop_mask * inverse_crop_frame[:, :, 0] + (1 - inverse_crop_mask) * temp_frame[:, :, 0] - paste_frame[:, :, 1] = inverse_crop_mask * inverse_crop_frame[:, :, 1] + (1 - inverse_crop_mask) * temp_frame[:, :, 1] - paste_frame[:, :, 2] = inverse_crop_mask * inverse_crop_frame[:, :, 2] + (1 - inverse_crop_mask) * temp_frame[:, :, 2] - return paste_frame - - -@lru_cache(maxsize = None) -def create_static_anchors(feature_stride : int, anchor_total : int, stride_height : int, stride_width : int) -> numpy.ndarray[Any, Any]: - y, x = numpy.mgrid[:stride_height, :stride_width][::-1] - anchors = numpy.stack((y, x), axis = -1) - anchors = (anchors * feature_stride).reshape((-1, 2)) - anchors = numpy.stack([ anchors ] * anchor_total, axis = 1).reshape((-1, 2)) - return anchors - - -def distance_to_bbox(points : numpy.ndarray[Any, Any], distance : numpy.ndarray[Any, Any]) -> Bbox: - x1 = points[:, 0] - distance[:, 0] - y1 = points[:, 1] - distance[:, 1] - x2 = points[:, 0] + distance[:, 2] - y2 = points[:, 1] + distance[:, 3] - bbox = numpy.column_stack([ x1, y1, x2, y2 ]) - return bbox - - -def distance_to_kps(points : numpy.ndarray[Any, Any], distance : numpy.ndarray[Any, Any]) -> Kps: - x = points[:, 0::2] + distance[:, 0::2] - y = points[:, 1::2] + distance[:, 1::2] - kps = numpy.stack((x, y), axis = -1) - return kps - - -def apply_nms(bbox_list : List[Bbox], 
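# [editor's note] distance_to_bbox() in face_helper decodes anchor-relative distances
# into box corners: (x1, y1) = point - (left, top), (x2, y2) = point + (right,
# bottom). Worked example for a single anchor point:
import numpy

points = numpy.array([ [ 100.0, 100.0 ] ])
distance = numpy.array([ [ 10.0, 20.0, 30.0, 40.0 ] ])
x1 = points[:, 0] - distance[:, 0]
y1 = points[:, 1] - distance[:, 1]
x2 = points[:, 0] + distance[:, 2]
y2 = points[:, 1] + distance[:, 3]
print(numpy.column_stack([ x1, y1, x2, y2 ]))  # [[ 90.  80. 130. 140.]]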
iou_threshold : float) -> List[int]: - keep_indices = [] - dimension_list = numpy.reshape(bbox_list, (-1, 4)) - x1 = dimension_list[:, 0] - y1 = dimension_list[:, 1] - x2 = dimension_list[:, 2] - y2 = dimension_list[:, 3] - areas = (x2 - x1 + 1) * (y2 - y1 + 1) - indices = numpy.arange(len(bbox_list)) - while indices.size > 0: - index = indices[0] - remain_indices = indices[1:] - keep_indices.append(index) - xx1 = numpy.maximum(x1[index], x1[remain_indices]) - yy1 = numpy.maximum(y1[index], y1[remain_indices]) - xx2 = numpy.minimum(x2[index], x2[remain_indices]) - yy2 = numpy.minimum(y2[index], y2[remain_indices]) - width = numpy.maximum(0, xx2 - xx1 + 1) - height = numpy.maximum(0, yy2 - yy1 + 1) - iou = width * height / (areas[index] + areas[remain_indices] - width * height) - indices = indices[numpy.where(iou <= iou_threshold)[0] + 1] - return keep_indices diff --git a/DeepFakeAI/face_masker.py b/DeepFakeAI/face_masker.py deleted file mode 100644 index 5518d0f9af70e2b36b305f605aac6723fdb27054..0000000000000000000000000000000000000000 --- a/DeepFakeAI/face_masker.py +++ /dev/null @@ -1,128 +0,0 @@ -from typing import Any, Dict, List -from cv2.typing import Size -from functools import lru_cache -import threading -import cv2 -import numpy -import onnxruntime - -import DeepFakeAI.globals -from DeepFakeAI.typing import Frame, Mask, Padding, FaceMaskRegion, ModelSet -from DeepFakeAI.filesystem import resolve_relative_path -from DeepFakeAI.download import conditional_download - -FACE_OCCLUDER = None -FACE_PARSER = None -THREAD_LOCK : threading.Lock = threading.Lock() -MODELS : ModelSet =\ -{ - 'face_occluder': - { - 'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/face_occluder.onnx', - 'path': resolve_relative_path('../.assets/models/face_occluder.onnx') - }, - 'face_parser': - { - 'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/face_parser.onnx', - 'path': resolve_relative_path('../.assets/models/face_parser.onnx') - } -} -FACE_MASK_REGIONS : Dict[FaceMaskRegion, int] =\ -{ - 'skin': 1, - 'left-eyebrow': 2, - 'right-eyebrow': 3, - 'left-eye': 4, - 'right-eye': 5, - 'eye-glasses': 6, - 'nose': 10, - 'mouth': 11, - 'upper-lip': 12, - 'lower-lip': 13 -} - - -def get_face_occluder() -> Any: - global FACE_OCCLUDER - - with THREAD_LOCK: - if FACE_OCCLUDER is None: - model_path = MODELS.get('face_occluder').get('path') - FACE_OCCLUDER = onnxruntime.InferenceSession(model_path, providers = DeepFakeAI.globals.execution_providers) - return FACE_OCCLUDER - - -def get_face_parser() -> Any: - global FACE_PARSER - - with THREAD_LOCK: - if FACE_PARSER is None: - model_path = MODELS.get('face_parser').get('path') - FACE_PARSER = onnxruntime.InferenceSession(model_path, providers = DeepFakeAI.globals.execution_providers) - return FACE_PARSER - - -def clear_face_occluder() -> None: - global FACE_OCCLUDER - - FACE_OCCLUDER = None - - -def clear_face_parser() -> None: - global FACE_PARSER - - FACE_PARSER = None - - -def pre_check() -> bool: - if not DeepFakeAI.globals.skip_download: - download_directory_path = resolve_relative_path('../.assets/models') - model_urls =\ - [ - MODELS.get('face_occluder').get('url'), - MODELS.get('face_parser').get('url'), - ] - conditional_download(download_directory_path, model_urls) - return True - - -@lru_cache(maxsize = None) -def create_static_box_mask(crop_size : Size, face_mask_blur : float, face_mask_padding : Padding) -> Mask: - blur_amount = int(crop_size[0] * 0.5 * face_mask_blur) - blur_area = 
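# [editor's note] apply_nms() above is greedy non-maximum suppression: keep the first
# (highest-scoring) box, drop the remaining boxes whose IoU with it exceeds the
# threshold, repeat. IoU of two overlapping boxes, using the same +1 convention:
import numpy

def iou(a, b) -> float:
    xx1, yy1 = numpy.maximum(a[0], b[0]), numpy.maximum(a[1], b[1])
    xx2, yy2 = numpy.minimum(a[2], b[2]), numpy.minimum(a[3], b[3])
    intersection = max(0, xx2 - xx1 + 1) * max(0, yy2 - yy1 + 1)
    area_a = (a[2] - a[0] + 1) * (a[3] - a[1] + 1)
    area_b = (b[2] - b[0] + 1) * (b[3] - b[1] + 1)
    return intersection / (area_a + area_b - intersection)

print(round(iou((0, 0, 10, 10), (5, 5, 15, 15)), 3))  # 0.175 -> kept at threshold 0.4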
max(blur_amount // 2, 1) - box_mask = numpy.ones(crop_size, numpy.float32) - box_mask[:max(blur_area, int(crop_size[1] * face_mask_padding[0] / 100)), :] = 0 - box_mask[-max(blur_area, int(crop_size[1] * face_mask_padding[2] / 100)):, :] = 0 - box_mask[:, :max(blur_area, int(crop_size[0] * face_mask_padding[3] / 100))] = 0 - box_mask[:, -max(blur_area, int(crop_size[0] * face_mask_padding[1] / 100)):] = 0 - if blur_amount > 0: - box_mask = cv2.GaussianBlur(box_mask, (0, 0), blur_amount * 0.25) - return box_mask - - -def create_occlusion_mask(crop_frame : Frame) -> Mask: - face_occluder = get_face_occluder() - prepare_frame = cv2.resize(crop_frame, face_occluder.get_inputs()[0].shape[1:3][::-1]) - prepare_frame = numpy.expand_dims(prepare_frame, axis = 0).astype(numpy.float32) / 255 - prepare_frame = prepare_frame.transpose(0, 1, 2, 3) - occlusion_mask = face_occluder.run(None, - { - face_occluder.get_inputs()[0].name: prepare_frame - })[0][0] - occlusion_mask = occlusion_mask.transpose(0, 1, 2).clip(0, 1).astype(numpy.float32) - occlusion_mask = cv2.resize(occlusion_mask, crop_frame.shape[:2][::-1]) - return occlusion_mask - - -def create_region_mask(crop_frame : Frame, face_mask_regions : List[FaceMaskRegion]) -> Mask: - face_parser = get_face_parser() - prepare_frame = cv2.flip(cv2.resize(crop_frame, (512, 512)), 1) - prepare_frame = numpy.expand_dims(prepare_frame, axis = 0).astype(numpy.float32)[:, :, ::-1] / 127.5 - 1 - prepare_frame = prepare_frame.transpose(0, 3, 1, 2) - region_mask = face_parser.run(None, - { - face_parser.get_inputs()[0].name: prepare_frame - })[0][0] - region_mask = numpy.isin(region_mask.argmax(0), [ FACE_MASK_REGIONS[region] for region in face_mask_regions ]) - region_mask = cv2.resize(region_mask.astype(numpy.float32), crop_frame.shape[:2][::-1]) - return region_mask diff --git a/DeepFakeAI/face_store.py b/DeepFakeAI/face_store.py deleted file mode 100644 index 66889e036c325c45963b1ab688fc5f0fd36b89d2..0000000000000000000000000000000000000000 --- a/DeepFakeAI/face_store.py +++ /dev/null @@ -1,47 +0,0 @@ -from typing import Optional, List -import hashlib - -from DeepFakeAI.typing import Frame, Face, FaceStore, FaceSet - -FACE_STORE: FaceStore =\ -{ - 'static_faces': {}, - 'reference_faces': {} -} - - -def get_static_faces(frame : Frame) -> Optional[List[Face]]: - frame_hash = create_frame_hash(frame) - if frame_hash in FACE_STORE['static_faces']: - return FACE_STORE['static_faces'][frame_hash] - return None - - -def set_static_faces(frame : Frame, faces : List[Face]) -> None: - frame_hash = create_frame_hash(frame) - if frame_hash: - FACE_STORE['static_faces'][frame_hash] = faces - - -def clear_static_faces() -> None: - FACE_STORE['static_faces'] = {} - - -def create_frame_hash(frame: Frame) -> Optional[str]: - return hashlib.sha1(frame.tobytes()).hexdigest() if frame.any() else None - - -def get_reference_faces() -> Optional[FaceSet]: - if FACE_STORE['reference_faces']: - return FACE_STORE['reference_faces'] - return None - - -def append_reference_face(name : str, face : Face) -> None: - if name not in FACE_STORE['reference_faces']: - FACE_STORE['reference_faces'][name] = [] - FACE_STORE['reference_faces'][name].append(face) - - -def clear_reference_faces() -> None: - FACE_STORE['reference_faces'] = {} diff --git a/DeepFakeAI/ffmpeg.py b/DeepFakeAI/ffmpeg.py deleted file mode 100644 index 3f142b3af4364b28524ac5d8872d675660722acd..0000000000000000000000000000000000000000 --- a/DeepFakeAI/ffmpeg.py +++ /dev/null @@ -1,81 +0,0 @@ -from typing import List -import 
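# [editor's note] create_static_box_mask() zeroes a border whose thickness comes from
# percentage paddings (top/right/bottom/left) and then feathers the edge with a
# Gaussian blur. The padding arithmetic alone, with the blur left out:
import numpy

crop_size, face_mask_padding = (100, 100), (10, 0, 10, 0)  # 10% top and bottom
box_mask = numpy.ones(crop_size, numpy.float32)
box_mask[:int(crop_size[1] * face_mask_padding[0] / 100), :] = 0
box_mask[-int(crop_size[1] * face_mask_padding[2] / 100):, :] = 0
print(box_mask.sum())  # 8000.0 -> 80 of 100 rows survive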
subprocess - -import DeepFakeAI.globals -from DeepFakeAI import logger -from DeepFakeAI.filesystem import get_temp_frames_pattern, get_temp_output_video_path -from DeepFakeAI.vision import detect_fps - - -def run_ffmpeg(args : List[str]) -> bool: - commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ] - commands.extend(args) - try: - subprocess.run(commands, stderr = subprocess.PIPE, check = True) - return True - except subprocess.CalledProcessError as exception: - logger.debug(exception.stderr.decode().strip(), __name__.upper()) - return False - - -def open_ffmpeg(args : List[str]) -> subprocess.Popen[bytes]: - commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ] - commands.extend(args) - return subprocess.Popen(commands, stdin = subprocess.PIPE) - - -def extract_frames(target_path : str, fps : float) -> bool: - temp_frame_compression = round(31 - (DeepFakeAI.globals.temp_frame_quality * 0.31)) - trim_frame_start = DeepFakeAI.globals.trim_frame_start - trim_frame_end = DeepFakeAI.globals.trim_frame_end - temp_frames_pattern = get_temp_frames_pattern(target_path, '%04d') - commands = [ '-hwaccel', 'auto', '-i', target_path, '-q:v', str(temp_frame_compression), '-pix_fmt', 'rgb24' ] - if trim_frame_start is not None and trim_frame_end is not None: - commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ':end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ]) - elif trim_frame_start is not None: - commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ',fps=' + str(fps) ]) - elif trim_frame_end is not None: - commands.extend([ '-vf', 'trim=end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ]) - else: - commands.extend([ '-vf', 'fps=' + str(fps) ]) - commands.extend([ '-vsync', '0', temp_frames_pattern ]) - return run_ffmpeg(commands) - - -def compress_image(output_path : str) -> bool: - output_image_compression = round(31 - (DeepFakeAI.globals.output_image_quality * 0.31)) - commands = [ '-hwaccel', 'auto', '-i', output_path, '-q:v', str(output_image_compression), '-y', output_path ] - return run_ffmpeg(commands) - - -def merge_video(target_path : str, fps : float) -> bool: - temp_output_video_path = get_temp_output_video_path(target_path) - temp_frames_pattern = get_temp_frames_pattern(target_path, '%04d') - commands = [ '-hwaccel', 'auto', '-r', str(fps), '-i', temp_frames_pattern, '-c:v', DeepFakeAI.globals.output_video_encoder ] - if DeepFakeAI.globals.output_video_encoder in [ 'libx264', 'libx265' ]: - output_video_compression = round(51 - (DeepFakeAI.globals.output_video_quality * 0.51)) - commands.extend([ '-crf', str(output_video_compression) ]) - if DeepFakeAI.globals.output_video_encoder in [ 'libvpx-vp9' ]: - output_video_compression = round(63 - (DeepFakeAI.globals.output_video_quality * 0.63)) - commands.extend([ '-crf', str(output_video_compression) ]) - if DeepFakeAI.globals.output_video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]: - output_video_compression = round(51 - (DeepFakeAI.globals.output_video_quality * 0.51)) - commands.extend([ '-cq', str(output_video_compression) ]) - commands.extend([ '-pix_fmt', 'yuv420p', '-colorspace', 'bt709', '-y', temp_output_video_path ]) - return run_ffmpeg(commands) - - -def restore_audio(target_path : str, output_path : str) -> bool: - fps = detect_fps(target_path) - trim_frame_start = DeepFakeAI.globals.trim_frame_start - trim_frame_end = DeepFakeAI.globals.trim_frame_end - temp_output_video_path = get_temp_output_video_path(target_path) - commands = [ '-hwaccel', 'auto', '-i', 
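# [editor's note] merge_video() maps the 0-100 --output-video-quality setting onto
# the encoder's native scale, e.g. CRF 51..0 for libx264/libx265 (higher quality
# means a lower CRF):
def quality_to_crf(output_video_quality: int) -> int:
    return round(51 - (output_video_quality * 0.51))

print(quality_to_crf(80), quality_to_crf(100))  # 10 0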
temp_output_video_path ] - if trim_frame_start is not None: - start_time = trim_frame_start / fps - commands.extend([ '-ss', str(start_time) ]) - if trim_frame_end is not None: - end_time = trim_frame_end / fps - commands.extend([ '-to', str(end_time) ]) - commands.extend([ '-i', target_path, '-c', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-shortest', '-y', output_path ]) - return run_ffmpeg(commands) diff --git a/DeepFakeAI/filesystem.py b/DeepFakeAI/filesystem.py deleted file mode 100644 index cf29d19ce01bab55b81ca1255dd87f99f881766c..0000000000000000000000000000000000000000 --- a/DeepFakeAI/filesystem.py +++ /dev/null @@ -1,91 +0,0 @@ -from typing import List, Optional -import glob -import os -import shutil -import tempfile -import filetype -from pathlib import Path - -import DeepFakeAI.globals - -TEMP_DIRECTORY_PATH = os.path.join(tempfile.gettempdir(), 'DeepFakeAI') -TEMP_OUTPUT_VIDEO_NAME = 'temp.mp4' - - -def get_temp_frame_paths(target_path : str) -> List[str]: - temp_frames_pattern = get_temp_frames_pattern(target_path, '*') - return sorted(glob.glob(temp_frames_pattern)) - - -def get_temp_frames_pattern(target_path : str, temp_frame_prefix : str) -> str: - temp_directory_path = get_temp_directory_path(target_path) - return os.path.join(temp_directory_path, temp_frame_prefix + '.' + DeepFakeAI.globals.temp_frame_format) - - -def get_temp_directory_path(target_path : str) -> str: - target_name, _ = os.path.splitext(os.path.basename(target_path)) - return os.path.join(TEMP_DIRECTORY_PATH, target_name) - - -def get_temp_output_video_path(target_path : str) -> str: - temp_directory_path = get_temp_directory_path(target_path) - return os.path.join(temp_directory_path, TEMP_OUTPUT_VIDEO_NAME) - - -def create_temp(target_path : str) -> None: - temp_directory_path = get_temp_directory_path(target_path) - Path(temp_directory_path).mkdir(parents = True, exist_ok = True) - - -def move_temp(target_path : str, output_path : str) -> None: - temp_output_video_path = get_temp_output_video_path(target_path) - if is_file(temp_output_video_path): - if is_file(output_path): - os.remove(output_path) - shutil.move(temp_output_video_path, output_path) - - -def clear_temp(target_path : str) -> None: - temp_directory_path = get_temp_directory_path(target_path) - parent_directory_path = os.path.dirname(temp_directory_path) - if not DeepFakeAI.globals.keep_temp and is_directory(temp_directory_path): - shutil.rmtree(temp_directory_path) - if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path): - os.rmdir(parent_directory_path) - - -def is_file(file_path : str) -> bool: - return bool(file_path and os.path.isfile(file_path)) - - -def is_directory(directory_path : str) -> bool: - return bool(directory_path and os.path.isdir(directory_path)) - - -def is_image(image_path : str) -> bool: - if is_file(image_path): - return filetype.helpers.is_image(image_path) - return False - - -def are_images(image_paths : List[str]) -> bool: - if image_paths: - return all(is_image(image_path) for image_path in image_paths) - return False - - -def is_video(video_path : str) -> bool: - if is_file(video_path): - return filetype.helpers.is_video(video_path) - return False - - -def resolve_relative_path(path : str) -> str: - return os.path.abspath(os.path.join(os.path.dirname(__file__), path)) - - -def list_module_names(path : str) -> Optional[List[str]]: - if os.path.exists(path): - files = os.listdir(path) - return [ Path(file).stem for file in files if not Path(file).stem.startswith(('.', '__')) ] - 
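# [editor's note] filesystem.py derives every temp location from the target file
# name: <tempdir>/DeepFakeAI/<target name>/ holds the extracted frames (printf
# pattern) and temp.mp4. Reconstructing a frame pattern path:
import os
import tempfile

target_path = '/videos/clip.mp4'
target_name, _ = os.path.splitext(os.path.basename(target_path))
temp_directory_path = os.path.join(tempfile.gettempdir(), 'DeepFakeAI', target_name)
print(os.path.join(temp_directory_path, '%04d.jpg'))  # .../DeepFakeAI/clip/%04d.jpg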
-	return None
diff --git a/DeepFakeAI/globals.py b/DeepFakeAI/globals.py
deleted file mode 100644
index 572bfa63fce23c7a19ab2f604ec193880496a0c7..0000000000000000000000000000000000000000
--- a/DeepFakeAI/globals.py
+++ /dev/null
@@ -1,51 +0,0 @@
-from typing import List, Optional
-
-from DeepFakeAI.typing import LogLevel, FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceMaskType, FaceMaskRegion, OutputVideoEncoder, FaceDetectorModel, FaceRecognizerModel, TempFrameFormat, Padding
-
-# general
-source_paths : Optional[List[str]] = None
-target_path : Optional[str] = None
-output_path : Optional[str] = None
-# misc
-skip_download : Optional[bool] = None
-headless : Optional[bool] = None
-log_level : Optional[LogLevel] = None
-# execution
-execution_providers : List[str] = []
-execution_thread_count : Optional[int] = None
-execution_queue_count : Optional[int] = None
-max_memory : Optional[int] = None
-# face analyser
-face_analyser_order : Optional[FaceAnalyserOrder] = None
-face_analyser_age : Optional[FaceAnalyserAge] = None
-face_analyser_gender : Optional[FaceAnalyserGender] = None
-face_detector_model : Optional[FaceDetectorModel] = None
-face_detector_size : Optional[str] = None
-face_detector_score : Optional[float] = None
-face_recognizer_model : Optional[FaceRecognizerModel] = None
-# face selector
-face_selector_mode : Optional[FaceSelectorMode] = None
-reference_face_position : Optional[int] = None
-reference_face_distance : Optional[float] = None
-reference_frame_number : Optional[int] = None
-# face mask
-face_mask_types : Optional[List[FaceMaskType]] = None
-face_mask_blur : Optional[float] = None
-face_mask_padding : Optional[Padding] = None
-face_mask_regions : Optional[List[FaceMaskRegion]] = None
-# frame extraction
-trim_frame_start : Optional[int] = None
-trim_frame_end : Optional[int] = None
-temp_frame_format : Optional[TempFrameFormat] = None
-temp_frame_quality : Optional[int] = None
-keep_temp : Optional[bool] = None
-# output creation
-output_image_quality : Optional[int] = None
-output_video_encoder : Optional[OutputVideoEncoder] = None
-output_video_quality : Optional[int] = None
-keep_fps : Optional[bool] = None
-skip_audio : Optional[bool] = None
-# frame processors
-frame_processors : List[str] = []
-# uis
-ui_layouts : List[str] = []
diff --git a/DeepFakeAI/installer.py b/DeepFakeAI/installer.py
deleted file mode 100644
index c233f4b4c914820180450c7f4e7ba9744e7cc2e6..0000000000000000000000000000000000000000
--- a/DeepFakeAI/installer.py
+++ /dev/null
@@ -1,92 +0,0 @@
-from typing import Dict, Tuple
-import sys
-import os
-import platform
-import tempfile
-import subprocess
-from argparse import ArgumentParser, HelpFormatter
-
-subprocess.call([ 'pip', 'install' , 'inquirer', '-q' ])
-
-import inquirer
-
-from DeepFakeAI import metadata, wording
-
-TORCH : Dict[str, str] =\
-{
-	'default': 'default',
-	'cpu': 'cpu'
-}
-ONNXRUNTIMES : Dict[str, Tuple[str, str]] =\
-{
-	'default': ('onnxruntime', '1.16.3')
-}
-if platform.system().lower() == 'linux' or platform.system().lower() == 'windows':
-	TORCH['cuda'] = 'cu118'
-	TORCH['cuda-nightly'] = 'cu121'
-	ONNXRUNTIMES['cuda'] = ('onnxruntime-gpu', '1.16.3')
-	ONNXRUNTIMES['cuda-nightly'] = ('ort-nightly-gpu', '1.17.0.dev20231205004')
-	ONNXRUNTIMES['openvino'] = ('onnxruntime-openvino', '1.16.0')
-if platform.system().lower() == 'linux':
-	TORCH['rocm'] = 'rocm5.6'
-	ONNXRUNTIMES['rocm'] = ('onnxruntime-rocm', '1.16.3')
-if platform.system().lower() == 'darwin':
-	ONNXRUNTIMES['coreml-legacy'] = ('onnxruntime-coreml', '1.13.1')
-	ONNXRUNTIMES['coreml-silicon'] = ('onnxruntime-silicon', '1.16.0')
-if platform.system().lower() == 'windows':
-	ONNXRUNTIMES['directml'] = ('onnxruntime-directml', '1.16.3')
-
-
-def cli() -> None:
-	program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120))
-	program.add_argument('--torch', help = wording.get('install_dependency_help').format(dependency = 'torch'), choices = TORCH.keys())
-	program.add_argument('--onnxruntime', help = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), choices = ONNXRUNTIMES.keys())
-	program.add_argument('--skip-venv', help = wording.get('skip_venv_help'), action = 'store_true')
-	program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
-	run(program)
-
-
-def run(program : ArgumentParser) -> None:
-	args = program.parse_args()
-	python_id = 'cp' + str(sys.version_info.major) + str(sys.version_info.minor)
-
-	if not args.skip_venv:
-		os.environ['PIP_REQUIRE_VIRTUALENV'] = '1'
-	if args.torch and args.onnxruntime:
-		answers =\
-		{
-			'torch': args.torch,
-			'onnxruntime': args.onnxruntime
-		}
-	else:
-		answers = inquirer.prompt(
-		[
-			inquirer.List('torch', message = wording.get('install_dependency_help').format(dependency = 'torch'), choices = list(TORCH.keys())),
-			inquirer.List('onnxruntime', message = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), choices = list(ONNXRUNTIMES.keys()))
-		])
-	if answers:
-		torch = answers['torch']
-		torch_wheel = TORCH[torch]
-		onnxruntime = answers['onnxruntime']
-		onnxruntime_name, onnxruntime_version = ONNXRUNTIMES[onnxruntime]
-
-		subprocess.call([ 'pip', 'uninstall', 'torch', '-y', '-q' ])
-		if torch_wheel == 'default':
-			subprocess.call([ 'pip', 'install', '-r', 'requirements.txt' ])
-		else:
-			subprocess.call([ 'pip', 'install', '-r', 'requirements.txt', '--extra-index-url', 'https://download.pytorch.org/whl/' + torch_wheel ])
-		if onnxruntime == 'rocm':
-			if python_id in [ 'cp39', 'cp310', 'cp311' ]:
-				wheel_name = 'onnxruntime_training-' + onnxruntime_version + '+rocm56-' + python_id + '-' + python_id + '-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'
-				wheel_path = os.path.join(tempfile.gettempdir(), wheel_name)
-				wheel_url = 'https://download.onnxruntime.ai/' + wheel_name
-				subprocess.call([ 'curl', '--silent', '--location', '--continue-at', '-', '--output', wheel_path, wheel_url ])
-				subprocess.call([ 'pip', 'uninstall', wheel_path, '-y', '-q' ])
-				subprocess.call([ 'pip', 'install', wheel_path ])
-				os.remove(wheel_path)
-		else:
-			subprocess.call([ 'pip', 'uninstall', 'onnxruntime', onnxruntime_name, '-y', '-q' ])
-			if onnxruntime == 'cuda-nightly':
-				subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version, '--extra-index-url', 'https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ort-cuda-12-nightly/pypi/simple' ])
-			else:
-				subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version ])
diff --git a/DeepFakeAI/logger.py b/DeepFakeAI/logger.py
deleted file mode 100644
index f24f6154ea34a4f5162411d3ff69792e560c0825..0000000000000000000000000000000000000000
--- a/DeepFakeAI/logger.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from typing import Dict
-from logging import basicConfig, getLogger, Logger, DEBUG, INFO, WARNING, ERROR
-
-from DeepFakeAI.typing import LogLevel
-
-
-def init(log_level : LogLevel) -> None:
-	basicConfig(format = None)
-	get_package_logger().setLevel(get_log_levels()[log_level])
-
-
-def get_package_logger() -> Logger:
-	return getLogger('DeepFakeAI')
-
-
-def debug(message : str, scope : str) -> None:
-	get_package_logger().debug('[' + scope + '] ' + message)
-
-
-def info(message : str, scope : str) -> None:
-	get_package_logger().info('[' + scope + '] ' + message)
-
-
-def warn(message : str, scope : str) -> None:
-	get_package_logger().warning('[' + scope + '] ' + message)
-
-
-def error(message : str, scope : str) -> None:
-	get_package_logger().error('[' + scope + '] ' + message)
-
-
-def get_log_levels() -> Dict[LogLevel, int]:
-	return\
-	{
-		'error': ERROR,
-		'warn': WARNING,
-		'info': INFO,
-		'debug': DEBUG
-	}
diff --git a/DeepFakeAI/metadata.py b/DeepFakeAI/metadata.py
deleted file mode 100644
index 8f7e36cd7874e977db44f8c7a703617c6de3d692..0000000000000000000000000000000000000000
--- a/DeepFakeAI/metadata.py
+++ /dev/null
@@ -1,13 +0,0 @@
-METADATA =\
-{
-	'name': 'FaceFusion',
-	'description': 'Next generation face swapper and enhancer',
-	'version': '2.1.3',
-	'license': 'MIT',
-	'author': 'Henry Ruhs',
-	'url': 'https://DeepFakeAI.io'
-}
-
-
-def get(key : str) -> str:
-	return METADATA[key]
diff --git a/DeepFakeAI/normalizer.py b/DeepFakeAI/normalizer.py
deleted file mode 100644
index 4e2868dfc8c3fbdfb025c78b915f6405e8d709ae..0000000000000000000000000000000000000000
--- a/DeepFakeAI/normalizer.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from typing import List, Optional
-import os
-
-from DeepFakeAI.filesystem import is_file, is_directory
-from DeepFakeAI.typing import Padding
-
-
-def normalize_output_path(source_paths : List[str], target_path : str, output_path : str) -> Optional[str]:
-	if is_file(target_path) and is_directory(output_path):
-		target_name, target_extension = os.path.splitext(os.path.basename(target_path))
-		if source_paths and is_file(source_paths[0]):
-			source_name, _ = os.path.splitext(os.path.basename(source_paths[0]))
-			return os.path.join(output_path, source_name + '-' + target_name + target_extension)
-		return os.path.join(output_path, target_name + target_extension)
-	if is_file(target_path) and output_path:
-		_, target_extension = os.path.splitext(os.path.basename(target_path))
-		output_name, output_extension = os.path.splitext(os.path.basename(output_path))
-		output_directory_path = os.path.dirname(output_path)
-		if is_directory(output_directory_path) and output_extension:
-			return os.path.join(output_directory_path, output_name + target_extension)
-		return None
-	return output_path
-
-
-def normalize_padding(padding : Optional[List[int]]) -> Optional[Padding]:
-	if padding and len(padding) == 1:
-		return tuple([ padding[0], padding[0], padding[0], padding[0] ]) # type: ignore[return-value]
-	if padding and len(padding) == 2:
-		return tuple([ padding[0], padding[1], padding[0], padding[1] ]) # type: ignore[return-value]
-	if padding and len(padding) == 3:
-		return tuple([ padding[0], padding[1], padding[2], padding[1] ]) # type: ignore[return-value]
-	if padding and len(padding) == 4:
-		return tuple(padding) # type: ignore[return-value]
-	return None
diff --git a/DeepFakeAI/processors/__init__.py b/DeepFakeAI/processors/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/DeepFakeAI/processors/frame/__init__.py b/DeepFakeAI/processors/frame/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/DeepFakeAI/processors/frame/choices.py b/DeepFakeAI/processors/frame/choices.py
deleted file mode 100644
index ac4e3d76d42e753d0233f890a50865eee96f1e90..0000000000000000000000000000000000000000
--- a/DeepFakeAI/processors/frame/choices.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from typing import List
-import numpy
-
-from DeepFakeAI.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem
-
-face_swapper_models : List[FaceSwapperModel] = [ 'blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial' ]
-face_enhancer_models : List[FaceEnhancerModel] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer' ]
-frame_enhancer_models : List[FrameEnhancerModel] = [ 'real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus' ]
-
-face_enhancer_blend_range : List[int] = numpy.arange(0, 101, 1).tolist()
-frame_enhancer_blend_range : List[int] = numpy.arange(0, 101, 1).tolist()
-
-face_debugger_items : List[FaceDebuggerItem] = [ 'bbox', 'kps', 'face-mask', 'score' ]
diff --git a/DeepFakeAI/processors/frame/core.py b/DeepFakeAI/processors/frame/core.py
deleted file mode 100644
index b66a7d0c45b88145c75ff9836cc0faa69679d447..0000000000000000000000000000000000000000
--- a/DeepFakeAI/processors/frame/core.py
+++ /dev/null
@@ -1,98 +0,0 @@
-import sys
-import importlib
-from concurrent.futures import ThreadPoolExecutor, as_completed
-from queue import Queue
-from types import ModuleType
-from typing import Any, List
-from tqdm import tqdm
-
-import DeepFakeAI.globals
-from DeepFakeAI.typing import Process_Frames
-from DeepFakeAI.execution_helper import encode_execution_providers
-from DeepFakeAI import logger, wording
-
-FRAME_PROCESSORS_MODULES : List[ModuleType] = []
-FRAME_PROCESSORS_METHODS =\
-[
-	'get_frame_processor',
-	'clear_frame_processor',
-	'get_options',
-	'set_options',
-	'register_args',
-	'apply_args',
-	'pre_check',
-	'pre_process',
-	'get_reference_frame',
-	'process_frame',
-	'process_frames',
-	'process_image',
-	'process_video',
-	'post_process'
-]
-
-
-def load_frame_processor_module(frame_processor : str) -> Any:
-	try:
-		frame_processor_module = importlib.import_module('DeepFakeAI.processors.frame.modules.' + frame_processor)
-		for method_name in FRAME_PROCESSORS_METHODS:
-			if not hasattr(frame_processor_module, method_name):
-				raise NotImplementedError
-	except ModuleNotFoundError as exception:
-		logger.debug(exception.msg, __name__.upper())
-		sys.exit(wording.get('frame_processor_not_loaded').format(frame_processor = frame_processor))
-	except NotImplementedError:
-		sys.exit(wording.get('frame_processor_not_implemented').format(frame_processor = frame_processor))
-	return frame_processor_module
-
-
-def get_frame_processors_modules(frame_processors : List[str]) -> List[ModuleType]:
-	global FRAME_PROCESSORS_MODULES
-
-	if not FRAME_PROCESSORS_MODULES:
-		for frame_processor in frame_processors:
-			frame_processor_module = load_frame_processor_module(frame_processor)
-			FRAME_PROCESSORS_MODULES.append(frame_processor_module)
-	return FRAME_PROCESSORS_MODULES
-
-
-def clear_frame_processors_modules() -> None:
-	global FRAME_PROCESSORS_MODULES
-
-	for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
-		frame_processor_module.clear_frame_processor()
-	FRAME_PROCESSORS_MODULES = []
-
-
-def multi_process_frames(source_paths : List[str], temp_frame_paths : List[str], process_frames : Process_Frames) -> None:
-	with tqdm(total = len(temp_frame_paths), desc = wording.get('processing'), unit = 'frame', ascii = ' =', disable = DeepFakeAI.globals.log_level in [ 'warn', 'error' ]) as progress:
-		progress.set_postfix(
-		{
-			'execution_providers': encode_execution_providers(DeepFakeAI.globals.execution_providers),
-			'execution_thread_count': DeepFakeAI.globals.execution_thread_count,
-			'execution_queue_count': DeepFakeAI.globals.execution_queue_count
-		})
-		with ThreadPoolExecutor(max_workers = DeepFakeAI.globals.execution_thread_count) as executor:
-			futures = []
-			queue_temp_frame_paths : Queue[str] = create_queue(temp_frame_paths)
-			queue_per_future = max(len(temp_frame_paths) // DeepFakeAI.globals.execution_thread_count * DeepFakeAI.globals.execution_queue_count, 1)
-			while not queue_temp_frame_paths.empty():
-				payload_temp_frame_paths = pick_queue(queue_temp_frame_paths, queue_per_future)
-				future = executor.submit(process_frames, source_paths, payload_temp_frame_paths, progress.update)
-				futures.append(future)
-			for future_done in as_completed(futures):
-				future_done.result()
-
-
-def create_queue(temp_frame_paths : List[str]) -> Queue[str]:
-	queue : Queue[str] = Queue()
-	for frame_path in temp_frame_paths:
-		queue.put(frame_path)
-	return queue
-
-
-def pick_queue(queue : Queue[str], queue_per_future : int) -> List[str]:
-	queues = []
-	for _ in range(queue_per_future):
-		if not queue.empty():
-			queues.append(queue.get())
-	return queues
diff --git a/DeepFakeAI/processors/frame/globals.py b/DeepFakeAI/processors/frame/globals.py
deleted file mode 100644
index 83bc1b18864a00c237f6b763a2ac60a65a77f97c..0000000000000000000000000000000000000000
--- a/DeepFakeAI/processors/frame/globals.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from typing import List, Optional
-
-from DeepFakeAI.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem
-
-face_swapper_model : Optional[FaceSwapperModel] = None
-face_enhancer_model : Optional[FaceEnhancerModel] = None
-face_enhancer_blend : Optional[int] = None
-frame_enhancer_model : Optional[FrameEnhancerModel] = None
-frame_enhancer_blend : Optional[int] = None
-face_debugger_items : Optional[List[FaceDebuggerItem]] = None
diff --git a/DeepFakeAI/processors/frame/modules/__init__.py b/DeepFakeAI/processors/frame/modules/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/DeepFakeAI/processors/frame/modules/face_debugger.py b/DeepFakeAI/processors/frame/modules/face_debugger.py
deleted file mode 100644
index 429ae45f527af259be02882b48aa8936e99e28bc..0000000000000000000000000000000000000000
--- a/DeepFakeAI/processors/frame/modules/face_debugger.py
+++ /dev/null
@@ -1,142 +0,0 @@
-from typing import Any, List, Literal
-from argparse import ArgumentParser
-import cv2
-import numpy
-
-import DeepFakeAI.globals
-import DeepFakeAI.processors.frame.core as frame_processors
-from DeepFakeAI import wording
-from DeepFakeAI.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
-from DeepFakeAI.face_store import get_reference_faces
-from DeepFakeAI.content_analyser import clear_content_analyser
-from DeepFakeAI.typing import Face, FaceSet, Frame, Update_Process, ProcessMode
-from DeepFakeAI.vision import read_image, read_static_image, read_static_images, write_image
-from DeepFakeAI.face_helper import warp_face
-from DeepFakeAI.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
-from DeepFakeAI.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices
-
-NAME = __name__.upper()
-
-
-def get_frame_processor() -> None:
-	pass
-
-
-def clear_frame_processor() -> None:
-	pass
-
-
-def get_options(key : Literal['model']) -> None:
-	pass
-
-
-def set_options(key : Literal['model'], value : Any) -> None:
-	pass
-
-
-def register_args(program : ArgumentParser) -> None:
-	program.add_argument('--face-debugger-items', help = wording.get('face_debugger_items_help').format(choices = ', '.join(frame_processors_choices.face_debugger_items)), default = [ 'kps', 'face-mask' ], choices = frame_processors_choices.face_debugger_items, nargs = '+', metavar = 'FACE_DEBUGGER_ITEMS')
-
-
-def apply_args(program : ArgumentParser) -> None:
-	args = program.parse_args()
-	frame_processors_globals.face_debugger_items = args.face_debugger_items
-
-
-def pre_check() -> bool:
-	return True
-
-
-def pre_process(mode : ProcessMode) -> bool:
-	return True
-
-
-def post_process() -> None:
-	clear_frame_processor()
-	clear_face_analyser()
-	clear_content_analyser()
-	clear_face_occluder()
-	clear_face_parser()
-	read_static_image.cache_clear()
-
-
-def debug_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
-	primary_color = (0, 0, 255)
-	secondary_color = (0, 255, 0)
-	bounding_box = target_face.bbox.astype(numpy.int32)
-	if 'bbox' in frame_processors_globals.face_debugger_items:
-		cv2.rectangle(temp_frame, (bounding_box[0], bounding_box[1]), (bounding_box[2], bounding_box[3]), secondary_color, 2)
-	if 'face-mask' in frame_processors_globals.face_debugger_items:
-		crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, 'arcface_128_v2', (128, 512))
-		inverse_matrix = cv2.invertAffineTransform(affine_matrix)
-		temp_frame_size = temp_frame.shape[:2][::-1]
-		crop_mask_list = []
-		if 'box' in DeepFakeAI.globals.face_mask_types:
-			crop_mask_list.append(create_static_box_mask(crop_frame.shape[:2][::-1], 0, DeepFakeAI.globals.face_mask_padding))
-		if 'occlusion' in DeepFakeAI.globals.face_mask_types:
-			crop_mask_list.append(create_occlusion_mask(crop_frame))
-		if 'region' in DeepFakeAI.globals.face_mask_types:
-			crop_mask_list.append(create_region_mask(crop_frame, DeepFakeAI.globals.face_mask_regions))
-		crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
-		crop_mask = (crop_mask * 255).astype(numpy.uint8)
-		inverse_mask_frame = cv2.warpAffine(crop_mask, inverse_matrix, temp_frame_size)
-		inverse_mask_frame_edges = cv2.threshold(inverse_mask_frame, 100, 255, cv2.THRESH_BINARY)[1]
-		inverse_mask_frame_edges[inverse_mask_frame_edges > 0] = 255
-		inverse_mask_contours = cv2.findContours(inverse_mask_frame_edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[0]
-		cv2.drawContours(temp_frame, inverse_mask_contours, -1, primary_color, 2)
-	if bounding_box[3] - bounding_box[1] > 60 and bounding_box[2] - bounding_box[0] > 60:
-		if 'kps' in frame_processors_globals.face_debugger_items:
-			kps = target_face.kps.astype(numpy.int32)
-			for index in range(kps.shape[0]):
-				cv2.circle(temp_frame, (kps[index][0], kps[index][1]), 3, primary_color, -1)
-		if 'score' in frame_processors_globals.face_debugger_items:
-			score_text = str(round(target_face.score, 2))
-			score_position = (bounding_box[0] + 10, bounding_box[1] + 20)
-			cv2.putText(temp_frame, score_text, score_position, cv2.FONT_HERSHEY_SIMPLEX, 0.5, secondary_color, 2)
-	return temp_frame
-
-
-def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
-	pass
-
-
-def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
-	if 'reference' in DeepFakeAI.globals.face_selector_mode:
-		similar_faces = find_similar_faces(temp_frame, reference_faces, DeepFakeAI.globals.reference_face_distance)
-		if similar_faces:
-			for similar_face in similar_faces:
-				temp_frame = debug_face(source_face, similar_face, temp_frame)
-	if 'one' in DeepFakeAI.globals.face_selector_mode:
-		target_face = get_one_face(temp_frame)
-		if target_face:
-			temp_frame = debug_face(source_face, target_face, temp_frame)
-	if 'many' in DeepFakeAI.globals.face_selector_mode:
-		many_faces = get_many_faces(temp_frame)
-		if many_faces:
-			for target_face in many_faces:
-				temp_frame = debug_face(source_face, target_face, temp_frame)
-	return temp_frame
-
-
-def process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
-	source_frames = read_static_images(source_paths)
-	source_face = get_average_face(source_frames)
-	reference_faces = get_reference_faces() if 'reference' in DeepFakeAI.globals.face_selector_mode else None
-	for temp_frame_path in temp_frame_paths:
-		temp_frame = read_image(temp_frame_path)
-		result_frame = process_frame(source_face, reference_faces, temp_frame)
-		write_image(temp_frame_path, result_frame)
-		update_progress()
-
-
-def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
-	source_frames = read_static_images(source_paths)
-	source_face = get_average_face(source_frames)
-	target_frame = read_static_image(target_path)
-	reference_faces = get_reference_faces() if 'reference' in DeepFakeAI.globals.face_selector_mode else None
-	result_frame = process_frame(source_face, reference_faces, target_frame)
-	write_image(output_path, result_frame)
-
-
-def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
-	frame_processors.multi_process_frames(source_paths, temp_frame_paths, process_frames)
diff --git a/DeepFakeAI/processors/frame/modules/face_enhancer.py b/DeepFakeAI/processors/frame/modules/face_enhancer.py
deleted file mode 100644
index 6cf580de09c76ec8d5723fc96a50482a0ef96161..0000000000000000000000000000000000000000
--- a/DeepFakeAI/processors/frame/modules/face_enhancer.py
+++ /dev/null
@@ -1,249 +0,0 @@
-from typing import Any, List, Literal, Optional
-from argparse import ArgumentParser
-import cv2
-import threading
-import numpy
-import onnxruntime
-
-import DeepFakeAI.globals
-import DeepFakeAI.processors.frame.core as frame_processors
-from DeepFakeAI import logger, wording
-from DeepFakeAI.face_analyser import get_many_faces, clear_face_analyser, find_similar_faces, get_one_face
-from DeepFakeAI.face_helper import warp_face, paste_back
-from DeepFakeAI.content_analyser import clear_content_analyser
-from DeepFakeAI.face_store import get_reference_faces
-from DeepFakeAI.typing import Face, FaceSet, Frame, Update_Process, ProcessMode, ModelSet, OptionsWithModel
-from DeepFakeAI.common_helper import create_metavar
-from DeepFakeAI.filesystem import is_file, is_image, is_video, resolve_relative_path
-from DeepFakeAI.download import conditional_download, is_download_done
-from DeepFakeAI.vision import read_image, read_static_image, write_image
-from DeepFakeAI.processors.frame import globals as frame_processors_globals
-from DeepFakeAI.processors.frame import choices as frame_processors_choices
-from DeepFakeAI.face_masker import create_static_box_mask, create_occlusion_mask, clear_face_occluder
-
-FRAME_PROCESSOR = None
-THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
-THREAD_LOCK : threading.Lock = threading.Lock()
-NAME = __name__.upper()
-MODELS : ModelSet =\
-{
-	'codeformer':
-	{
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/codeformer.onnx',
-		'path': resolve_relative_path('../.assets/models/codeformer.onnx'),
-		'template': 'ffhq_512',
-		'size': (512, 512)
-	},
-	'gfpgan_1.2':
-	{
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/gfpgan_1.2.onnx',
-		'path': resolve_relative_path('../.assets/models/gfpgan_1.2.onnx'),
-		'template': 'ffhq_512',
-		'size': (512, 512)
-	},
-	'gfpgan_1.3':
-	{
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/gfpgan_1.3.onnx',
-		'path': resolve_relative_path('../.assets/models/gfpgan_1.3.onnx'),
-		'template': 'ffhq_512',
-		'size': (512, 512)
-	},
-	'gfpgan_1.4':
-	{
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/gfpgan_1.4.onnx',
-		'path': resolve_relative_path('../.assets/models/gfpgan_1.4.onnx'),
-		'template': 'ffhq_512',
-		'size': (512, 512)
-	},
-	'gpen_bfr_256':
-	{
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/gpen_bfr_256.onnx',
-		'path': resolve_relative_path('../.assets/models/gpen_bfr_256.onnx'),
-		'template': 'arcface_128_v2',
-		'size': (128, 256)
-	},
-	'gpen_bfr_512':
-	{
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/gpen_bfr_512.onnx',
-		'path': resolve_relative_path('../.assets/models/gpen_bfr_512.onnx'),
-		'template': 'ffhq_512',
-		'size': (512, 512)
-	},
-	'restoreformer':
-	{
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/restoreformer.onnx',
-		'path': resolve_relative_path('../.assets/models/restoreformer.onnx'),
-		'template': 'ffhq_512',
-		'size': (512, 512)
-	}
-}
-OPTIONS : Optional[OptionsWithModel] = None
-
-
-def get_frame_processor() -> Any:
-	global FRAME_PROCESSOR
-
-	with THREAD_LOCK:
-		if FRAME_PROCESSOR is None:
-			model_path = get_options('model').get('path')
-			FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = DeepFakeAI.globals.execution_providers)
-	return FRAME_PROCESSOR
-
-
-def clear_frame_processor() -> None:
-	global FRAME_PROCESSOR
-
-	FRAME_PROCESSOR = None
-
-
-def get_options(key : Literal['model']) -> Any:
-	global OPTIONS
-
-	if OPTIONS is None:
-		OPTIONS =\
-		{
-			'model': MODELS[frame_processors_globals.face_enhancer_model]
-		}
-	return OPTIONS.get(key)
-
-
-def set_options(key : Literal['model'], value : Any) -> None:
-	global OPTIONS
-
-	OPTIONS[key] = value
-
-
-def register_args(program : ArgumentParser) -> None:
-	program.add_argument('--face-enhancer-model', help = wording.get('frame_processor_model_help'), default = 'gfpgan_1.4', choices = frame_processors_choices.face_enhancer_models)
-	program.add_argument('--face-enhancer-blend', help = wording.get('frame_processor_blend_help'), type = int, default = 80, choices = frame_processors_choices.face_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.face_enhancer_blend_range))
-
-
-def apply_args(program : ArgumentParser) -> None:
-	args = program.parse_args()
-	frame_processors_globals.face_enhancer_model = args.face_enhancer_model
-	frame_processors_globals.face_enhancer_blend = args.face_enhancer_blend
-
-
-def pre_check() -> bool:
-	if not DeepFakeAI.globals.skip_download:
-		download_directory_path = resolve_relative_path('../.assets/models')
-		model_url = get_options('model').get('url')
-		conditional_download(download_directory_path, [ model_url ])
-	return True
-
-
-def pre_process(mode : ProcessMode) -> bool:
-	model_url = get_options('model').get('url')
-	model_path = get_options('model').get('path')
-	if not DeepFakeAI.globals.skip_download and not is_download_done(model_url, model_path):
-		logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
-		return False
-	elif not is_file(model_path):
-		logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
-		return False
-	if mode in [ 'output', 'preview' ] and not is_image(DeepFakeAI.globals.target_path) and not is_video(DeepFakeAI.globals.target_path):
-		logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
-		return False
-	if mode == 'output' and not DeepFakeAI.globals.output_path:
-		logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
-		return False
-	return True
-
-
-def post_process() -> None:
-	clear_frame_processor()
-	clear_face_analyser()
-	clear_content_analyser()
-	clear_face_occluder()
-	read_static_image.cache_clear()
-
-
-def enhance_face(target_face: Face, temp_frame: Frame) -> Frame:
-	frame_processor = get_frame_processor()
-	model_template = get_options('model').get('template')
-	model_size = get_options('model').get('size')
-	crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, model_template, model_size)
-	crop_mask_list =\
-	[
-		create_static_box_mask(crop_frame.shape[:2][::-1], DeepFakeAI.globals.face_mask_blur, (0, 0, 0, 0))
-	]
-	if 'occlusion' in DeepFakeAI.globals.face_mask_types:
-		crop_mask_list.append(create_occlusion_mask(crop_frame))
-	crop_frame = prepare_crop_frame(crop_frame)
-	frame_processor_inputs = {}
-	for frame_processor_input in frame_processor.get_inputs():
-		if frame_processor_input.name == 'input':
-			frame_processor_inputs[frame_processor_input.name] = crop_frame
-		if frame_processor_input.name == 'weight':
-			frame_processor_inputs[frame_processor_input.name] = numpy.array([ 1 ], dtype = numpy.double)
-	with THREAD_SEMAPHORE:
-		crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
-	crop_frame = normalize_crop_frame(crop_frame)
-	crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
-	paste_frame = paste_back(temp_frame, crop_frame, crop_mask, affine_matrix)
-	temp_frame = blend_frame(temp_frame, paste_frame)
-	return temp_frame
-
-
-def prepare_crop_frame(crop_frame : Frame) -> Frame:
-	crop_frame = crop_frame[:, :, ::-1] / 255.0
-	crop_frame = (crop_frame - 0.5) / 0.5
-	crop_frame = numpy.expand_dims(crop_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
-	return crop_frame
-
-
-def normalize_crop_frame(crop_frame : Frame) -> Frame:
-	crop_frame = numpy.clip(crop_frame, -1, 1)
-	crop_frame = (crop_frame + 1) / 2
-	crop_frame = crop_frame.transpose(1, 2, 0)
-	crop_frame = (crop_frame * 255.0).round()
-	crop_frame = crop_frame.astype(numpy.uint8)[:, :, ::-1]
-	return crop_frame
-
-
-def blend_frame(temp_frame : Frame, paste_frame : Frame) -> Frame:
-	face_enhancer_blend = 1 - (frame_processors_globals.face_enhancer_blend / 100)
-	temp_frame = cv2.addWeighted(temp_frame, face_enhancer_blend, paste_frame, 1 - face_enhancer_blend, 0)
-	return temp_frame
-
-
-def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Optional[Frame]:
-	return enhance_face(target_face, temp_frame)
-
-
-def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
-	if 'reference' in DeepFakeAI.globals.face_selector_mode:
-		similar_faces = find_similar_faces(temp_frame, reference_faces, DeepFakeAI.globals.reference_face_distance)
-		if similar_faces:
-			for similar_face in similar_faces:
-				temp_frame = enhance_face(similar_face, temp_frame)
-	if 'one' in DeepFakeAI.globals.face_selector_mode:
-		target_face = get_one_face(temp_frame)
-		if target_face:
-			temp_frame = enhance_face(target_face, temp_frame)
-	if 'many' in DeepFakeAI.globals.face_selector_mode:
-		many_faces = get_many_faces(temp_frame)
-		if many_faces:
-			for target_face in many_faces:
-				temp_frame = enhance_face(target_face, temp_frame)
-	return temp_frame
-
-
-def process_frames(source_path : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
-	reference_faces = get_reference_faces() if 'reference' in DeepFakeAI.globals.face_selector_mode else None
-	for temp_frame_path in temp_frame_paths:
-		temp_frame = read_image(temp_frame_path)
-		result_frame = process_frame(None, reference_faces, temp_frame)
-		write_image(temp_frame_path, result_frame)
-		update_progress()
-
-
-def process_image(source_path : str, target_path : str, output_path : str) -> None:
-	reference_faces = get_reference_faces() if 'reference' in DeepFakeAI.globals.face_selector_mode else None
-	target_frame = read_static_image(target_path)
-	result_frame = process_frame(None, reference_faces, target_frame)
-	write_image(output_path, result_frame)
-
-
-def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
-	frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
diff --git a/DeepFakeAI/processors/frame/modules/face_swapper.py b/DeepFakeAI/processors/frame/modules/face_swapper.py
deleted file mode 100644
index 4541b574f4be91c8b9d9bb1ce7ca5724aa88337e..0000000000000000000000000000000000000000
--- a/DeepFakeAI/processors/frame/modules/face_swapper.py
+++ /dev/null
@@ -1,302 +0,0 @@
-from typing import Any, List, Literal, Optional
-from argparse import ArgumentParser
-import threading
-import numpy
-import onnx
-import onnxruntime
-from onnx import numpy_helper
-
-import DeepFakeAI.globals
-import DeepFakeAI.processors.frame.core as frame_processors
-from DeepFakeAI import logger, wording
-from DeepFakeAI.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
-from DeepFakeAI.face_helper import warp_face, paste_back
-from DeepFakeAI.face_store import get_reference_faces
-from DeepFakeAI.content_analyser import clear_content_analyser
-from DeepFakeAI.typing import Face, FaceSet, Frame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, Embedding
-from DeepFakeAI.filesystem import is_file, is_image, are_images, is_video, resolve_relative_path
-from DeepFakeAI.download import conditional_download, is_download_done
-from DeepFakeAI.vision import read_image, read_static_image, read_static_images, write_image
-from DeepFakeAI.processors.frame import globals as frame_processors_globals
-from DeepFakeAI.processors.frame import choices as frame_processors_choices
-from DeepFakeAI.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
-
-FRAME_PROCESSOR = None
-MODEL_MATRIX = None
-THREAD_LOCK : threading.Lock = threading.Lock()
-NAME = __name__.upper()
-MODELS : ModelSet =\
-{
-	'blendswap_256':
-	{
-		'type': 'blendswap',
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/blendswap_256.onnx',
-		'path': resolve_relative_path('../.assets/models/blendswap_256.onnx'),
-		'template': 'ffhq_512',
-		'size': (512, 256),
-		'mean': [ 0.0, 0.0, 0.0 ],
-		'standard_deviation': [ 1.0, 1.0, 1.0 ]
-	},
-	'inswapper_128':
-	{
-		'type': 'inswapper',
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/inswapper_128.onnx',
-		'path': resolve_relative_path('../.assets/models/inswapper_128.onnx'),
-		'template': 'arcface_128_v2',
-		'size': (128, 128),
-		'mean': [ 0.0, 0.0, 0.0 ],
-		'standard_deviation': [ 1.0, 1.0, 1.0 ]
-	},
-	'inswapper_128_fp16':
-	{
-		'type': 'inswapper',
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/inswapper_128_fp16.onnx',
-		'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.onnx'),
-		'template': 'arcface_128_v2',
-		'size': (128, 128),
-		'mean': [ 0.0, 0.0, 0.0 ],
-		'standard_deviation': [ 1.0, 1.0, 1.0 ]
-	},
-	'simswap_256':
-	{
-		'type': 'simswap',
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/simswap_256.onnx',
-		'path': resolve_relative_path('../.assets/models/simswap_256.onnx'),
-		'template': 'arcface_112_v1',
-		'size': (112, 256),
-		'mean': [ 0.485, 0.456, 0.406 ],
-		'standard_deviation': [ 0.229, 0.224, 0.225 ]
-	},
-	'simswap_512_unofficial':
-	{
-		'type': 'simswap',
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/simswap_512_unofficial.onnx',
-		'path': resolve_relative_path('../.assets/models/simswap_512_unofficial.onnx'),
-		'template': 'arcface_112_v1',
-		'size': (112, 512),
-		'mean': [ 0.0, 0.0, 0.0 ],
-		'standard_deviation': [ 1.0, 1.0, 1.0 ]
-	}
-}
-OPTIONS : Optional[OptionsWithModel] = None
-
-
-def get_frame_processor() -> Any:
-	global FRAME_PROCESSOR
-
-	with THREAD_LOCK:
-		if FRAME_PROCESSOR is None:
-			model_path = get_options('model').get('path')
-			FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = DeepFakeAI.globals.execution_providers)
-	return FRAME_PROCESSOR
-
-
-def clear_frame_processor() -> None:
-	global FRAME_PROCESSOR
-
-	FRAME_PROCESSOR = None
-
-
-def get_model_matrix() -> Any:
-	global MODEL_MATRIX
-
-	with THREAD_LOCK:
-		if MODEL_MATRIX is None:
-			model_path = get_options('model').get('path')
-			model = onnx.load(model_path)
-			MODEL_MATRIX = numpy_helper.to_array(model.graph.initializer[-1])
-	return MODEL_MATRIX
-
-
-def clear_model_matrix() -> None:
-	global MODEL_MATRIX
-
-	MODEL_MATRIX = None
-
-
-def get_options(key : Literal['model']) -> Any:
-	global OPTIONS
-
-	if OPTIONS is None:
-		OPTIONS =\
-		{
-			'model': MODELS[frame_processors_globals.face_swapper_model]
-		}
-	return OPTIONS.get(key)
-
-
-def set_options(key : Literal['model'], value : Any) -> None:
-	global OPTIONS
-
-	OPTIONS[key] = value
-
-
-def register_args(program : ArgumentParser) -> None:
-	program.add_argument('--face-swapper-model', help = wording.get('frame_processor_model_help'), default = 'inswapper_128', choices = frame_processors_choices.face_swapper_models)
-
-
-def apply_args(program : ArgumentParser) -> None:
-	args = program.parse_args()
-	frame_processors_globals.face_swapper_model = args.face_swapper_model
-	if args.face_swapper_model == 'blendswap_256':
-		DeepFakeAI.globals.face_recognizer_model = 'arcface_blendswap'
-	if args.face_swapper_model == 'inswapper_128' or args.face_swapper_model == 'inswapper_128_fp16':
-		DeepFakeAI.globals.face_recognizer_model = 'arcface_inswapper'
-	if args.face_swapper_model == 'simswap_256' or args.face_swapper_model == 'simswap_512_unofficial':
-		DeepFakeAI.globals.face_recognizer_model = 'arcface_simswap'
-
-
-def pre_check() -> bool:
-	if not DeepFakeAI.globals.skip_download:
-		download_directory_path = resolve_relative_path('../.assets/models')
-		model_url = get_options('model').get('url')
-		conditional_download(download_directory_path, [ model_url ])
-	return True
-
-
-def pre_process(mode : ProcessMode) -> bool:
-	model_url = get_options('model').get('url')
-	model_path = get_options('model').get('path')
-	if not DeepFakeAI.globals.skip_download and not is_download_done(model_url, model_path):
-		logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
-		return False
-	elif not is_file(model_path):
-		logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
-		return False
-	if not are_images(DeepFakeAI.globals.source_paths):
-		logger.error(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME)
-		return False
-	for source_frame in read_static_images(DeepFakeAI.globals.source_paths):
-		if not get_one_face(source_frame):
-			logger.error(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME)
-			return False
-	if mode in [ 'output', 'preview' ] and not is_image(DeepFakeAI.globals.target_path) and not is_video(DeepFakeAI.globals.target_path):
-		logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
-		return False
-	if mode == 'output' and not DeepFakeAI.globals.output_path:
-		logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
-		return False
-	return True
-
-
-def post_process() -> None:
-	clear_frame_processor()
-	clear_model_matrix()
-	clear_face_analyser()
-	clear_content_analyser()
-	clear_face_occluder()
-	clear_face_parser()
-	read_static_image.cache_clear()
-
-
-def swap_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
-	frame_processor = get_frame_processor()
-	model_template = get_options('model').get('template')
-	model_size = get_options('model').get('size')
-	model_type = get_options('model').get('type')
-	crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, model_template, model_size)
-	crop_mask_list = []
-	if 'box' in DeepFakeAI.globals.face_mask_types:
-		crop_mask_list.append(create_static_box_mask(crop_frame.shape[:2][::-1], DeepFakeAI.globals.face_mask_blur, DeepFakeAI.globals.face_mask_padding))
-	if 'occlusion' in DeepFakeAI.globals.face_mask_types:
-		crop_mask_list.append(create_occlusion_mask(crop_frame))
-	crop_frame = prepare_crop_frame(crop_frame)
-	frame_processor_inputs = {}
-	for frame_processor_input in frame_processor.get_inputs():
-		if frame_processor_input.name == 'source':
-			if model_type == 'blendswap':
-				frame_processor_inputs[frame_processor_input.name] = prepare_source_frame(source_face)
-			else:
-				frame_processor_inputs[frame_processor_input.name] = prepare_source_embedding(source_face)
-		if frame_processor_input.name == 'target':
-			frame_processor_inputs[frame_processor_input.name] = crop_frame
-	crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
-	crop_frame = normalize_crop_frame(crop_frame)
-	if 'region' in DeepFakeAI.globals.face_mask_types:
-		crop_mask_list.append(create_region_mask(crop_frame, DeepFakeAI.globals.face_mask_regions))
-	crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
-	temp_frame = paste_back(temp_frame, crop_frame, crop_mask, affine_matrix)
-	return temp_frame
-
-
-def prepare_source_frame(source_face : Face) -> Frame:
-	source_frame = read_static_image(DeepFakeAI.globals.source_paths[0])
-	source_frame, _ = warp_face(source_frame, source_face.kps, 'arcface_112_v2', (112, 112))
-	source_frame = source_frame[:, :, ::-1] / 255.0
-	source_frame = source_frame.transpose(2, 0, 1)
-	source_frame = numpy.expand_dims(source_frame, axis = 0).astype(numpy.float32)
-	return source_frame
-
-
-def prepare_source_embedding(source_face : Face) -> Embedding:
-	model_type = get_options('model').get('type')
-	if model_type == 'inswapper':
-		model_matrix = get_model_matrix()
-		source_embedding = source_face.embedding.reshape((1, -1))
-		source_embedding = numpy.dot(source_embedding, model_matrix) / numpy.linalg.norm(source_embedding)
-	else:
-		source_embedding = source_face.normed_embedding.reshape(1, -1)
-	return source_embedding
-
-
-def prepare_crop_frame(crop_frame : Frame) -> Frame:
-	model_mean = get_options('model').get('mean')
-	model_standard_deviation = get_options('model').get('standard_deviation')
-	crop_frame = crop_frame[:, :, ::-1] / 255.0
-	crop_frame = (crop_frame - model_mean) / model_standard_deviation
-	crop_frame = crop_frame.transpose(2, 0, 1)
-	crop_frame = numpy.expand_dims(crop_frame, axis = 0).astype(numpy.float32)
-	return crop_frame
-
-
-def normalize_crop_frame(crop_frame : Frame) -> Frame:
-	crop_frame = crop_frame.transpose(1, 2, 0)
-	crop_frame = (crop_frame * 255.0).round()
-	crop_frame = crop_frame[:, :, ::-1].astype(numpy.uint8)
-	return crop_frame
-
-
-def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
-	return swap_face(source_face, target_face, temp_frame)
-
-
-def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
-	if 'reference' in DeepFakeAI.globals.face_selector_mode:
-		similar_faces = find_similar_faces(temp_frame, reference_faces, DeepFakeAI.globals.reference_face_distance)
-		if similar_faces:
-			for similar_face in similar_faces:
-				temp_frame = swap_face(source_face, similar_face, temp_frame)
-	if 'one' in DeepFakeAI.globals.face_selector_mode:
-		target_face = get_one_face(temp_frame)
-		if target_face:
-			temp_frame = swap_face(source_face, target_face, temp_frame)
-	if 'many' in DeepFakeAI.globals.face_selector_mode:
-		many_faces = get_many_faces(temp_frame)
-		if many_faces:
-			for target_face in many_faces:
-				temp_frame = swap_face(source_face, target_face, temp_frame)
-	return temp_frame
-
-
-def process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
-	source_frames = read_static_images(source_paths)
-	source_face = get_average_face(source_frames)
-	reference_faces = get_reference_faces() if 'reference' in DeepFakeAI.globals.face_selector_mode else None
-	for temp_frame_path in temp_frame_paths:
-		temp_frame = read_image(temp_frame_path)
-		result_frame = process_frame(source_face, reference_faces, temp_frame)
-		write_image(temp_frame_path, result_frame)
-		update_progress()
-
-
-def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
-	source_frames = read_static_images(source_paths)
-	source_face = get_average_face(source_frames)
-	reference_faces = get_reference_faces() if 'reference' in DeepFakeAI.globals.face_selector_mode else None
-	target_frame = read_static_image(target_path)
-	result_frame = process_frame(source_face, reference_faces, target_frame)
-	write_image(output_path, result_frame)
-
-
-def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
-	frame_processors.multi_process_frames(source_paths, temp_frame_paths, process_frames)
diff --git a/DeepFakeAI/processors/frame/modules/frame_enhancer.py b/DeepFakeAI/processors/frame/modules/frame_enhancer.py
deleted file mode 100644
index 5b3b75f5e9613460b8132a191e7450c29b667788..0000000000000000000000000000000000000000
--- a/DeepFakeAI/processors/frame/modules/frame_enhancer.py
+++ /dev/null
@@ -1,172 +0,0 @@
-from typing import Any, List, Literal, Optional
-from argparse import ArgumentParser
-import threading
-import cv2
-from basicsr.archs.rrdbnet_arch import RRDBNet
-from realesrgan import RealESRGANer
-
-import DeepFakeAI.globals
-import DeepFakeAI.processors.frame.core as frame_processors
-from DeepFakeAI import logger, wording
-from DeepFakeAI.face_analyser import clear_face_analyser
-from DeepFakeAI.content_analyser import clear_content_analyser
-from DeepFakeAI.typing import Face, FaceSet, Frame, Update_Process, ProcessMode, ModelSet, OptionsWithModel
-from DeepFakeAI.common_helper import create_metavar
-from DeepFakeAI.execution_helper import map_device
-from DeepFakeAI.filesystem import is_file, resolve_relative_path
-from DeepFakeAI.download import conditional_download, is_download_done
-from DeepFakeAI.vision import read_image, read_static_image, write_image
-from DeepFakeAI.processors.frame import globals as frame_processors_globals
-from DeepFakeAI.processors.frame import choices as frame_processors_choices
-
-FRAME_PROCESSOR = None
-THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
-THREAD_LOCK : threading.Lock = threading.Lock()
-NAME = __name__.upper()
-MODELS : ModelSet =\
-{
-	'real_esrgan_x2plus':
-	{
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/real_esrgan_x2plus.pth',
-		'path': resolve_relative_path('../.assets/models/real_esrgan_x2plus.pth'),
-		'scale': 2
-	},
-	'real_esrgan_x4plus':
-	{
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/real_esrgan_x4plus.pth',
-		'path': resolve_relative_path('../.assets/models/real_esrgan_x4plus.pth'),
-		'scale': 4
-	},
-	'real_esrnet_x4plus':
-	{
-		'url': 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/real_esrnet_x4plus.pth',
-		'path': resolve_relative_path('../.assets/models/real_esrnet_x4plus.pth'),
-		'scale': 4
-	}
-}
-OPTIONS : Optional[OptionsWithModel] = None
-
-
-def get_frame_processor() -> Any:
-	global FRAME_PROCESSOR
-
-	with THREAD_LOCK:
-		if FRAME_PROCESSOR is None:
-			model_path = get_options('model').get('path')
-			model_scale = get_options('model').get('scale')
-			FRAME_PROCESSOR = RealESRGANer(
-				model_path = model_path,
-				model = RRDBNet(
-					num_in_ch = 3,
-					num_out_ch = 3,
-					scale = model_scale
-				),
-				device = map_device(DeepFakeAI.globals.execution_providers),
-				scale = model_scale
-			)
-	return FRAME_PROCESSOR
-
-
-def clear_frame_processor() -> None:
-	global FRAME_PROCESSOR
-
-	FRAME_PROCESSOR = None
-
-
-def get_options(key : Literal['model']) -> Any:
-	global OPTIONS
-
-	if OPTIONS is None:
-		OPTIONS =\
-		{
-			'model': MODELS[frame_processors_globals.frame_enhancer_model]
-		}
-	return OPTIONS.get(key)
-
-
-def set_options(key : Literal['model'], value : Any) -> None:
-	global OPTIONS
-
-	OPTIONS[key] = value
-
-
-def register_args(program : ArgumentParser) -> None:
-	program.add_argument('--frame-enhancer-model', help = wording.get('frame_processor_model_help'), default = 'real_esrgan_x2plus', choices = frame_processors_choices.frame_enhancer_models)
-	program.add_argument('--frame-enhancer-blend', help = wording.get('frame_processor_blend_help'), type = int, default = 80, choices = frame_processors_choices.frame_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.frame_enhancer_blend_range))
-
-
-def apply_args(program : ArgumentParser) -> None:
-	args = program.parse_args()
-	frame_processors_globals.frame_enhancer_model = args.frame_enhancer_model
-	frame_processors_globals.frame_enhancer_blend = args.frame_enhancer_blend
-
-
-def pre_check() -> bool:
-	if not DeepFakeAI.globals.skip_download:
-		download_directory_path = resolve_relative_path('../.assets/models')
-		model_url = get_options('model').get('url')
-		conditional_download(download_directory_path, [ model_url ])
-	return True
-
-
-def pre_process(mode : ProcessMode) -> bool:
-	model_url = get_options('model').get('url')
-	model_path = get_options('model').get('path')
-	if not DeepFakeAI.globals.skip_download and not is_download_done(model_url, model_path):
-		logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
-		return False
-	elif not is_file(model_path):
-		logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
-		return False
-	if mode == 'output' and not DeepFakeAI.globals.output_path:
-		logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
-		return False
-	return True
-
-
-def post_process() -> None:
-	clear_frame_processor()
-	clear_face_analyser()
-	clear_content_analyser()
-	read_static_image.cache_clear()
-
-
-def enhance_frame(temp_frame : Frame) -> Frame:
-	with THREAD_SEMAPHORE:
-		paste_frame, _ = get_frame_processor().enhance(temp_frame)
-	temp_frame = blend_frame(temp_frame, paste_frame)
-	return temp_frame
-
-
-def blend_frame(temp_frame : Frame, paste_frame : Frame) -> Frame:
-	frame_enhancer_blend = 1 - (frame_processors_globals.frame_enhancer_blend / 100)
-	paste_frame_height, paste_frame_width = paste_frame.shape[0:2]
-	temp_frame = cv2.resize(temp_frame, (paste_frame_width, paste_frame_height))
-	temp_frame = cv2.addWeighted(temp_frame, frame_enhancer_blend, paste_frame, 1 - frame_enhancer_blend, 0)
-	return temp_frame
-
-
-def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
-	pass
-
-
-def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
-	return enhance_frame(temp_frame)
-
-
-def process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
-	for temp_frame_path in temp_frame_paths:
-		temp_frame = read_image(temp_frame_path)
-		result_frame = process_frame(None, None, temp_frame)
-		write_image(temp_frame_path, result_frame)
-		update_progress()
-
-
-def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
-	target_frame = read_static_image(target_path)
-	result = process_frame(None, None, target_frame)
-	write_image(output_path, result)
-
-
-def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
-	frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
diff --git a/DeepFakeAI/processors/frame/typings.py b/DeepFakeAI/processors/frame/typings.py
deleted file mode 100644
index a397eef74f530a0f645ebd1d6737994719a9ba9e..0000000000000000000000000000000000000000
--- a/DeepFakeAI/processors/frame/typings.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from typing import Literal
-
-FaceSwapperModel = Literal['blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial']
-FaceEnhancerModel = Literal['codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer']
-FrameEnhancerModel = Literal['real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus']
-
-FaceDebuggerItem = Literal['bbox', 'kps', 'face-mask', 'score']
diff --git a/DeepFakeAI/typing.py b/DeepFakeAI/typing.py
deleted file mode 100644
index 2964040fb394a2a0f624f40e4f3aad0c12b38226..0000000000000000000000000000000000000000
--- a/DeepFakeAI/typing.py
+++ /dev/null
@@ -1,51 +0,0 @@
-from typing import Any, Literal, Callable, List, Tuple, Dict, TypedDict
-from collections import namedtuple
-import numpy
-
-Bbox = numpy.ndarray[Any, Any]
-Kps = numpy.ndarray[Any, Any]
-Score = float
-Embedding = numpy.ndarray[Any, Any]
-Face = namedtuple('Face',
-[
-	'bbox',
-	'kps',
-	'score',
-	'embedding',
-	'normed_embedding',
-	'gender',
-	'age'
-])
-FaceSet = Dict[str, List[Face]]
-FaceStore = TypedDict('FaceStore',
-{
-	'static_faces' : FaceSet,
-	'reference_faces': FaceSet
-})
-Frame = numpy.ndarray[Any, Any]
-Mask = numpy.ndarray[Any, Any]
-Matrix = numpy.ndarray[Any, Any]
-Padding = Tuple[int, int, int, int]
-
-Update_Process = Callable[[], None]
-Process_Frames = Callable[[List[str], List[str], Update_Process], None]
-LogLevel = Literal['error', 'warn', 'info', 'debug']
-Template = Literal['arcface_112_v1', 'arcface_112_v2', 'arcface_128_v2', 'ffhq_512']
-ProcessMode = Literal['output', 'preview', 'stream']
-FaceSelectorMode = Literal['reference', 'one', 'many']
-FaceAnalyserOrder = Literal['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best']
-FaceAnalyserAge = Literal['child', 'teen', 'adult', 'senior']
-FaceAnalyserGender = Literal['male', 'female']
-FaceDetectorModel = Literal['retinaface', 'yunet']
-FaceRecognizerModel = Literal['arcface_blendswap', 'arcface_inswapper', 'arcface_simswap']
-FaceMaskType = Literal['box', 'occlusion', 'region']
-FaceMaskRegion = Literal['skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 'eye-glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip'] -TempFrameFormat = Literal['jpg', 'png'] -OutputVideoEncoder = Literal['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc'] - -ModelValue = Dict[str, Any] -ModelSet = Dict[str, ModelValue] -OptionsWithModel = TypedDict('OptionsWithModel', -{ - 'model' : ModelValue -}) diff --git a/DeepFakeAI/uis/__init__.py b/DeepFakeAI/uis/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/DeepFakeAI/uis/assets/fixes.css b/DeepFakeAI/uis/assets/fixes.css deleted file mode 100644 index f65a7cfd3e3e34111a09a9100c6714ff49558615..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/assets/fixes.css +++ /dev/null @@ -1,7 +0,0 @@ -:root:root:root button:not([class]) -{ - border-radius: 0.375rem; - float: left; - overflow: hidden; - width: 100%; -} diff --git a/DeepFakeAI/uis/assets/overrides.css b/DeepFakeAI/uis/assets/overrides.css deleted file mode 100644 index 86ca371d5a146c1a28e5bc188f3771b6ebc1d263..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/assets/overrides.css +++ /dev/null @@ -1,44 +0,0 @@ -:root:root:root input[type="number"] -{ - max-width: 6rem; -} - -:root:root:root [type="checkbox"], -:root:root:root [type="radio"] -{ - border-radius: 50%; - height: 1.125rem; - width: 1.125rem; -} - -:root:root:root input[type="range"] -{ - height: 0.5rem; -} - -:root:root:root input[type="range"]::-moz-range-thumb, -:root:root:root input[type="range"]::-webkit-slider-thumb -{ - background: var(--neutral-300); - border: unset; - border-radius: 50%; - height: 1.125rem; - width: 1.125rem; -} - -:root:root:root input[type="range"]::-webkit-slider-thumb -{ - margin-top: 0.375rem; -} - -:root:root:root .grid-wrap.fixed-height -{ - min-height: unset; -} - -:root:root:root .grid-container -{ - grid-auto-rows: minmax(5em, 1fr); - grid-template-columns: repeat(var(--grid-cols), minmax(5em, 1fr)); - grid-template-rows: repeat(var(--grid-rows), minmax(5em, 1fr)); -} diff --git a/DeepFakeAI/uis/choices.py b/DeepFakeAI/uis/choices.py deleted file mode 100644 index 950467d0647d6f4d3ac0ea9702e30224154e5338..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/choices.py +++ /dev/null @@ -1,7 +0,0 @@ -from typing import List - -from DeepFakeAI.uis.typing import WebcamMode - -common_options : List[str] = [ 'keep-fps', 'keep-temp', 'skip-audio', 'skip-download' ] -webcam_modes : List[WebcamMode] = [ 'inline', 'udp', 'v4l2' ] -webcam_resolutions : List[str] = [ '320x240', '640x480', '800x600', '1024x768', '1280x720', '1280x960', '1920x1080', '2560x1440', '3840x2160' ] diff --git a/DeepFakeAI/uis/components/__init__.py b/DeepFakeAI/uis/components/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/DeepFakeAI/uis/components/about.py b/DeepFakeAI/uis/components/about.py deleted file mode 100644 index 61217ae3228510d518a26fedf7e9285900551f89..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/about.py +++ /dev/null @@ -1,23 +0,0 @@ -from typing import Optional -import gradio - -from DeepFakeAI import metadata, wording - -ABOUT_BUTTON : Optional[gradio.HTML] = None -DONATE_BUTTON : Optional[gradio.HTML] = None - - -def render() -> None: - global ABOUT_BUTTON - global DONATE_BUTTON - - ABOUT_BUTTON = gradio.Button( - value = metadata.get('name') + ' ' 
+ metadata.get('version'), - variant = 'primary', - link = metadata.get('url') - ) - DONATE_BUTTON = gradio.Button( - value = wording.get('donate_button_label'), - link = 'https://donate.DeepFakeAI.io', - size = 'sm' - ) diff --git a/DeepFakeAI/uis/components/benchmark.py b/DeepFakeAI/uis/components/benchmark.py deleted file mode 100644 index 02c69ef99c6a813cd34a35ae1af7208f68f3baca..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/benchmark.py +++ /dev/null @@ -1,132 +0,0 @@ -from typing import Any, Optional, List, Dict, Generator -import time -import tempfile -import statistics -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.face_analyser import get_face_analyser -from DeepFakeAI.face_store import clear_static_faces -from DeepFakeAI.processors.frame.core import get_frame_processors_modules -from DeepFakeAI.vision import count_video_frame_total -from DeepFakeAI.core import limit_resources, conditional_process -from DeepFakeAI.normalizer import normalize_output_path -from DeepFakeAI.filesystem import clear_temp -from DeepFakeAI.uis.core import get_ui_component - -BENCHMARK_RESULTS_DATAFRAME : Optional[gradio.Dataframe] = None -BENCHMARK_START_BUTTON : Optional[gradio.Button] = None -BENCHMARK_CLEAR_BUTTON : Optional[gradio.Button] = None -BENCHMARKS : Dict[str, str] =\ -{ - '240p': '.assets/examples/target-240p.mp4', - '360p': '.assets/examples/target-360p.mp4', - '540p': '.assets/examples/target-540p.mp4', - '720p': '.assets/examples/target-720p.mp4', - '1080p': '.assets/examples/target-1080p.mp4', - '1440p': '.assets/examples/target-1440p.mp4', - '2160p': '.assets/examples/target-2160p.mp4' -} - - -def render() -> None: - global BENCHMARK_RESULTS_DATAFRAME - global BENCHMARK_START_BUTTON - global BENCHMARK_CLEAR_BUTTON - - BENCHMARK_RESULTS_DATAFRAME = gradio.Dataframe( - label = wording.get('benchmark_results_dataframe_label'), - headers = - [ - 'target_path', - 'benchmark_cycles', - 'average_run', - 'fastest_run', - 'slowest_run', - 'relative_fps' - ], - datatype = - [ - 'str', - 'number', - 'number', - 'number', - 'number', - 'number' - ] - ) - BENCHMARK_START_BUTTON = gradio.Button( - value = wording.get('start_button_label'), - variant = 'primary', - size = 'sm' - ) - BENCHMARK_CLEAR_BUTTON = gradio.Button( - value = wording.get('clear_button_label'), - size = 'sm' - ) - - -def listen() -> None: - benchmark_runs_checkbox_group = get_ui_component('benchmark_runs_checkbox_group') - benchmark_cycles_slider = get_ui_component('benchmark_cycles_slider') - if benchmark_runs_checkbox_group and benchmark_cycles_slider: - BENCHMARK_START_BUTTON.click(start, inputs = [ benchmark_runs_checkbox_group, benchmark_cycles_slider ], outputs = BENCHMARK_RESULTS_DATAFRAME) - BENCHMARK_CLEAR_BUTTON.click(clear, outputs = BENCHMARK_RESULTS_DATAFRAME) - - -def start(benchmark_runs : List[str], benchmark_cycles : int) -> Generator[List[Any], None, None]: - DeepFakeAI.globals.source_paths = [ '.assets/examples/source.jpg' ] - target_paths = [ BENCHMARKS[benchmark_run] for benchmark_run in benchmark_runs if benchmark_run in BENCHMARKS ] - benchmark_results = [] - if target_paths: - pre_process() - for target_path in target_paths: - benchmark_results.append(benchmark(target_path, benchmark_cycles)) - yield benchmark_results - post_process() - - -def pre_process() -> None: - limit_resources() - get_face_analyser() - for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors): - 
frame_processor_module.get_frame_processor() - - -def post_process() -> None: - clear_static_faces() - - -def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]: - process_times = [] - total_fps = 0.0 - for i in range(benchmark_cycles): - DeepFakeAI.globals.target_path = target_path - DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_paths, DeepFakeAI.globals.target_path, tempfile.gettempdir()) - video_frame_total = count_video_frame_total(DeepFakeAI.globals.target_path) - start_time = time.perf_counter() - conditional_process() - end_time = time.perf_counter() - process_time = end_time - start_time - total_fps += video_frame_total / process_time - process_times.append(process_time) - average_run = round(statistics.mean(process_times), 2) - fastest_run = round(min(process_times), 2) - slowest_run = round(max(process_times), 2) - relative_fps = round(total_fps / benchmark_cycles, 2) - return\ - [ - DeepFakeAI.globals.target_path, - benchmark_cycles, - average_run, - fastest_run, - slowest_run, - relative_fps - ] - - -def clear() -> gradio.Dataframe: - if DeepFakeAI.globals.target_path: - clear_temp(DeepFakeAI.globals.target_path) - return gradio.Dataframe(value = None) diff --git a/DeepFakeAI/uis/components/benchmark_options.py b/DeepFakeAI/uis/components/benchmark_options.py deleted file mode 100644 index 0012ca0bbec6892ac47d165d28d8ce5a38638cdb..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/benchmark_options.py +++ /dev/null @@ -1,29 +0,0 @@ -from typing import Optional -import gradio - -from DeepFakeAI import wording -from DeepFakeAI.uis.core import register_ui_component -from DeepFakeAI.uis.components.benchmark import BENCHMARKS - -BENCHMARK_RUNS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None -BENCHMARK_CYCLES_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global BENCHMARK_RUNS_CHECKBOX_GROUP - global BENCHMARK_CYCLES_SLIDER - - BENCHMARK_RUNS_CHECKBOX_GROUP = gradio.CheckboxGroup( - label = wording.get('benchmark_runs_checkbox_group_label'), - value = list(BENCHMARKS.keys()), - choices = list(BENCHMARKS.keys()) - ) - BENCHMARK_CYCLES_SLIDER = gradio.Slider( - label = wording.get('benchmark_cycles_slider_label'), - value = 3, - step = 1, - minimum = 1, - maximum = 10 - ) - register_ui_component('benchmark_runs_checkbox_group', BENCHMARK_RUNS_CHECKBOX_GROUP) - register_ui_component('benchmark_cycles_slider', BENCHMARK_CYCLES_SLIDER) diff --git a/DeepFakeAI/uis/components/common_options.py b/DeepFakeAI/uis/components/common_options.py deleted file mode 100644 index 30cb9fcae5cb24327c63601d0eecd6c3c99cb4b1..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/common_options.py +++ /dev/null @@ -1,38 +0,0 @@ -from typing import Optional, List -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.uis import choices as uis_choices - -COMMON_OPTIONS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None - - -def render() -> None: - global COMMON_OPTIONS_CHECKBOX_GROUP - - value = [] - if DeepFakeAI.globals.keep_fps: - value.append('keep-fps') - if DeepFakeAI.globals.keep_temp: - value.append('keep-temp') - if DeepFakeAI.globals.skip_audio: - value.append('skip-audio') - if DeepFakeAI.globals.skip_download: - value.append('skip-download') - COMMON_OPTIONS_CHECKBOX_GROUP = gradio.CheckboxGroup( - label = wording.get('common_options_checkbox_group_label'), - choices = uis_choices.common_options, - value = value - ) - - -def listen() -> 
None: - COMMON_OPTIONS_CHECKBOX_GROUP.change(update, inputs = COMMON_OPTIONS_CHECKBOX_GROUP) - - -def update(common_options : List[str]) -> None: - DeepFakeAI.globals.keep_fps = 'keep-fps' in common_options - DeepFakeAI.globals.keep_temp = 'keep-temp' in common_options - DeepFakeAI.globals.skip_audio = 'skip-audio' in common_options - DeepFakeAI.globals.skip_download = 'skip-download' in common_options diff --git a/DeepFakeAI/uis/components/execution.py b/DeepFakeAI/uis/components/execution.py deleted file mode 100644 index 00f65747a6c1d9776c8a7d9a0c0a3737b549db06..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/execution.py +++ /dev/null @@ -1,34 +0,0 @@ -from typing import List, Optional -import gradio -import onnxruntime - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.face_analyser import clear_face_analyser -from DeepFakeAI.processors.frame.core import clear_frame_processors_modules -from DeepFakeAI.execution_helper import encode_execution_providers, decode_execution_providers - -EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None - - -def render() -> None: - global EXECUTION_PROVIDERS_CHECKBOX_GROUP - - EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup( - label = wording.get('execution_providers_checkbox_group_label'), - choices = encode_execution_providers(onnxruntime.get_available_providers()), - value = encode_execution_providers(DeepFakeAI.globals.execution_providers) - ) - - -def listen() -> None: - EXECUTION_PROVIDERS_CHECKBOX_GROUP.change(update_execution_providers, inputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP, outputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP) - - -def update_execution_providers(execution_providers : List[str]) -> gradio.CheckboxGroup: - clear_face_analyser() - clear_frame_processors_modules() - if not execution_providers: - execution_providers = encode_execution_providers(onnxruntime.get_available_providers()) - DeepFakeAI.globals.execution_providers = decode_execution_providers(execution_providers) - return gradio.CheckboxGroup(value = execution_providers) diff --git a/DeepFakeAI/uis/components/execution_queue_count.py b/DeepFakeAI/uis/components/execution_queue_count.py deleted file mode 100644 index 4b88b30f810adff34eb883ac4cb5c5418bdba57f..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/execution_queue_count.py +++ /dev/null @@ -1,28 +0,0 @@ -from typing import Optional -import gradio - -import DeepFakeAI.globals -import DeepFakeAI.choices -from DeepFakeAI import wording - -EXECUTION_QUEUE_COUNT_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global EXECUTION_QUEUE_COUNT_SLIDER - - EXECUTION_QUEUE_COUNT_SLIDER = gradio.Slider( - label = wording.get('execution_queue_count_slider_label'), - value = DeepFakeAI.globals.execution_queue_count, - step = DeepFakeAI.choices.execution_queue_count_range[1] - DeepFakeAI.choices.execution_queue_count_range[0], - minimum = DeepFakeAI.choices.execution_queue_count_range[0], - maximum = DeepFakeAI.choices.execution_queue_count_range[-1] - ) - - -def listen() -> None: - EXECUTION_QUEUE_COUNT_SLIDER.change(update_execution_queue_count, inputs = EXECUTION_QUEUE_COUNT_SLIDER) - - -def update_execution_queue_count(execution_queue_count : int = 1) -> None: - DeepFakeAI.globals.execution_queue_count = execution_queue_count diff --git a/DeepFakeAI/uis/components/execution_thread_count.py b/DeepFakeAI/uis/components/execution_thread_count.py deleted file mode 100644 index 
b4ce2e98c1986a444a361a126019f738823e1ed3..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/execution_thread_count.py +++ /dev/null @@ -1,29 +0,0 @@ -from typing import Optional -import gradio - -import DeepFakeAI.globals -import DeepFakeAI.choices -from DeepFakeAI import wording - -EXECUTION_THREAD_COUNT_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global EXECUTION_THREAD_COUNT_SLIDER - - EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider( - label = wording.get('execution_thread_count_slider_label'), - value = DeepFakeAI.globals.execution_thread_count, - step = DeepFakeAI.choices.execution_thread_count_range[1] - DeepFakeAI.choices.execution_thread_count_range[0], - minimum = DeepFakeAI.choices.execution_thread_count_range[0], - maximum = DeepFakeAI.choices.execution_thread_count_range[-1] - ) - - -def listen() -> None: - EXECUTION_THREAD_COUNT_SLIDER.change(update_execution_thread_count, inputs = EXECUTION_THREAD_COUNT_SLIDER) - - -def update_execution_thread_count(execution_thread_count : int = 1) -> None: - DeepFakeAI.globals.execution_thread_count = execution_thread_count - diff --git a/DeepFakeAI/uis/components/face_analyser.py b/DeepFakeAI/uis/components/face_analyser.py deleted file mode 100644 index b9a197c15c25569f341d04fdf8ef062c07f2cc11..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/face_analyser.py +++ /dev/null @@ -1,98 +0,0 @@ -from typing import Optional - -import gradio - -import DeepFakeAI.globals -import DeepFakeAI.choices -from DeepFakeAI import wording -from DeepFakeAI.typing import FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceDetectorModel -from DeepFakeAI.uis.core import register_ui_component - -FACE_ANALYSER_ORDER_DROPDOWN : Optional[gradio.Dropdown] = None -FACE_ANALYSER_AGE_DROPDOWN : Optional[gradio.Dropdown] = None -FACE_ANALYSER_GENDER_DROPDOWN : Optional[gradio.Dropdown] = None -FACE_DETECTOR_SIZE_DROPDOWN : Optional[gradio.Dropdown] = None -FACE_DETECTOR_SCORE_SLIDER : Optional[gradio.Slider] = None -FACE_DETECTOR_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None - - -def render() -> None: - global FACE_ANALYSER_ORDER_DROPDOWN - global FACE_ANALYSER_AGE_DROPDOWN - global FACE_ANALYSER_GENDER_DROPDOWN - global FACE_DETECTOR_SIZE_DROPDOWN - global FACE_DETECTOR_SCORE_SLIDER - global FACE_DETECTOR_MODEL_DROPDOWN - - with gradio.Row(): - FACE_ANALYSER_ORDER_DROPDOWN = gradio.Dropdown( - label = wording.get('face_analyser_order_dropdown_label'), - choices = DeepFakeAI.choices.face_analyser_orders, - value = DeepFakeAI.globals.face_analyser_order - ) - FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown( - label = wording.get('face_analyser_age_dropdown_label'), - choices = [ 'none' ] + DeepFakeAI.choices.face_analyser_ages, - value = DeepFakeAI.globals.face_analyser_age or 'none' - ) - FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown( - label = wording.get('face_analyser_gender_dropdown_label'), - choices = [ 'none' ] + DeepFakeAI.choices.face_analyser_genders, - value = DeepFakeAI.globals.face_analyser_gender or 'none' - ) - FACE_DETECTOR_MODEL_DROPDOWN = gradio.Dropdown( - label = wording.get('face_detector_model_dropdown_label'), - choices = DeepFakeAI.choices.face_detector_models, - value = DeepFakeAI.globals.face_detector_model - ) - FACE_DETECTOR_SIZE_DROPDOWN = gradio.Dropdown( - label = wording.get('face_detector_size_dropdown_label'), - choices = DeepFakeAI.choices.face_detector_sizes, - value = DeepFakeAI.globals.face_detector_size - ) - FACE_DETECTOR_SCORE_SLIDER = 
gradio.Slider( - label = wording.get('face_detector_score_slider_label'), - value = DeepFakeAI.globals.face_detector_score, - step = DeepFakeAI.choices.face_detector_score_range[1] - DeepFakeAI.choices.face_detector_score_range[0], - minimum = DeepFakeAI.choices.face_detector_score_range[0], - maximum = DeepFakeAI.choices.face_detector_score_range[-1] - ) - register_ui_component('face_analyser_order_dropdown', FACE_ANALYSER_ORDER_DROPDOWN) - register_ui_component('face_analyser_age_dropdown', FACE_ANALYSER_AGE_DROPDOWN) - register_ui_component('face_analyser_gender_dropdown', FACE_ANALYSER_GENDER_DROPDOWN) - register_ui_component('face_detector_model_dropdown', FACE_DETECTOR_MODEL_DROPDOWN) - register_ui_component('face_detector_size_dropdown', FACE_DETECTOR_SIZE_DROPDOWN) - register_ui_component('face_detector_score_slider', FACE_DETECTOR_SCORE_SLIDER) - - -def listen() -> None: - FACE_ANALYSER_ORDER_DROPDOWN.select(update_face_analyser_order, inputs = FACE_ANALYSER_ORDER_DROPDOWN) - FACE_ANALYSER_AGE_DROPDOWN.select(update_face_analyser_age, inputs = FACE_ANALYSER_AGE_DROPDOWN) - FACE_ANALYSER_GENDER_DROPDOWN.select(update_face_analyser_gender, inputs = FACE_ANALYSER_GENDER_DROPDOWN) - FACE_DETECTOR_MODEL_DROPDOWN.change(update_face_detector_model, inputs = FACE_DETECTOR_MODEL_DROPDOWN) - FACE_DETECTOR_SIZE_DROPDOWN.select(update_face_detector_size, inputs = FACE_DETECTOR_SIZE_DROPDOWN) - FACE_DETECTOR_SCORE_SLIDER.change(update_face_detector_score, inputs = FACE_DETECTOR_SCORE_SLIDER) - - -def update_face_analyser_order(face_analyser_order : FaceAnalyserOrder) -> None: - DeepFakeAI.globals.face_analyser_order = face_analyser_order if face_analyser_order != 'none' else None - - -def update_face_analyser_age(face_analyser_age : FaceAnalyserAge) -> None: - DeepFakeAI.globals.face_analyser_age = face_analyser_age if face_analyser_age != 'none' else None - - -def update_face_analyser_gender(face_analyser_gender : FaceAnalyserGender) -> None: - DeepFakeAI.globals.face_analyser_gender = face_analyser_gender if face_analyser_gender != 'none' else None - - -def update_face_detector_model(face_detector_model : FaceDetectorModel) -> None: - DeepFakeAI.globals.face_detector_model = face_detector_model - - -def update_face_detector_size(face_detector_size : str) -> None: - DeepFakeAI.globals.face_detector_size = face_detector_size - - -def update_face_detector_score(face_detector_score : float) -> None: - DeepFakeAI.globals.face_detector_score = face_detector_score diff --git a/DeepFakeAI/uis/components/face_masker.py b/DeepFakeAI/uis/components/face_masker.py deleted file mode 100644 index 24416ead4229dd9eadfd252e14ba956db52258c6..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/face_masker.py +++ /dev/null @@ -1,123 +0,0 @@ -from typing import Optional, Tuple, List -import gradio - -import DeepFakeAI.globals -import DeepFakeAI.choices -from DeepFakeAI import wording -from DeepFakeAI.typing import FaceMaskType, FaceMaskRegion -from DeepFakeAI.uis.core import register_ui_component - -FACE_MASK_TYPES_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None -FACE_MASK_BLUR_SLIDER : Optional[gradio.Slider] = None -FACE_MASK_BOX_GROUP : Optional[gradio.Group] = None -FACE_MASK_REGION_GROUP : Optional[gradio.Group] = None -FACE_MASK_PADDING_TOP_SLIDER : Optional[gradio.Slider] = None -FACE_MASK_PADDING_RIGHT_SLIDER : Optional[gradio.Slider] = None -FACE_MASK_PADDING_BOTTOM_SLIDER : Optional[gradio.Slider] = None -FACE_MASK_PADDING_LEFT_SLIDER : Optional[gradio.Slider] = None 
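# A condensed sketch of the slider convention these components repeat: each
# DeepFakeAI.choices range is an evenly spaced list built by create_range(),
# so the Slider step is the gap between the first two entries and the bounds
# are the ends of the list. The range literal below is a stand-in for
# DeepFakeAI.choices.face_detector_score_range, not an import from the package,
# and the label is hypothetical (the real one comes from wording.get()).
import gradio

face_detector_score_range = [ round(score * 0.05, 2) for score in range(0, 21) ]  # 0.0, 0.05, ..., 1.0

FACE_DETECTOR_SCORE_SLIDER_SKETCH = gradio.Slider(
	label = 'FACE DETECTOR SCORE',
	value = 0.5,
	step = face_detector_score_range[1] - face_detector_score_range[0],  # 0.05
	minimum = face_detector_score_range[0],  # 0.0
	maximum = face_detector_score_range[-1]  # 1.0
)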
-FACE_MASK_REGION_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None - - -def render() -> None: - global FACE_MASK_TYPES_CHECKBOX_GROUP - global FACE_MASK_BLUR_SLIDER - global FACE_MASK_BOX_GROUP - global FACE_MASK_REGION_GROUP - global FACE_MASK_PADDING_TOP_SLIDER - global FACE_MASK_PADDING_RIGHT_SLIDER - global FACE_MASK_PADDING_BOTTOM_SLIDER - global FACE_MASK_PADDING_LEFT_SLIDER - global FACE_MASK_REGION_CHECKBOX_GROUP - - has_box_mask = 'box' in DeepFakeAI.globals.face_mask_types - has_region_mask = 'region' in DeepFakeAI.globals.face_mask_types - FACE_MASK_TYPES_CHECKBOX_GROUP = gradio.CheckboxGroup( - label = wording.get('face_mask_types_checkbox_group_label'), - choices = DeepFakeAI.choices.face_mask_types, - value = DeepFakeAI.globals.face_mask_types - ) - with gradio.Group(visible = has_box_mask) as FACE_MASK_BOX_GROUP: - FACE_MASK_BLUR_SLIDER = gradio.Slider( - label = wording.get('face_mask_blur_slider_label'), - step = DeepFakeAI.choices.face_mask_blur_range[1] - DeepFakeAI.choices.face_mask_blur_range[0], - minimum = DeepFakeAI.choices.face_mask_blur_range[0], - maximum = DeepFakeAI.choices.face_mask_blur_range[-1], - value = DeepFakeAI.globals.face_mask_blur - ) - with gradio.Row(): - FACE_MASK_PADDING_TOP_SLIDER = gradio.Slider( - label = wording.get('face_mask_padding_top_slider_label'), - step = DeepFakeAI.choices.face_mask_padding_range[1] - DeepFakeAI.choices.face_mask_padding_range[0], - minimum = DeepFakeAI.choices.face_mask_padding_range[0], - maximum = DeepFakeAI.choices.face_mask_padding_range[-1], - value = DeepFakeAI.globals.face_mask_padding[0] - ) - FACE_MASK_PADDING_RIGHT_SLIDER = gradio.Slider( - label = wording.get('face_mask_padding_right_slider_label'), - step = DeepFakeAI.choices.face_mask_padding_range[1] - DeepFakeAI.choices.face_mask_padding_range[0], - minimum = DeepFakeAI.choices.face_mask_padding_range[0], - maximum = DeepFakeAI.choices.face_mask_padding_range[-1], - value = DeepFakeAI.globals.face_mask_padding[1] - ) - with gradio.Row(): - FACE_MASK_PADDING_BOTTOM_SLIDER = gradio.Slider( - label = wording.get('face_mask_padding_bottom_slider_label'), - step = DeepFakeAI.choices.face_mask_padding_range[1] - DeepFakeAI.choices.face_mask_padding_range[0], - minimum = DeepFakeAI.choices.face_mask_padding_range[0], - maximum = DeepFakeAI.choices.face_mask_padding_range[-1], - value = DeepFakeAI.globals.face_mask_padding[2] - ) - FACE_MASK_PADDING_LEFT_SLIDER = gradio.Slider( - label = wording.get('face_mask_padding_left_slider_label'), - step = DeepFakeAI.choices.face_mask_padding_range[1] - DeepFakeAI.choices.face_mask_padding_range[0], - minimum = DeepFakeAI.choices.face_mask_padding_range[0], - maximum = DeepFakeAI.choices.face_mask_padding_range[-1], - value = DeepFakeAI.globals.face_mask_padding[3] - ) - with gradio.Row(): - FACE_MASK_REGION_CHECKBOX_GROUP = gradio.CheckboxGroup( - label = wording.get('face_mask_region_checkbox_group_label'), - choices = DeepFakeAI.choices.face_mask_regions, - value = DeepFakeAI.globals.face_mask_regions, - visible = has_region_mask - ) - register_ui_component('face_mask_types_checkbox_group', FACE_MASK_TYPES_CHECKBOX_GROUP) - register_ui_component('face_mask_blur_slider', FACE_MASK_BLUR_SLIDER) - register_ui_component('face_mask_padding_top_slider', FACE_MASK_PADDING_TOP_SLIDER) - register_ui_component('face_mask_padding_right_slider', FACE_MASK_PADDING_RIGHT_SLIDER) - register_ui_component('face_mask_padding_bottom_slider', FACE_MASK_PADDING_BOTTOM_SLIDER) - 
register_ui_component('face_mask_padding_left_slider', FACE_MASK_PADDING_LEFT_SLIDER) - register_ui_component('face_mask_region_checkbox_group', FACE_MASK_REGION_CHECKBOX_GROUP) - - -def listen() -> None: - FACE_MASK_TYPES_CHECKBOX_GROUP.change(update_face_mask_type, inputs = FACE_MASK_TYPES_CHECKBOX_GROUP, outputs = [ FACE_MASK_TYPES_CHECKBOX_GROUP, FACE_MASK_BOX_GROUP, FACE_MASK_REGION_CHECKBOX_GROUP ]) - FACE_MASK_BLUR_SLIDER.change(update_face_mask_blur, inputs = FACE_MASK_BLUR_SLIDER) - FACE_MASK_REGION_CHECKBOX_GROUP.change(update_face_mask_regions, inputs = FACE_MASK_REGION_CHECKBOX_GROUP, outputs = FACE_MASK_REGION_CHECKBOX_GROUP) - face_mask_padding_sliders = [ FACE_MASK_PADDING_TOP_SLIDER, FACE_MASK_PADDING_RIGHT_SLIDER, FACE_MASK_PADDING_BOTTOM_SLIDER, FACE_MASK_PADDING_LEFT_SLIDER ] - for face_mask_padding_slider in face_mask_padding_sliders: - face_mask_padding_slider.change(update_face_mask_padding, inputs = face_mask_padding_sliders) - - -def update_face_mask_type(face_mask_types : List[FaceMaskType]) -> Tuple[gradio.CheckboxGroup, gradio.Group, gradio.CheckboxGroup]: - if not face_mask_types: - face_mask_types = DeepFakeAI.choices.face_mask_types - DeepFakeAI.globals.face_mask_types = face_mask_types - has_box_mask = 'box' in face_mask_types - has_region_mask = 'region' in face_mask_types - return gradio.CheckboxGroup(value = face_mask_types), gradio.Group(visible = has_box_mask), gradio.CheckboxGroup(visible = has_region_mask) - - -def update_face_mask_blur(face_mask_blur : float) -> None: - DeepFakeAI.globals.face_mask_blur = face_mask_blur - - -def update_face_mask_padding(face_mask_padding_top : int, face_mask_padding_right : int, face_mask_padding_bottom : int, face_mask_padding_left : int) -> None: - DeepFakeAI.globals.face_mask_padding = (face_mask_padding_top, face_mask_padding_right, face_mask_padding_bottom, face_mask_padding_left) - - -def update_face_mask_regions(face_mask_regions : List[FaceMaskRegion]) -> gradio.CheckboxGroup: - if not face_mask_regions: - face_mask_regions = DeepFakeAI.choices.face_mask_regions - DeepFakeAI.globals.face_mask_regions = face_mask_regions - return gradio.CheckboxGroup(value = face_mask_regions) diff --git a/DeepFakeAI/uis/components/face_selector.py b/DeepFakeAI/uis/components/face_selector.py deleted file mode 100644 index 25e7ae79912422ae9783c73f7fd39b372bd02028..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/face_selector.py +++ /dev/null @@ -1,164 +0,0 @@ -from typing import List, Optional, Tuple, Any, Dict - -import gradio - -import DeepFakeAI.globals -import DeepFakeAI.choices -from DeepFakeAI import wording -from DeepFakeAI.face_store import clear_static_faces, clear_reference_faces -from DeepFakeAI.vision import get_video_frame, read_static_image, normalize_frame_color -from DeepFakeAI.face_analyser import get_many_faces -from DeepFakeAI.typing import Frame, FaceSelectorMode -from DeepFakeAI.filesystem import is_image, is_video -from DeepFakeAI.uis.core import get_ui_component, register_ui_component -from DeepFakeAI.uis.typing import ComponentName - -FACE_SELECTOR_MODE_DROPDOWN : Optional[gradio.Dropdown] = None -REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None -REFERENCE_FACE_DISTANCE_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global FACE_SELECTOR_MODE_DROPDOWN - global REFERENCE_FACE_POSITION_GALLERY - global REFERENCE_FACE_DISTANCE_SLIDER - - reference_face_gallery_args: Dict[str, Any] =\ - { - 'label': wording.get('reference_face_gallery_label'), - 
'object_fit': 'cover', - 'columns': 8, - 'allow_preview': False, - 'visible': 'reference' in DeepFakeAI.globals.face_selector_mode - } - if is_image(DeepFakeAI.globals.target_path): - reference_frame = read_static_image(DeepFakeAI.globals.target_path) - reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame) - if is_video(DeepFakeAI.globals.target_path): - reference_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) - reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame) - FACE_SELECTOR_MODE_DROPDOWN = gradio.Dropdown( - label = wording.get('face_selector_mode_dropdown_label'), - choices = DeepFakeAI.choices.face_selector_modes, - value = DeepFakeAI.globals.face_selector_mode - ) - REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args) - REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider( - label = wording.get('reference_face_distance_slider_label'), - value = DeepFakeAI.globals.reference_face_distance, - step = DeepFakeAI.choices.reference_face_distance_range[1] - DeepFakeAI.choices.reference_face_distance_range[0], - minimum = DeepFakeAI.choices.reference_face_distance_range[0], - maximum = DeepFakeAI.choices.reference_face_distance_range[-1], - visible = 'reference' in DeepFakeAI.globals.face_selector_mode - ) - register_ui_component('face_selector_mode_dropdown', FACE_SELECTOR_MODE_DROPDOWN) - register_ui_component('reference_face_position_gallery', REFERENCE_FACE_POSITION_GALLERY) - register_ui_component('reference_face_distance_slider', REFERENCE_FACE_DISTANCE_SLIDER) - - -def listen() -> None: - FACE_SELECTOR_MODE_DROPDOWN.select(update_face_selector_mode, inputs = FACE_SELECTOR_MODE_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ]) - REFERENCE_FACE_POSITION_GALLERY.select(clear_and_update_reference_face_position) - REFERENCE_FACE_DISTANCE_SLIDER.change(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER) - multi_component_names : List[ComponentName] =\ - [ - 'target_image', - 'target_video' - ] - for component_name in multi_component_names: - component = get_ui_component(component_name) - if component: - for method in [ 'upload', 'change', 'clear' ]: - getattr(component, method)(update_reference_face_position) - getattr(component, method)(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY) - change_one_component_names : List[ComponentName] =\ - [ - 'face_analyser_order_dropdown', - 'face_analyser_age_dropdown', - 'face_analyser_gender_dropdown' - ] - for component_name in change_one_component_names: - component = get_ui_component(component_name) - if component: - component.change(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY) - change_two_component_names : List[ComponentName] =\ - [ - 'face_detector_model_dropdown', - 'face_detector_size_dropdown', - 'face_detector_score_slider' - ] - for component_name in change_two_component_names: - component = get_ui_component(component_name) - if component: - component.change(clear_and_update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY) - preview_frame_slider = get_ui_component('preview_frame_slider') - if preview_frame_slider: - preview_frame_slider.change(update_reference_frame_number, inputs = preview_frame_slider) - preview_frame_slider.release(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY) - - -def update_face_selector_mode(face_selector_mode : 
FaceSelectorMode) -> Tuple[gradio.Gallery, gradio.Slider]: - if face_selector_mode == 'reference': - DeepFakeAI.globals.face_selector_mode = face_selector_mode - return gradio.Gallery(visible = True), gradio.Slider(visible = True) - if face_selector_mode == 'one': - DeepFakeAI.globals.face_selector_mode = face_selector_mode - return gradio.Gallery(visible = False), gradio.Slider(visible = False) - if face_selector_mode == 'many': - DeepFakeAI.globals.face_selector_mode = face_selector_mode - return gradio.Gallery(visible = False), gradio.Slider(visible = False) - - -def clear_and_update_reference_face_position(event : gradio.SelectData) -> gradio.Gallery: - clear_reference_faces() - clear_static_faces() - update_reference_face_position(event.index) - return update_reference_position_gallery() - - -def update_reference_face_position(reference_face_position : int = 0) -> None: - DeepFakeAI.globals.reference_face_position = reference_face_position - - -def update_reference_face_distance(reference_face_distance : float) -> None: - DeepFakeAI.globals.reference_face_distance = reference_face_distance - - -def update_reference_frame_number(reference_frame_number : int) -> None: - DeepFakeAI.globals.reference_frame_number = reference_frame_number - - -def clear_and_update_reference_position_gallery() -> gradio.Gallery: - clear_reference_faces() - clear_static_faces() - return update_reference_position_gallery() - - -def update_reference_position_gallery() -> gradio.Gallery: - gallery_frames = [] - if is_image(DeepFakeAI.globals.target_path): - reference_frame = read_static_image(DeepFakeAI.globals.target_path) - gallery_frames = extract_gallery_frames(reference_frame) - if is_video(DeepFakeAI.globals.target_path): - reference_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) - gallery_frames = extract_gallery_frames(reference_frame) - if gallery_frames: - return gradio.Gallery(value = gallery_frames) - return gradio.Gallery(value = None) - - -def extract_gallery_frames(reference_frame : Frame) -> List[Frame]: - crop_frames = [] - faces = get_many_faces(reference_frame) - for face in faces: - start_x, start_y, end_x, end_y = map(int, face.bbox) - padding_x = int((end_x - start_x) * 0.25) - padding_y = int((end_y - start_y) * 0.25) - start_x = max(0, start_x - padding_x) - start_y = max(0, start_y - padding_y) - end_x = max(0, end_x + padding_x) - end_y = max(0, end_y + padding_y) - crop_frame = reference_frame[start_y:end_y, start_x:end_x] - crop_frame = normalize_frame_color(crop_frame) - crop_frames.append(crop_frame) - return crop_frames diff --git a/DeepFakeAI/uis/components/frame_processors.py b/DeepFakeAI/uis/components/frame_processors.py deleted file mode 100644 index c8ec572771e4ad094c991a61e673bf1d1e1f5c62..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/frame_processors.py +++ /dev/null @@ -1,40 +0,0 @@ -from typing import List, Optional -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.processors.frame.core import load_frame_processor_module, clear_frame_processors_modules -from DeepFakeAI.filesystem import list_module_names -from DeepFakeAI.uis.core import register_ui_component - -FRAME_PROCESSORS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None - - -def render() -> None: - global FRAME_PROCESSORS_CHECKBOX_GROUP - - FRAME_PROCESSORS_CHECKBOX_GROUP = gradio.CheckboxGroup( - label = wording.get('frame_processors_checkbox_group_label'), - choices = 
sort_frame_processors(DeepFakeAI.globals.frame_processors), - value = DeepFakeAI.globals.frame_processors - ) - register_ui_component('frame_processors_checkbox_group', FRAME_PROCESSORS_CHECKBOX_GROUP) - - -def listen() -> None: - FRAME_PROCESSORS_CHECKBOX_GROUP.change(update_frame_processors, inputs = FRAME_PROCESSORS_CHECKBOX_GROUP, outputs = FRAME_PROCESSORS_CHECKBOX_GROUP) - - -def update_frame_processors(frame_processors : List[str]) -> gradio.CheckboxGroup: - DeepFakeAI.globals.frame_processors = frame_processors - clear_frame_processors_modules() - for frame_processor in frame_processors: - frame_processor_module = load_frame_processor_module(frame_processor) - if not frame_processor_module.pre_check(): - return gradio.CheckboxGroup() - return gradio.CheckboxGroup(value = frame_processors, choices = sort_frame_processors(frame_processors)) - - -def sort_frame_processors(frame_processors : List[str]) -> List[str]: - available_frame_processors = list_module_names('DeepFakeAI/processors/frame/modules') - return sorted(available_frame_processors, key = lambda frame_processor : frame_processors.index(frame_processor) if frame_processor in frame_processors else len(frame_processors)) diff --git a/DeepFakeAI/uis/components/frame_processors_options.py b/DeepFakeAI/uis/components/frame_processors_options.py deleted file mode 100644 index d355c2e491b539bc819ee338104422731758dfdc..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/frame_processors_options.py +++ /dev/null @@ -1,141 +0,0 @@ -from typing import List, Optional, Tuple -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.processors.frame.core import load_frame_processor_module -from DeepFakeAI.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices -from DeepFakeAI.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem -from DeepFakeAI.uis.core import get_ui_component, register_ui_component - -FACE_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None -FACE_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None -FACE_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None -FRAME_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None -FRAME_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None -FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None - - -def render() -> None: - global FACE_SWAPPER_MODEL_DROPDOWN - global FACE_ENHANCER_MODEL_DROPDOWN - global FACE_ENHANCER_BLEND_SLIDER - global FRAME_ENHANCER_MODEL_DROPDOWN - global FRAME_ENHANCER_BLEND_SLIDER - global FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP - - FACE_SWAPPER_MODEL_DROPDOWN = gradio.Dropdown( - label = wording.get('face_swapper_model_dropdown_label'), - choices = frame_processors_choices.face_swapper_models, - value = frame_processors_globals.face_swapper_model, - visible = 'face_swapper' in DeepFakeAI.globals.frame_processors - ) - FACE_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown( - label = wording.get('face_enhancer_model_dropdown_label'), - choices = frame_processors_choices.face_enhancer_models, - value = frame_processors_globals.face_enhancer_model, - visible = 'face_enhancer' in DeepFakeAI.globals.frame_processors - ) - FACE_ENHANCER_BLEND_SLIDER = gradio.Slider( - label = wording.get('face_enhancer_blend_slider_label'), - value = frame_processors_globals.face_enhancer_blend, - step = frame_processors_choices.face_enhancer_blend_range[1] - frame_processors_choices.face_enhancer_blend_range[0], - minimum = frame_processors_choices.face_enhancer_blend_range[0], - maximum = frame_processors_choices.face_enhancer_blend_range[-1], - visible = 'face_enhancer' in DeepFakeAI.globals.frame_processors - ) - FRAME_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown( - label = wording.get('frame_enhancer_model_dropdown_label'), - choices = frame_processors_choices.frame_enhancer_models, - value = frame_processors_globals.frame_enhancer_model, - visible = 'frame_enhancer' in DeepFakeAI.globals.frame_processors - ) - FRAME_ENHANCER_BLEND_SLIDER = gradio.Slider( - label = wording.get('frame_enhancer_blend_slider_label'), - value = frame_processors_globals.frame_enhancer_blend, - step = frame_processors_choices.frame_enhancer_blend_range[1] - frame_processors_choices.frame_enhancer_blend_range[0], - minimum = frame_processors_choices.frame_enhancer_blend_range[0], - maximum = frame_processors_choices.frame_enhancer_blend_range[-1], - visible = 'frame_enhancer' in DeepFakeAI.globals.frame_processors - ) - FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP = gradio.CheckboxGroup( - label = wording.get('face_debugger_items_checkbox_group_label'), - choices = frame_processors_choices.face_debugger_items, - value = frame_processors_globals.face_debugger_items, - visible = 'face_debugger' in DeepFakeAI.globals.frame_processors - ) - - register_ui_component('face_swapper_model_dropdown', FACE_SWAPPER_MODEL_DROPDOWN) - register_ui_component('face_enhancer_model_dropdown', FACE_ENHANCER_MODEL_DROPDOWN) - register_ui_component('face_enhancer_blend_slider', FACE_ENHANCER_BLEND_SLIDER) - register_ui_component('frame_enhancer_model_dropdown', FRAME_ENHANCER_MODEL_DROPDOWN) - register_ui_component('frame_enhancer_blend_slider', FRAME_ENHANCER_BLEND_SLIDER) - register_ui_component('face_debugger_items_checkbox_group', FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP) - - -def listen() -> None: - FACE_SWAPPER_MODEL_DROPDOWN.change(update_face_swapper_model, inputs = FACE_SWAPPER_MODEL_DROPDOWN, outputs = FACE_SWAPPER_MODEL_DROPDOWN) - FACE_ENHANCER_MODEL_DROPDOWN.change(update_face_enhancer_model, inputs = FACE_ENHANCER_MODEL_DROPDOWN, outputs = FACE_ENHANCER_MODEL_DROPDOWN) - FACE_ENHANCER_BLEND_SLIDER.change(update_face_enhancer_blend, inputs = FACE_ENHANCER_BLEND_SLIDER) - FRAME_ENHANCER_MODEL_DROPDOWN.change(update_frame_enhancer_model, inputs = FRAME_ENHANCER_MODEL_DROPDOWN, outputs = FRAME_ENHANCER_MODEL_DROPDOWN) - FRAME_ENHANCER_BLEND_SLIDER.change(update_frame_enhancer_blend, inputs = FRAME_ENHANCER_BLEND_SLIDER) - FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP.change(update_face_debugger_items, inputs = FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP) - frame_processors_checkbox_group = get_ui_component('frame_processors_checkbox_group') - if frame_processors_checkbox_group: - frame_processors_checkbox_group.change(toggle_face_swapper_model, inputs = frame_processors_checkbox_group, outputs = [ FACE_SWAPPER_MODEL_DROPDOWN, FACE_ENHANCER_MODEL_DROPDOWN, FACE_ENHANCER_BLEND_SLIDER, FRAME_ENHANCER_MODEL_DROPDOWN, FRAME_ENHANCER_BLEND_SLIDER, FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP ]) - - -def update_face_swapper_model(face_swapper_model : FaceSwapperModel) -> gradio.Dropdown: - frame_processors_globals.face_swapper_model = face_swapper_model - if face_swapper_model == 'blendswap_256': - DeepFakeAI.globals.face_recognizer_model = 'arcface_blendswap' - if face_swapper_model == 'inswapper_128' or face_swapper_model == 'inswapper_128_fp16': - DeepFakeAI.globals.face_recognizer_model = 'arcface_inswapper' - if 
face_swapper_model == 'simswap_256' or face_swapper_model == 'simswap_512_unofficial': - DeepFakeAI.globals.face_recognizer_model = 'arcface_simswap' - face_swapper_module = load_frame_processor_module('face_swapper') - face_swapper_module.clear_frame_processor() - face_swapper_module.set_options('model', face_swapper_module.MODELS[face_swapper_model]) - if not face_swapper_module.pre_check(): - return gradio.Dropdown() - return gradio.Dropdown(value = face_swapper_model) - - -def update_face_enhancer_model(face_enhancer_model : FaceEnhancerModel) -> gradio.Dropdown: - frame_processors_globals.face_enhancer_model = face_enhancer_model - face_enhancer_module = load_frame_processor_module('face_enhancer') - face_enhancer_module.clear_frame_processor() - face_enhancer_module.set_options('model', face_enhancer_module.MODELS[face_enhancer_model]) - if not face_enhancer_module.pre_check(): - return gradio.Dropdown() - return gradio.Dropdown(value = face_enhancer_model) - - -def update_face_enhancer_blend(face_enhancer_blend : int) -> None: - frame_processors_globals.face_enhancer_blend = face_enhancer_blend - - -def update_frame_enhancer_model(frame_enhancer_model : FrameEnhancerModel) -> gradio.Dropdown: - frame_processors_globals.frame_enhancer_model = frame_enhancer_model - frame_enhancer_module = load_frame_processor_module('frame_enhancer') - frame_enhancer_module.clear_frame_processor() - frame_enhancer_module.set_options('model', frame_enhancer_module.MODELS[frame_enhancer_model]) - if not frame_enhancer_module.pre_check(): - return gradio.Dropdown() - return gradio.Dropdown(value = frame_enhancer_model) - - -def update_frame_enhancer_blend(frame_enhancer_blend : int) -> None: - frame_processors_globals.frame_enhancer_blend = frame_enhancer_blend - - -def update_face_debugger_items(face_debugger_items : List[FaceDebuggerItem]) -> None: - frame_processors_globals.face_debugger_items = face_debugger_items - - -def toggle_face_swapper_model(frame_processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Slider, gradio.CheckboxGroup]: - has_face_swapper = 'face_swapper' in frame_processors - has_face_enhancer = 'face_enhancer' in frame_processors - has_frame_enhancer = 'frame_enhancer' in frame_processors - has_face_debugger = 'face_debugger' in frame_processors - return gradio.Dropdown(visible = has_face_swapper), gradio.Dropdown(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer), gradio.Dropdown(visible = has_frame_enhancer), gradio.Slider(visible = has_frame_enhancer), gradio.CheckboxGroup(visible = has_face_debugger) diff --git a/DeepFakeAI/uis/components/limit_resources.py b/DeepFakeAI/uis/components/limit_resources.py deleted file mode 100644 index 8858dad4babd5108ce2c46d77e203fb4674a8ca4..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/limit_resources.py +++ /dev/null @@ -1,27 +0,0 @@ -from typing import Optional -import gradio - -import DeepFakeAI.globals -import DeepFakeAI.choices -from DeepFakeAI import wording - -MAX_MEMORY_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global MAX_MEMORY_SLIDER - - MAX_MEMORY_SLIDER = gradio.Slider( - label = wording.get('max_memory_slider_label'), - step = DeepFakeAI.choices.max_memory_range[1] - DeepFakeAI.choices.max_memory_range[0], - minimum = DeepFakeAI.choices.max_memory_range[0], - maximum = DeepFakeAI.choices.max_memory_range[-1] - ) - - -def listen() -> None: - MAX_MEMORY_SLIDER.change(update_max_memory, inputs = 
MAX_MEMORY_SLIDER) - - -def update_max_memory(max_memory : int) -> None: - DeepFakeAI.globals.max_memory = max_memory if max_memory > 0 else None diff --git a/DeepFakeAI/uis/components/output.py b/DeepFakeAI/uis/components/output.py deleted file mode 100644 index 9d1021b163acb1ecffcb57d6cfed0a6114d91aa5..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/output.py +++ /dev/null @@ -1,62 +0,0 @@ -from typing import Tuple, Optional -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.core import limit_resources, conditional_process -from DeepFakeAI.uis.core import get_ui_component -from DeepFakeAI.normalizer import normalize_output_path -from DeepFakeAI.filesystem import is_image, is_video, clear_temp - -OUTPUT_IMAGE : Optional[gradio.Image] = None -OUTPUT_VIDEO : Optional[gradio.Video] = None -OUTPUT_START_BUTTON : Optional[gradio.Button] = None -OUTPUT_CLEAR_BUTTON : Optional[gradio.Button] = None - - -def render() -> None: - global OUTPUT_IMAGE - global OUTPUT_VIDEO - global OUTPUT_START_BUTTON - global OUTPUT_CLEAR_BUTTON - - OUTPUT_IMAGE = gradio.Image( - label = wording.get('output_image_or_video_label'), - visible = False - ) - OUTPUT_VIDEO = gradio.Video( - label = wording.get('output_image_or_video_label') - ) - OUTPUT_START_BUTTON = gradio.Button( - value = wording.get('start_button_label'), - variant = 'primary', - size = 'sm' - ) - OUTPUT_CLEAR_BUTTON = gradio.Button( - value = wording.get('clear_button_label'), - size = 'sm' - ) - - -def listen() -> None: - output_path_textbox = get_ui_component('output_path_textbox') - if output_path_textbox: - OUTPUT_START_BUTTON.click(start, inputs = output_path_textbox, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ]) - OUTPUT_CLEAR_BUTTON.click(clear, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ]) - - -def start(output_path : str) -> Tuple[gradio.Image, gradio.Video]: - DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_paths, DeepFakeAI.globals.target_path, output_path) - limit_resources() - conditional_process() - if is_image(DeepFakeAI.globals.output_path): - return gradio.Image(value = DeepFakeAI.globals.output_path, visible = True), gradio.Video(value = None, visible = False) - if is_video(DeepFakeAI.globals.output_path): - return gradio.Image(value = None, visible = False), gradio.Video(value = DeepFakeAI.globals.output_path, visible = True) - return gradio.Image(), gradio.Video() - - -def clear() -> Tuple[gradio.Image, gradio.Video]: - if DeepFakeAI.globals.target_path: - clear_temp(DeepFakeAI.globals.target_path) - return gradio.Image(value = None), gradio.Video(value = None) diff --git a/DeepFakeAI/uis/components/output_options.py b/DeepFakeAI/uis/components/output_options.py deleted file mode 100644 index b8a91c9b06453f368b2b3933f8fd9df427b1e3f5..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/output_options.py +++ /dev/null @@ -1,94 +0,0 @@ -from typing import Optional, Tuple, List -import tempfile -import gradio - -import DeepFakeAI.globals -import DeepFakeAI.choices -from DeepFakeAI import wording -from DeepFakeAI.typing import OutputVideoEncoder -from DeepFakeAI.filesystem import is_image, is_video -from DeepFakeAI.uis.typing import ComponentName -from DeepFakeAI.uis.core import get_ui_component, register_ui_component - -OUTPUT_PATH_TEXTBOX : Optional[gradio.Textbox] = None -OUTPUT_IMAGE_QUALITY_SLIDER : Optional[gradio.Slider] = None -OUTPUT_VIDEO_ENCODER_DROPDOWN : Optional[gradio.Dropdown] = None 
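# The modules in DeepFakeAI/uis/components all follow the same three-part shape
# seen above and below: module-level Optional component handles, a render() that
# constructs them, a listen() that wires their events, and update handlers that
# write straight back into DeepFakeAI.globals. A minimal self-contained sketch of
# that pattern follows; the 'EXAMPLE' label, the checkbox, and the print stand-in
# are hypothetical and not part of the package.
from typing import Optional
import gradio

EXAMPLE_CHECKBOX : Optional[gradio.Checkbox] = None

def render() -> None:
	global EXAMPLE_CHECKBOX
	EXAMPLE_CHECKBOX = gradio.Checkbox(
		label = 'EXAMPLE',  # the real modules pull labels from wording.get()
		value = False
	)

def listen() -> None:
	EXAMPLE_CHECKBOX.change(update_example, inputs = EXAMPLE_CHECKBOX)

def update_example(example : bool) -> None:
	print('example set to', example)  # the real modules assign DeepFakeAI.globals.<name> here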
-OUTPUT_VIDEO_QUALITY_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global OUTPUT_PATH_TEXTBOX - global OUTPUT_IMAGE_QUALITY_SLIDER - global OUTPUT_VIDEO_ENCODER_DROPDOWN - global OUTPUT_VIDEO_QUALITY_SLIDER - - OUTPUT_PATH_TEXTBOX = gradio.Textbox( - label = wording.get('output_path_textbox_label'), - value = DeepFakeAI.globals.output_path or tempfile.gettempdir(), - max_lines = 1 - ) - OUTPUT_IMAGE_QUALITY_SLIDER = gradio.Slider( - label = wording.get('output_image_quality_slider_label'), - value = DeepFakeAI.globals.output_image_quality, - step = DeepFakeAI.choices.output_image_quality_range[1] - DeepFakeAI.choices.output_image_quality_range[0], - minimum = DeepFakeAI.choices.output_image_quality_range[0], - maximum = DeepFakeAI.choices.output_image_quality_range[-1], - visible = is_image(DeepFakeAI.globals.target_path) - ) - OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown( - label = wording.get('output_video_encoder_dropdown_label'), - choices = DeepFakeAI.choices.output_video_encoders, - value = DeepFakeAI.globals.output_video_encoder, - visible = is_video(DeepFakeAI.globals.target_path) - ) - OUTPUT_VIDEO_QUALITY_SLIDER = gradio.Slider( - label = wording.get('output_video_quality_slider_label'), - value = DeepFakeAI.globals.output_video_quality, - step = DeepFakeAI.choices.output_video_quality_range[1] - DeepFakeAI.choices.output_video_quality_range[0], - minimum = DeepFakeAI.choices.output_video_quality_range[0], - maximum = DeepFakeAI.choices.output_video_quality_range[-1], - visible = is_video(DeepFakeAI.globals.target_path) - ) - register_ui_component('output_path_textbox', OUTPUT_PATH_TEXTBOX) - - -def listen() -> None: - OUTPUT_PATH_TEXTBOX.change(update_output_path, inputs = OUTPUT_PATH_TEXTBOX) - OUTPUT_IMAGE_QUALITY_SLIDER.change(update_output_image_quality, inputs = OUTPUT_IMAGE_QUALITY_SLIDER) - OUTPUT_VIDEO_ENCODER_DROPDOWN.select(update_output_video_encoder, inputs = OUTPUT_VIDEO_ENCODER_DROPDOWN) - OUTPUT_VIDEO_QUALITY_SLIDER.change(update_output_video_quality, inputs = OUTPUT_VIDEO_QUALITY_SLIDER) - multi_component_names : List[ComponentName] =\ - [ - 'source_image', - 'target_image', - 'target_video' - ] - for component_name in multi_component_names: - component = get_ui_component(component_name) - if component: - for method in [ 'upload', 'change', 'clear' ]: - getattr(component, method)(remote_update, outputs = [ OUTPUT_IMAGE_QUALITY_SLIDER, OUTPUT_VIDEO_ENCODER_DROPDOWN, OUTPUT_VIDEO_QUALITY_SLIDER ]) - - -def remote_update() -> Tuple[gradio.Slider, gradio.Dropdown, gradio.Slider]: - if is_image(DeepFakeAI.globals.target_path): - return gradio.Slider(visible = True), gradio.Dropdown(visible = False), gradio.Slider(visible = False) - if is_video(DeepFakeAI.globals.target_path): - return gradio.Slider(visible = False), gradio.Dropdown(visible = True), gradio.Slider(visible = True) - return gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False) - - -def update_output_path(output_path : str) -> None: - DeepFakeAI.globals.output_path = output_path - - -def update_output_image_quality(output_image_quality : int) -> None: - DeepFakeAI.globals.output_image_quality = output_image_quality - - -def update_output_video_encoder(output_video_encoder: OutputVideoEncoder) -> None: - DeepFakeAI.globals.output_video_encoder = output_video_encoder - - -def update_output_video_quality(output_video_quality : int) -> None: - DeepFakeAI.globals.output_video_quality = output_video_quality diff --git 
a/DeepFakeAI/uis/components/preview.py b/DeepFakeAI/uis/components/preview.py deleted file mode 100644 index 1af2400f540fd9bc34cd53314dac9e982480bb3f..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/preview.py +++ /dev/null @@ -1,173 +0,0 @@ -from typing import Any, Dict, List, Optional -import cv2 -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.core import conditional_append_reference_faces -from DeepFakeAI.face_store import clear_static_faces, get_reference_faces, clear_reference_faces -from DeepFakeAI.typing import Frame, Face, FaceSet -from DeepFakeAI.vision import get_video_frame, count_video_frame_total, normalize_frame_color, resize_frame_dimension, read_static_image, read_static_images -from DeepFakeAI.face_analyser import get_average_face, clear_face_analyser -from DeepFakeAI.content_analyser import analyse_frame -from DeepFakeAI.processors.frame.core import load_frame_processor_module -from DeepFakeAI.filesystem import is_image, is_video -from DeepFakeAI.uis.typing import ComponentName -from DeepFakeAI.uis.core import get_ui_component, register_ui_component - -PREVIEW_IMAGE : Optional[gradio.Image] = None -PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global PREVIEW_IMAGE - global PREVIEW_FRAME_SLIDER - - preview_image_args: Dict[str, Any] =\ - { - 'label': wording.get('preview_image_label'), - 'interactive': False - } - preview_frame_slider_args: Dict[str, Any] =\ - { - 'label': wording.get('preview_frame_slider_label'), - 'step': 1, - 'minimum': 0, - 'maximum': 100, - 'visible': False - } - conditional_append_reference_faces() - source_frames = read_static_images(DeepFakeAI.globals.source_paths) - source_face = get_average_face(source_frames) - reference_faces = get_reference_faces() if 'reference' in DeepFakeAI.globals.face_selector_mode else None - if is_image(DeepFakeAI.globals.target_path): - target_frame = read_static_image(DeepFakeAI.globals.target_path) - preview_frame = process_preview_frame(source_face, reference_faces, target_frame) - preview_image_args['value'] = normalize_frame_color(preview_frame) - if is_video(DeepFakeAI.globals.target_path): - temp_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) - preview_frame = process_preview_frame(source_face, reference_faces, temp_frame) - preview_image_args['value'] = normalize_frame_color(preview_frame) - preview_image_args['visible'] = True - preview_frame_slider_args['value'] = DeepFakeAI.globals.reference_frame_number - preview_frame_slider_args['maximum'] = count_video_frame_total(DeepFakeAI.globals.target_path) - preview_frame_slider_args['visible'] = True - PREVIEW_IMAGE = gradio.Image(**preview_image_args) - PREVIEW_FRAME_SLIDER = gradio.Slider(**preview_frame_slider_args) - register_ui_component('preview_frame_slider', PREVIEW_FRAME_SLIDER) - - -def listen() -> None: - PREVIEW_FRAME_SLIDER.release(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) - multi_one_component_names : List[ComponentName] =\ - [ - 'source_image', - 'target_image', - 'target_video' - ] - for component_name in multi_one_component_names: - component = get_ui_component(component_name) - if component: - for method in [ 'upload', 'change', 'clear' ]: - getattr(component, method)(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) - multi_two_component_names : List[ComponentName] =\ - [ - 'target_image', - 'target_video' - ] - for 
component_name in multi_two_component_names: - component = get_ui_component(component_name) - if component: - for method in [ 'upload', 'change', 'clear' ]: - getattr(component, method)(update_preview_frame_slider, outputs = PREVIEW_FRAME_SLIDER) - select_component_names : List[ComponentName] =\ - [ - 'reference_face_position_gallery', - 'face_analyser_order_dropdown', - 'face_analyser_age_dropdown', - 'face_analyser_gender_dropdown' - ] - for component_name in select_component_names: - component = get_ui_component(component_name) - if component: - component.select(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) - change_one_component_names : List[ComponentName] =\ - [ - 'face_debugger_items_checkbox_group', - 'face_enhancer_model_dropdown', - 'face_enhancer_blend_slider', - 'frame_enhancer_model_dropdown', - 'frame_enhancer_blend_slider', - 'face_selector_mode_dropdown', - 'reference_face_distance_slider', - 'face_mask_types_checkbox_group', - 'face_mask_blur_slider', - 'face_mask_padding_top_slider', - 'face_mask_padding_bottom_slider', - 'face_mask_padding_left_slider', - 'face_mask_padding_right_slider', - 'face_mask_region_checkbox_group' - ] - for component_name in change_one_component_names: - component = get_ui_component(component_name) - if component: - component.change(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) - change_two_component_names : List[ComponentName] =\ - [ - 'frame_processors_checkbox_group', - 'face_swapper_model_dropdown', - 'face_detector_model_dropdown', - 'face_detector_size_dropdown', - 'face_detector_score_slider' - ] - for component_name in change_two_component_names: - component = get_ui_component(component_name) - if component: - component.change(clear_and_update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE) - - -def clear_and_update_preview_image(frame_number : int = 0) -> gradio.Image: - clear_face_analyser() - clear_reference_faces() - clear_static_faces() - return update_preview_image(frame_number) - - -def update_preview_image(frame_number : int = 0) -> gradio.Image: - conditional_append_reference_faces() - source_frames = read_static_images(DeepFakeAI.globals.source_paths) - source_face = get_average_face(source_frames) - reference_face = get_reference_faces() if 'reference' in DeepFakeAI.globals.face_selector_mode else None - if is_image(DeepFakeAI.globals.target_path): - target_frame = read_static_image(DeepFakeAI.globals.target_path) - preview_frame = process_preview_frame(source_face, reference_face, target_frame) - preview_frame = normalize_frame_color(preview_frame) - return gradio.Image(value = preview_frame) - if is_video(DeepFakeAI.globals.target_path): - temp_frame = get_video_frame(DeepFakeAI.globals.target_path, frame_number) - preview_frame = process_preview_frame(source_face, reference_face, temp_frame) - preview_frame = normalize_frame_color(preview_frame) - return gradio.Image(value = preview_frame) - return gradio.Image(value = None) - - -def update_preview_frame_slider() -> gradio.Slider: - if is_video(DeepFakeAI.globals.target_path): - video_frame_total = count_video_frame_total(DeepFakeAI.globals.target_path) - return gradio.Slider(maximum = video_frame_total, visible = True) - return gradio.Slider(value = None, maximum = None, visible = False) - - -def process_preview_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame: - temp_frame = resize_frame_dimension(temp_frame, 640, 640) - if analyse_frame(temp_frame): - 
return cv2.GaussianBlur(temp_frame, (99, 99), 0) - for frame_processor in DeepFakeAI.globals.frame_processors: - frame_processor_module = load_frame_processor_module(frame_processor) - if frame_processor_module.pre_process('preview'): - temp_frame = frame_processor_module.process_frame( - source_face, - reference_faces, - temp_frame - ) - return temp_frame diff --git a/DeepFakeAI/uis/components/source.py b/DeepFakeAI/uis/components/source.py deleted file mode 100644 index af770e8fab36f446bdcc20ab8ef71345910633ca..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/source.py +++ /dev/null @@ -1,49 +0,0 @@ -from typing import Optional, List -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.uis.typing import File -from DeepFakeAI.filesystem import are_images -from DeepFakeAI.uis.core import register_ui_component - -SOURCE_FILE : Optional[gradio.File] = None -SOURCE_IMAGE : Optional[gradio.Image] = None - - -def render() -> None: - global SOURCE_FILE - global SOURCE_IMAGE - - are_source_images = are_images(DeepFakeAI.globals.source_paths) - SOURCE_FILE = gradio.File( - file_count = 'multiple', - file_types = - [ - '.png', - '.jpg', - '.webp' - ], - label = wording.get('source_file_label'), - value = DeepFakeAI.globals.source_paths if are_source_images else None - ) - source_file_names = [ source_file_value['name'] for source_file_value in SOURCE_FILE.value ] if SOURCE_FILE.value else None - SOURCE_IMAGE = gradio.Image( - value = source_file_names[0] if are_source_images else None, - visible = are_source_images, - show_label = False - ) - register_ui_component('source_image', SOURCE_IMAGE) - - -def listen() -> None: - SOURCE_FILE.change(update, inputs = SOURCE_FILE, outputs = SOURCE_IMAGE) - - -def update(files : List[File]) -> gradio.Image: - file_names = [ file.name for file in files ] if files else None - if are_images(file_names): - DeepFakeAI.globals.source_paths = file_names - return gradio.Image(value = file_names[0], visible = True) - DeepFakeAI.globals.source_paths = None - return gradio.Image(value = None, visible = False) diff --git a/DeepFakeAI/uis/components/target.py b/DeepFakeAI/uis/components/target.py deleted file mode 100644 index e7da555cd9583a6e9898e256c742c4cbb24c9fd9..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/target.py +++ /dev/null @@ -1,63 +0,0 @@ -from typing import Tuple, Optional -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.face_store import clear_static_faces, clear_reference_faces -from DeepFakeAI.uis.typing import File -from DeepFakeAI.filesystem import is_image, is_video -from DeepFakeAI.uis.core import register_ui_component - -TARGET_FILE : Optional[gradio.File] = None -TARGET_IMAGE : Optional[gradio.Image] = None -TARGET_VIDEO : Optional[gradio.Video] = None - - -def render() -> None: - global TARGET_FILE - global TARGET_IMAGE - global TARGET_VIDEO - - is_target_image = is_image(DeepFakeAI.globals.target_path) - is_target_video = is_video(DeepFakeAI.globals.target_path) - TARGET_FILE = gradio.File( - label = wording.get('target_file_label'), - file_count = 'single', - file_types = - [ - '.png', - '.jpg', - '.webp', - '.mp4' - ], - value = DeepFakeAI.globals.target_path if is_target_image or is_target_video else None - ) - TARGET_IMAGE = gradio.Image( - value = TARGET_FILE.value['name'] if is_target_image else None, - visible = is_target_image, - show_label = False - ) - TARGET_VIDEO = gradio.Video( - value = 
TARGET_FILE.value['name'] if is_target_video else None, - visible = is_target_video, - show_label = False - ) - register_ui_component('target_image', TARGET_IMAGE) - register_ui_component('target_video', TARGET_VIDEO) - - -def listen() -> None: - TARGET_FILE.change(update, inputs = TARGET_FILE, outputs = [ TARGET_IMAGE, TARGET_VIDEO ]) - - -def update(file : File) -> Tuple[gradio.Image, gradio.Video]: - clear_reference_faces() - clear_static_faces() - if file and is_image(file.name): - DeepFakeAI.globals.target_path = file.name - return gradio.Image(value = file.name, visible = True), gradio.Video(value = None, visible = False) - if file and is_video(file.name): - DeepFakeAI.globals.target_path = file.name - return gradio.Image(value = None, visible = False), gradio.Video(value = file.name, visible = True) - DeepFakeAI.globals.target_path = None - return gradio.Image(value = None, visible = False), gradio.Video(value = None, visible = False) diff --git a/DeepFakeAI/uis/components/temp_frame.py b/DeepFakeAI/uis/components/temp_frame.py deleted file mode 100644 index da7a0dcd8e98b649f6ab6c2c79903994b2661128..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/temp_frame.py +++ /dev/null @@ -1,55 +0,0 @@ -from typing import Optional, Tuple -import gradio - -import DeepFakeAI.globals -import DeepFakeAI.choices -from DeepFakeAI import wording -from DeepFakeAI.typing import TempFrameFormat -from DeepFakeAI.filesystem import is_video -from DeepFakeAI.uis.core import get_ui_component - -TEMP_FRAME_FORMAT_DROPDOWN : Optional[gradio.Dropdown] = None -TEMP_FRAME_QUALITY_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global TEMP_FRAME_FORMAT_DROPDOWN - global TEMP_FRAME_QUALITY_SLIDER - - TEMP_FRAME_FORMAT_DROPDOWN = gradio.Dropdown( - label = wording.get('temp_frame_format_dropdown_label'), - choices = DeepFakeAI.choices.temp_frame_formats, - value = DeepFakeAI.globals.temp_frame_format, - visible = is_video(DeepFakeAI.globals.target_path) - ) - TEMP_FRAME_QUALITY_SLIDER = gradio.Slider( - label = wording.get('temp_frame_quality_slider_label'), - value = DeepFakeAI.globals.temp_frame_quality, - step = DeepFakeAI.choices.temp_frame_quality_range[1] - DeepFakeAI.choices.temp_frame_quality_range[0], - minimum = DeepFakeAI.choices.temp_frame_quality_range[0], - maximum = DeepFakeAI.choices.temp_frame_quality_range[-1], - visible = is_video(DeepFakeAI.globals.target_path) - ) - - -def listen() -> None: - TEMP_FRAME_FORMAT_DROPDOWN.select(update_temp_frame_format, inputs = TEMP_FRAME_FORMAT_DROPDOWN) - TEMP_FRAME_QUALITY_SLIDER.change(update_temp_frame_quality, inputs = TEMP_FRAME_QUALITY_SLIDER) - target_video = get_ui_component('target_video') - if target_video: - for method in [ 'upload', 'change', 'clear' ]: - getattr(target_video, method)(remote_update, outputs = [ TEMP_FRAME_FORMAT_DROPDOWN, TEMP_FRAME_QUALITY_SLIDER ]) - - -def remote_update() -> Tuple[gradio.Dropdown, gradio.Slider]: - if is_video(DeepFakeAI.globals.target_path): - return gradio.Dropdown(visible = True), gradio.Slider(visible = True) - return gradio.Dropdown(visible = False), gradio.Slider(visible = False) - - -def update_temp_frame_format(temp_frame_format : TempFrameFormat) -> None: - DeepFakeAI.globals.temp_frame_format = temp_frame_format - - -def update_temp_frame_quality(temp_frame_quality : int) -> None: - DeepFakeAI.globals.temp_frame_quality = temp_frame_quality diff --git a/DeepFakeAI/uis/components/trim_frame.py b/DeepFakeAI/uis/components/trim_frame.py deleted file mode 100644 
index 768a22c8f11277c044e16f81477b504b39d804ee..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/trim_frame.py +++ /dev/null @@ -1,71 +0,0 @@ -from typing import Any, Dict, Tuple, Optional -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.vision import count_video_frame_total -from DeepFakeAI.filesystem import is_video -from DeepFakeAI.uis.core import get_ui_component - -TRIM_FRAME_START_SLIDER : Optional[gradio.Slider] = None -TRIM_FRAME_END_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global TRIM_FRAME_START_SLIDER - global TRIM_FRAME_END_SLIDER - - trim_frame_start_slider_args : Dict[str, Any] =\ - { - 'label': wording.get('trim_frame_start_slider_label'), - 'step': 1, - 'minimum': 0, - 'maximum': 100, - 'visible': False - } - trim_frame_end_slider_args : Dict[str, Any] =\ - { - 'label': wording.get('trim_frame_end_slider_label'), - 'step': 1, - 'minimum': 0, - 'maximum': 100, - 'visible': False - } - if is_video(DeepFakeAI.globals.target_path): - video_frame_total = count_video_frame_total(DeepFakeAI.globals.target_path) - trim_frame_start_slider_args['value'] = DeepFakeAI.globals.trim_frame_start or 0 - trim_frame_start_slider_args['maximum'] = video_frame_total - trim_frame_start_slider_args['visible'] = True - trim_frame_end_slider_args['value'] = DeepFakeAI.globals.trim_frame_end or video_frame_total - trim_frame_end_slider_args['maximum'] = video_frame_total - trim_frame_end_slider_args['visible'] = True - with gradio.Row(): - TRIM_FRAME_START_SLIDER = gradio.Slider(**trim_frame_start_slider_args) - TRIM_FRAME_END_SLIDER = gradio.Slider(**trim_frame_end_slider_args) - - -def listen() -> None: - TRIM_FRAME_START_SLIDER.change(update_trim_frame_start, inputs = TRIM_FRAME_START_SLIDER) - TRIM_FRAME_END_SLIDER.change(update_trim_frame_end, inputs = TRIM_FRAME_END_SLIDER) - target_video = get_ui_component('target_video') - if target_video: - for method in [ 'upload', 'change', 'clear' ]: - getattr(target_video, method)(remote_update, outputs = [ TRIM_FRAME_START_SLIDER, TRIM_FRAME_END_SLIDER ]) - - -def remote_update() -> Tuple[gradio.Slider, gradio.Slider]: - if is_video(DeepFakeAI.globals.target_path): - video_frame_total = count_video_frame_total(DeepFakeAI.globals.target_path) - DeepFakeAI.globals.trim_frame_start = None - DeepFakeAI.globals.trim_frame_end = None - return gradio.Slider(value = 0, maximum = video_frame_total, visible = True), gradio.Slider(value = video_frame_total, maximum = video_frame_total, visible = True) - return gradio.Slider(value = None, maximum = None, visible = False), gradio.Slider(value = None, maximum = None, visible = False) - - -def update_trim_frame_start(trim_frame_start : int) -> None: - DeepFakeAI.globals.trim_frame_start = trim_frame_start if trim_frame_start > 0 else None - - -def update_trim_frame_end(trim_frame_end : int) -> None: - video_frame_total = count_video_frame_total(DeepFakeAI.globals.target_path) - DeepFakeAI.globals.trim_frame_end = trim_frame_end if trim_frame_end < video_frame_total else None
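Editor's note: the trim sliders above normalise their values back to None at the extremes, so downstream code can treat None as "no trimming". A small illustration of the same normalisation outside the UI; normalize_trim is a hypothetical helper, and video_frame_total is assumed to come from count_video_frame_total:

from typing import Optional, Tuple

def normalize_trim(trim_frame_start : int, trim_frame_end : int, video_frame_total : int) -> Tuple[Optional[int], Optional[int]]:
	# mirror update_trim_frame_start / update_trim_frame_end: the extremes mean "do not trim"
	start = trim_frame_start if trim_frame_start > 0 else None
	end = trim_frame_end if trim_frame_end < video_frame_total else None
	return start, end

print(normalize_trim(0, 270, 270)) # (None, None)
print(normalize_trim(10, 200, 270)) # (10, 200)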
diff --git a/DeepFakeAI/uis/components/webcam.py b/DeepFakeAI/uis/components/webcam.py deleted file mode 100644 index b5c4470872c65f68b6a651482c56f7f12cfbb5a0..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/webcam.py +++ /dev/null @@ -1,155 +0,0 @@ -from typing import Optional, Generator, Deque -from concurrent.futures import ThreadPoolExecutor -from collections import deque -import os -import platform -import subprocess -import cv2 -import gradio -from tqdm import tqdm - -import DeepFakeAI.globals -from DeepFakeAI import logger, wording -from DeepFakeAI.content_analyser import analyse_stream -from DeepFakeAI.typing import Frame, Face -from DeepFakeAI.face_analyser import get_average_face -from DeepFakeAI.processors.frame.core import get_frame_processors_modules -from DeepFakeAI.ffmpeg import open_ffmpeg -from DeepFakeAI.vision import normalize_frame_color, read_static_images -from DeepFakeAI.uis.typing import StreamMode, WebcamMode -from DeepFakeAI.uis.core import get_ui_component - -WEBCAM_CAPTURE : Optional[cv2.VideoCapture] = None -WEBCAM_IMAGE : Optional[gradio.Image] = None -WEBCAM_START_BUTTON : Optional[gradio.Button] = None -WEBCAM_STOP_BUTTON : Optional[gradio.Button] = None - - -def get_webcam_capture() -> Optional[cv2.VideoCapture]: - global WEBCAM_CAPTURE - - if WEBCAM_CAPTURE is None: - if platform.system().lower() == 'windows': - webcam_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW) - else: - webcam_capture = cv2.VideoCapture(0) - if webcam_capture and webcam_capture.isOpened(): - WEBCAM_CAPTURE = webcam_capture - return WEBCAM_CAPTURE - - -def clear_webcam_capture() -> None: - global WEBCAM_CAPTURE - - if WEBCAM_CAPTURE: - WEBCAM_CAPTURE.release() - WEBCAM_CAPTURE = None - - -def render() -> None: - global WEBCAM_IMAGE - global WEBCAM_START_BUTTON - global WEBCAM_STOP_BUTTON - - WEBCAM_IMAGE = gradio.Image( - label = wording.get('webcam_image_label') - ) - WEBCAM_START_BUTTON = gradio.Button( - value = wording.get('start_button_label'), - variant = 'primary', - size = 'sm' - ) - WEBCAM_STOP_BUTTON = gradio.Button( - value = wording.get('stop_button_label'), - size = 'sm' - ) - - -def listen() -> None: - start_event = None - webcam_mode_radio = get_ui_component('webcam_mode_radio') - webcam_resolution_dropdown = get_ui_component('webcam_resolution_dropdown') - webcam_fps_slider = get_ui_component('webcam_fps_slider') - if webcam_mode_radio and webcam_resolution_dropdown and webcam_fps_slider: - start_event = WEBCAM_START_BUTTON.click(start, inputs = [ webcam_mode_radio, webcam_resolution_dropdown, webcam_fps_slider ], outputs = WEBCAM_IMAGE) - WEBCAM_STOP_BUTTON.click(stop, cancels = start_event) - source_image = get_ui_component('source_image') - if source_image: - for method in [ 'upload', 'change', 'clear' ]: - getattr(source_image, method)(stop, cancels = start_event) - - -def start(webcam_mode : WebcamMode, resolution : str, fps : float) -> Generator[Frame, None, None]: - DeepFakeAI.globals.face_selector_mode = 'one' - DeepFakeAI.globals.face_analyser_order = 'large-small' - source_frames = read_static_images(DeepFakeAI.globals.source_paths) - source_face = get_average_face(source_frames) - stream = None - if webcam_mode in [ 'udp', 'v4l2' ]: - stream = open_stream(webcam_mode, resolution, fps) # type: ignore[arg-type] - webcam_width, webcam_height = map(int, resolution.split('x')) - webcam_capture = get_webcam_capture() - if webcam_capture and webcam_capture.isOpened(): - webcam_capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')) # type: ignore[attr-defined] - webcam_capture.set(cv2.CAP_PROP_FRAME_WIDTH, webcam_width) - webcam_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, webcam_height) - webcam_capture.set(cv2.CAP_PROP_FPS, fps) - for capture_frame in multi_process_capture(source_face, webcam_capture, fps): - if webcam_mode == 'inline': - yield normalize_frame_color(capture_frame) - else: - try: - stream.stdin.write(capture_frame.tobytes()) - except Exception: - clear_webcam_capture() - yield None - - -def multi_process_capture(source_face : Face, webcam_capture : cv2.VideoCapture, fps : float) -> Generator[Frame, None, None]: - with tqdm(desc = wording.get('processing'), unit = 'frame', ascii = ' =', disable = DeepFakeAI.globals.log_level in [ 'warn', 'error' ]) as progress: - with ThreadPoolExecutor(max_workers = DeepFakeAI.globals.execution_thread_count) as executor: - futures = [] - deque_capture_frames : Deque[Frame] = deque() - while webcam_capture and webcam_capture.isOpened(): - has_frame, capture_frame = webcam_capture.read() - if not has_frame: - continue - if analyse_stream(capture_frame, fps): - return - future = executor.submit(process_stream_frame, source_face, capture_frame) - futures.append(future) - for future_done in [ future for future in futures if future.done() ]: - capture_frame = future_done.result() - deque_capture_frames.append(capture_frame) - futures.remove(future_done) - while deque_capture_frames: - progress.update() - yield deque_capture_frames.popleft() - - -def stop() -> gradio.Image: - clear_webcam_capture() - return gradio.Image(value = None) - - -def process_stream_frame(source_face : Face, temp_frame : Frame) -> Frame: - for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors): - if frame_processor_module.pre_process('stream'): - temp_frame = frame_processor_module.process_frame( - source_face, - None, - temp_frame - ) - return temp_frame - - -def open_stream(stream_mode : StreamMode, resolution : str, fps : float) -> subprocess.Popen[bytes]: - commands = [ '-f', 'rawvideo', '-pix_fmt', 'bgr24', '-s', resolution, '-r', str(fps), '-i', '-' ] - if stream_mode == 'udp': - commands.extend([ '-b:v', '2000k', '-f', 'mpegts', 'udp://localhost:27000?pkt_size=1316' ]) - if stream_mode == 'v4l2': - try: - device_name = os.listdir('/sys/devices/virtual/video4linux')[0] - if device_name: - commands.extend([ '-f', 'v4l2', '/dev/' + device_name ]) - except FileNotFoundError: - logger.error(wording.get('stream_not_loaded').format(stream_mode = stream_mode), __name__.upper()) - return open_ffmpeg(commands)
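Editor's note: open_stream() above pipes raw BGR frames into FFmpeg over stdin. For UDP mode, the resulting command is roughly equivalent to the sketch below; open_udp_stream is an illustrative name, and the leading 'ffmpeg -hide_banner -loglevel error' is an assumption about what open_ffmpeg prepends. The pkt_size = 1316 keeps each MPEG-TS datagram (7 x 188-byte packets) within a typical MTU:

import subprocess

def open_udp_stream(resolution : str, fps : float) -> subprocess.Popen:
	commands =\
	[
		'ffmpeg', '-hide_banner', '-loglevel', 'error',
		'-f', 'rawvideo', '-pix_fmt', 'bgr24', # frames arrive as raw BGR bytes
		'-s', resolution, '-r', str(fps), '-i', '-', # read from stdin
		'-b:v', '2000k', '-f', 'mpegts', 'udp://localhost:27000?pkt_size=1316'
	]
	return subprocess.Popen(commands, stdin = subprocess.PIPE)

# usage: stream = open_udp_stream('640x480', 25.0); stream.stdin.write(frame.tobytes())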
diff --git a/DeepFakeAI/uis/components/webcam_options.py b/DeepFakeAI/uis/components/webcam_options.py deleted file mode 100644 index e1bcb6078902ca8ecf7781222756882505c31d47..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/webcam_options.py +++ /dev/null @@ -1,37 +0,0 @@ -from typing import Optional -import gradio - -from DeepFakeAI import wording -from DeepFakeAI.uis import choices as uis_choices -from DeepFakeAI.uis.core import register_ui_component - -WEBCAM_MODE_RADIO : Optional[gradio.Radio] = None -WEBCAM_RESOLUTION_DROPDOWN : Optional[gradio.Dropdown] = None -WEBCAM_FPS_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global WEBCAM_MODE_RADIO - global WEBCAM_RESOLUTION_DROPDOWN - global WEBCAM_FPS_SLIDER - - WEBCAM_MODE_RADIO = gradio.Radio( - label = wording.get('webcam_mode_radio_label'), - choices = uis_choices.webcam_modes, - value = 'inline' - ) - WEBCAM_RESOLUTION_DROPDOWN = gradio.Dropdown( - label = wording.get('webcam_resolution_dropdown'), - choices = uis_choices.webcam_resolutions, - value = uis_choices.webcam_resolutions[0] - ) - WEBCAM_FPS_SLIDER = gradio.Slider( - label = wording.get('webcam_fps_slider'), - value = 25, - step = 1, - minimum = 1, - maximum = 60 - ) - register_ui_component('webcam_mode_radio', WEBCAM_MODE_RADIO) - register_ui_component('webcam_resolution_dropdown', WEBCAM_RESOLUTION_DROPDOWN) - register_ui_component('webcam_fps_slider',
WEBCAM_FPS_SLIDER) diff --git a/DeepFakeAI/uis/core.py b/DeepFakeAI/uis/core.py deleted file mode 100644 index 80bc413a6477e694484bda9d89eddd3a70252a72..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/core.py +++ /dev/null @@ -1,131 +0,0 @@ -from typing import Dict, Optional, Any, List -from types import ModuleType -import importlib -import sys -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import metadata, logger, wording -from DeepFakeAI.uis.typing import Component, ComponentName -from DeepFakeAI.filesystem import resolve_relative_path - -UI_COMPONENTS: Dict[ComponentName, Component] = {} -UI_LAYOUT_MODULES : List[ModuleType] = [] -UI_LAYOUT_METHODS =\ -[ - 'pre_check', - 'pre_render', - 'render', - 'listen', - 'run' -] - - -def load_ui_layout_module(ui_layout : str) -> Any: - try: - ui_layout_module = importlib.import_module('DeepFakeAI.uis.layouts.' + ui_layout) - for method_name in UI_LAYOUT_METHODS: - if not hasattr(ui_layout_module, method_name): - raise NotImplementedError - except ModuleNotFoundError as exception: - logger.debug(exception.msg, __name__.upper()) - sys.exit(wording.get('ui_layout_not_loaded').format(ui_layout = ui_layout)) - except NotImplementedError: - sys.exit(wording.get('ui_layout_not_implemented').format(ui_layout = ui_layout)) - return ui_layout_module - - -def get_ui_layouts_modules(ui_layouts : List[str]) -> List[ModuleType]: - global UI_LAYOUT_MODULES - - if not UI_LAYOUT_MODULES: - for ui_layout in ui_layouts: - ui_layout_module = load_ui_layout_module(ui_layout) - UI_LAYOUT_MODULES.append(ui_layout_module) - return UI_LAYOUT_MODULES - - -def get_ui_component(name : ComponentName) -> Optional[Component]: - if name in UI_COMPONENTS: - return UI_COMPONENTS[name] - return None - - -def register_ui_component(name : ComponentName, component: Component) -> None: - UI_COMPONENTS[name] = component - - -def launch() -> None: - with gradio.Blocks(theme = get_theme(), css = get_css(), title = metadata.get('name') + ' ' + metadata.get('version')) as ui: - for ui_layout in DeepFakeAI.globals.ui_layouts: - ui_layout_module = load_ui_layout_module(ui_layout) - if ui_layout_module.pre_render(): - ui_layout_module.render() - ui_layout_module.listen() - - for ui_layout in DeepFakeAI.globals.ui_layouts: - ui_layout_module = load_ui_layout_module(ui_layout) - ui_layout_module.run(ui) - - -def get_theme() -> gradio.Theme: - return gradio.themes.Base( - primary_hue = gradio.themes.colors.red, - secondary_hue = gradio.themes.colors.neutral, - font = gradio.themes.GoogleFont('Open Sans') - ).set( - background_fill_primary = '*neutral_100', - block_background_fill = 'white', - block_border_width = '0', - block_label_background_fill = '*primary_100', - block_label_background_fill_dark = '*primary_600', - block_label_border_width = 'none', - block_label_margin = '0.5rem', - block_label_radius = '*radius_md', - block_label_text_color = '*primary_500', - block_label_text_color_dark = 'white', - block_label_text_weight = '600', - block_title_background_fill = '*primary_100', - block_title_background_fill_dark = '*primary_600', - block_title_padding = '*block_label_padding', - block_title_radius = '*block_label_radius', - block_title_text_color = '*primary_500', - block_title_text_size = '*text_sm', - block_title_text_weight = '600', - block_padding = '0.5rem', - border_color_primary = 'transparent', - border_color_primary_dark = 'transparent', - button_large_padding = '2rem 0.5rem', - button_large_text_weight = 'normal', - button_primary_background_fill = 
'*primary_500', - button_primary_text_color = 'white', - button_secondary_background_fill = 'white', - button_secondary_border_color = 'transparent', - button_secondary_border_color_dark = 'transparent', - button_secondary_border_color_hover = 'transparent', - button_secondary_border_color_hover_dark = 'transparent', - button_secondary_text_color = '*neutral_800', - button_small_padding = '0.75rem', - checkbox_background_color = '*neutral_200', - checkbox_background_color_selected = '*primary_600', - checkbox_background_color_selected_dark = '*primary_700', - checkbox_border_color_focus = '*primary_500', - checkbox_border_color_focus_dark = '*primary_600', - checkbox_border_color_selected = '*primary_600', - checkbox_border_color_selected_dark = '*primary_700', - checkbox_label_background_fill = '*neutral_50', - checkbox_label_background_fill_hover = '*neutral_50', - checkbox_label_background_fill_selected = '*primary_500', - checkbox_label_background_fill_selected_dark = '*primary_600', - checkbox_label_text_color_selected = 'white', - input_background_fill = '*neutral_50', - shadow_drop = 'none', - slider_color = '*primary_500', - slider_color_dark = '*primary_600' - ) - - -def get_css() -> str: - fixes_css_path = resolve_relative_path('uis/assets/fixes.css') - overrides_css_path = resolve_relative_path('uis/assets/overrides.css') - return open(fixes_css_path, 'r').read() + open(overrides_css_path, 'r').read() diff --git a/DeepFakeAI/uis/layouts/benchmark.py b/DeepFakeAI/uis/layouts/benchmark.py deleted file mode 100644 index b847d4e17b84f9edd29fb8eabeb7b6f4f624c57b..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/layouts/benchmark.py +++ /dev/null @@ -1,63 +0,0 @@ -import gradio - -import DeepFakeAI.globals -from DeepFakeAI.download import conditional_download -from DeepFakeAI.uis.components import about, frame_processors, frame_processors_options, execution, execution_thread_count, execution_queue_count, limit_resources, benchmark_options, benchmark - - -def pre_check() -> bool: - if not DeepFakeAI.globals.skip_download: - conditional_download('.assets/examples', - [ - 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/source.jpg', - 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-240p.mp4', - 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-360p.mp4', - 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-540p.mp4', - 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-720p.mp4', - 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-1080p.mp4', - 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-1440p.mp4', - 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-2160p.mp4' - ]) - return True - return False - - -def pre_render() -> bool: - return True - - -def render() -> gradio.Blocks: - with gradio.Blocks() as layout: - with gradio.Row(): - with gradio.Column(scale = 2): - with gradio.Blocks(): - about.render() - with gradio.Blocks(): - frame_processors.render() - frame_processors_options.render() - with gradio.Blocks(): - execution.render() - execution_thread_count.render() - execution_queue_count.render() - with gradio.Blocks(): - limit_resources.render() - with gradio.Blocks(): - benchmark_options.render() - with gradio.Column(scale = 5): - with gradio.Blocks(): - benchmark.render() - return layout - - -def listen() -> 
None: - frame_processors.listen() - frame_processors_options.listen() - execution.listen() - execution_thread_count.listen() - execution_queue_count.listen() - limit_resources.listen() - benchmark.listen() - - -def run(ui : gradio.Blocks) -> None: - ui.queue(concurrency_count = 2, api_open = False).launch(show_api = False) diff --git a/DeepFakeAI/uis/layouts/default.py b/DeepFakeAI/uis/layouts/default.py deleted file mode 100644 index d6f611c9f65c55c8babb360c6b019fdec7d28fd2..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/layouts/default.py +++ /dev/null @@ -1,77 +0,0 @@ -import gradio - -from DeepFakeAI.uis.components import about, frame_processors, frame_processors_options, execution, execution_thread_count, execution_queue_count, limit_resources, temp_frame, output_options, common_options, source, target, output, preview, trim_frame, face_analyser, face_selector, face_masker - - -def pre_check() -> bool: - return True - - -def pre_render() -> bool: - return True - - -def render() -> gradio.Blocks: - with gradio.Blocks() as layout: - with gradio.Row(): - with gradio.Column(scale = 2): - with gradio.Blocks(): - about.render() - with gradio.Blocks(): - frame_processors.render() - frame_processors_options.render() - with gradio.Blocks(): - execution.render() - execution_thread_count.render() - execution_queue_count.render() - with gradio.Blocks(): - limit_resources.render() - with gradio.Blocks(): - temp_frame.render() - with gradio.Blocks(): - output_options.render() - with gradio.Blocks(): - common_options.render() - with gradio.Column(scale = 2): - with gradio.Blocks(): - source.render() - with gradio.Blocks(): - target.render() - with gradio.Blocks(): - output.render() - with gradio.Column(scale = 3): - with gradio.Blocks(): - preview.render() - with gradio.Blocks(): - trim_frame.render() - with gradio.Blocks(): - face_selector.render() - with gradio.Blocks(): - face_masker.render() - with gradio.Blocks(): - face_analyser.render() - return layout - - -def listen() -> None: - frame_processors.listen() - frame_processors_options.listen() - execution.listen() - execution_thread_count.listen() - execution_queue_count.listen() - limit_resources.listen() - temp_frame.listen() - output_options.listen() - common_options.listen() - source.listen() - target.listen() - output.listen() - preview.listen() - trim_frame.listen() - face_selector.listen() - face_masker.listen() - face_analyser.listen() - - -def run(ui : gradio.Blocks) -> None: - ui.launch(show_api = False) diff --git a/DeepFakeAI/uis/layouts/webcam.py b/DeepFakeAI/uis/layouts/webcam.py deleted file mode 100644 index b3c60cf51021109209e3e29acf4b71bd97364ece..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/layouts/webcam.py +++ /dev/null @@ -1,46 +0,0 @@ -import gradio - -from DeepFakeAI.uis.components import about, frame_processors, frame_processors_options, execution, execution_thread_count, webcam_options, source, webcam - - -def pre_check() -> bool: - return True - - -def pre_render() -> bool: - return True - - -def render() -> gradio.Blocks: - with gradio.Blocks() as layout: - with gradio.Row(): - with gradio.Column(scale = 2): - with gradio.Blocks(): - about.render() - with gradio.Blocks(): - frame_processors.render() - frame_processors_options.render() - with gradio.Blocks(): - execution.render() - execution_thread_count.render() - with gradio.Blocks(): - webcam_options.render() - with gradio.Blocks(): - source.render() - with gradio.Column(scale = 5): - with gradio.Blocks(): - webcam.render() - return 
layout - - -def listen() -> None: - frame_processors.listen() - frame_processors_options.listen() - execution.listen() - execution_thread_count.listen() - source.listen() - webcam.listen() - - -def run(ui : gradio.Blocks) -> None: - ui.queue(concurrency_count = 2, api_open = False).launch(show_api = False) diff --git a/DeepFakeAI/uis/typing.py b/DeepFakeAI/uis/typing.py deleted file mode 100644 index b2c57d326df2430571c60a6c498e992a1c1ba2f4..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/typing.py +++ /dev/null @@ -1,43 +0,0 @@ -from typing import Literal, Any, IO, Union -import gradio - -File = IO[Any] -Component = Union[gradio.File, gradio.Image, gradio.Video, gradio.Slider] -ComponentName = Literal\ -[ - 'source_image', - 'target_image', - 'target_video', - 'preview_frame_slider', - 'face_selector_mode_dropdown', - 'reference_face_position_gallery', - 'reference_face_distance_slider', - 'face_analyser_order_dropdown', - 'face_analyser_age_dropdown', - 'face_analyser_gender_dropdown', - 'face_detector_model_dropdown', - 'face_detector_size_dropdown', - 'face_detector_score_slider', - 'face_mask_types_checkbox_group', - 'face_mask_blur_slider', - 'face_mask_padding_top_slider', - 'face_mask_padding_bottom_slider', - 'face_mask_padding_left_slider', - 'face_mask_padding_right_slider', - 'face_mask_region_checkbox_group', - 'frame_processors_checkbox_group', - 'face_swapper_model_dropdown', - 'face_enhancer_model_dropdown', - 'face_enhancer_blend_slider', - 'frame_enhancer_model_dropdown', - 'frame_enhancer_blend_slider', - 'face_debugger_items_checkbox_group', - 'output_path_textbox', - 'benchmark_runs_checkbox_group', - 'benchmark_cycles_slider', - 'webcam_mode_radio', - 'webcam_resolution_dropdown', - 'webcam_fps_slider' -] -WebcamMode = Literal['inline', 'udp', 'v4l2'] -StreamMode = Literal['udp', 'v4l2']
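Editor's note: the Component alias originally read `gradio.File or gradio.Image or gradio.Video or gradio.Slider`, which evaluates at import time to just `gradio.File`, because `or` returns its first truthy operand and classes are truthy; a Union is needed to express "any of these component types", hence the fix above. A quick standalone demonstration (classes A and B are illustrative):

from typing import Union

class A: ...
class B: ...

Wrong = A or B # classes are truthy, so this is just A
Right = Union[A, B] # a proper type alias covering both

print(Wrong is A) # True
print(Right) # typing.Union[__main__.A, __main__.B]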
diff --git a/DeepFakeAI/vision.py b/DeepFakeAI/vision.py deleted file mode 100644 index c8494638c1b8876d2701fd0ea9b459d28714adac..0000000000000000000000000000000000000000 --- a/DeepFakeAI/vision.py +++ /dev/null @@ -1,75 +0,0 @@ -from typing import Optional, List -from functools import lru_cache -import cv2 - -from DeepFakeAI.typing import Frame - - -def get_video_frame(video_path : str, frame_number : int = 0) -> Optional[Frame]: - if video_path: - video_capture = cv2.VideoCapture(video_path) - if video_capture.isOpened(): - frame_total = video_capture.get(cv2.CAP_PROP_FRAME_COUNT) - video_capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, max(frame_number - 1, 0))) - has_frame, frame = video_capture.read() - video_capture.release() - if has_frame: - return frame - return None - - -def detect_fps(video_path : str) -> Optional[float]: - if video_path: - video_capture = cv2.VideoCapture(video_path) - if video_capture.isOpened(): - video_fps = video_capture.get(cv2.CAP_PROP_FPS) - video_capture.release() - return video_fps - return None - - -def count_video_frame_total(video_path : str) -> int: - if video_path: - video_capture = cv2.VideoCapture(video_path) - if video_capture.isOpened(): - video_frame_total = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT)) - video_capture.release() - return video_frame_total - return 0 - - -def normalize_frame_color(frame : Frame) -> Frame: - return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - - -def resize_frame_dimension(frame : Frame, max_width : int, max_height : int) -> Frame: - height, width = frame.shape[:2] - if height > max_height or width > max_width: - scale = min(max_height / height, max_width / width) - new_width = int(width * scale) - new_height = int(height * scale) - return cv2.resize(frame, (new_width, new_height)) - return frame - - -@lru_cache(maxsize = 128) -def read_static_image(image_path : str) -> Optional[Frame]: - return read_image(image_path) - - -def read_static_images(image_paths : List[str]) -> List[Frame]: - frames = [] - if image_paths: - for image_path in image_paths: - frames.append(read_static_image(image_path)) - return frames - - -def read_image(image_path : str) -> Optional[Frame]: - if image_path: - return cv2.imread(image_path) - return None - - -def write_image(image_path : str, frame : Frame) -> bool: - if image_path: - return cv2.imwrite(image_path, frame) - return False
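Editor's note: read_static_image() is wrapped in lru_cache(maxsize = 128), so repeated preview updates do not re-read the same file from disk; frames are cached by path, which is also why stale results are possible if a file is rewritten in place. The caching behaviour in isolation, with an illustrative stub standing in for cv2.imread:

from functools import lru_cache

@lru_cache(maxsize = 128)
def read_static_image_stub(image_path : str) -> str:
	print('disk read:', image_path) # stand-in for cv2.imread
	return 'frame:' + image_path

read_static_image_stub('target.jpg') # disk read happens
read_static_image_stub('target.jpg') # served from cache, no print
read_static_image_stub.cache_clear() # lru_cache offers this if stale frames become a concern
read_static_image_stub('target.jpg') # disk read happens again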
diff --git a/DeepFakeAI/wording.py b/DeepFakeAI/wording.py deleted file mode 100644 index 78f3cd3df122303c4427c874814d99dfc41f1867..0000000000000000000000000000000000000000 --- a/DeepFakeAI/wording.py +++ /dev/null @@ -1,136 +0,0 @@ -WORDING =\ -{ - 'python_not_supported': 'Python version is not supported, upgrade to {version} or higher', - 'ffmpeg_not_installed': 'FFMpeg is not installed', - 'install_dependency_help': 'select the variant of {dependency} to install', - 'skip_venv_help': 'skip the virtual environment check', - 'source_help': 'select a source image', - 'target_help': 'select a target image or video', - 'output_help': 'specify the output file or directory', - 'frame_processors_help': 'choose from the available frame processors (choices: {choices}, ...)', - 'frame_processor_model_help': 'choose the model for the frame processor', - 'frame_processor_blend_help': 'specify the blend amount for the frame processor', - 'face_debugger_items_help': 'specify the face debugger items (choices: {choices})', - 'ui_layouts_help': 'choose from the available ui layouts (choices: {choices}, ...)', - 'keep_fps_help': 'preserve the frames per second (fps) of the target', - 'keep_temp_help': 'retain temporary frames after processing', - 'skip_audio_help': 'omit audio from the target', - 'face_analyser_order_help': 'specify the order used for the face analyser', - 'face_analyser_age_help': 'specify the age used for the face analyser', - 'face_analyser_gender_help': 'specify the gender used for the face analyser', - 'face_detector_model_help': 'specify the model used for the face detector', - 'face_detector_size_help': 'specify the size used for the face detector', - 'face_detector_score_help': 'specify the score threshold used for the face detector', - 'face_selector_mode_help': 'specify the mode for the face selector', - 'reference_face_position_help': 'specify the position of the reference face', - 'reference_face_distance_help': 'specify the distance between the reference face and the target face', - 'reference_frame_number_help': 'specify the number of the reference frame', - 'face_mask_types_help': 'choose from the available face mask types (choices: {choices})', - 'face_mask_blur_help': 'specify the blur amount for the face mask', - 'face_mask_padding_help': 'specify the face mask padding (top, right, bottom, left) in percent', - 'face_mask_regions_help': 'choose from the available face mask regions (choices: {choices})', - 'trim_frame_start_help': 'specify the start frame for extraction', - 'trim_frame_end_help': 'specify the end frame for extraction', - 'temp_frame_format_help': 'specify the image format used for frame extraction', - 'temp_frame_quality_help': 'specify the image quality used for frame extraction', - 'output_image_quality_help': 'specify the quality used for the output image', - 'output_video_encoder_help': 'specify the encoder used for the output video', - 'output_video_quality_help': 'specify the quality used for the output video', - 'max_memory_help': 'specify the maximum amount of ram to be used (in gb)', - 'execution_providers_help': 'choose from the available execution providers (choices: {choices}, ...)', - 'execution_thread_count_help': 'specify the number of execution threads', - 'execution_queue_count_help': 'specify the number of execution queues', - 'skip_download_help': 'omit automated downloads and lookups', - 'headless_help': 'run the program in headless mode', - 'log_level_help': 'choose from the available log levels', - 'creating_temp': 'Creating temporary resources', - 'extracting_frames_fps': 'Extracting frames with {fps} FPS', - 'analysing': 'Analysing', - 'processing': 'Processing', - 'downloading': 'Downloading', - 'temp_frames_not_found': 'Temporary frames not found', - 'compressing_image': 'Compressing image', - 'compressing_image_failed': 'Compressing image failed', - 'merging_video_fps': 'Merging video with {fps} FPS', - 'merging_video_failed': 'Merging video failed', - 'skipping_audio': 'Skipping audio', - 'restoring_audio': 'Restoring audio', - 'restoring_audio_skipped': 'Restoring audio skipped', - 'clearing_temp': 'Clearing temporary resources', - 'processing_image_succeed': 'Processing to image succeeded', - 'processing_image_failed': 'Processing to image failed', - 'processing_video_succeed': 'Processing to video succeeded', - 'processing_video_failed': 'Processing to video failed', - 'model_download_not_done': 'Download of the model is not complete', - 'model_file_not_present': 'Model file is not present', - 'select_image_source': 'Select an image for source path', - 'select_image_or_video_target': 'Select an image or video for target path', - 'select_file_or_directory_output': 'Select a file or directory for output path', - 'no_source_face_detected': 'No source face detected', - 'frame_processor_not_loaded': 'Frame processor {frame_processor} could not be loaded', - 'frame_processor_not_implemented': 'Frame processor {frame_processor} not implemented correctly', - 'ui_layout_not_loaded': 'UI layout {ui_layout} could not be loaded', - 'ui_layout_not_implemented': 'UI layout {ui_layout} not implemented correctly', - 'stream_not_loaded': 'Stream {stream_mode} could not be loaded', - 'donate_button_label': 'DONATE', - 'start_button_label': 'START', - 'stop_button_label': 'STOP', - 'clear_button_label': 'CLEAR', - 'benchmark_runs_checkbox_group_label': 'BENCHMARK RUNS', - 'benchmark_results_dataframe_label': 'BENCHMARK RESULTS', - 'benchmark_cycles_slider_label': 'BENCHMARK CYCLES', - 'execution_providers_checkbox_group_label': 'EXECUTION PROVIDERS', - 'execution_thread_count_slider_label': 'EXECUTION THREAD COUNT', - 'execution_queue_count_slider_label': 'EXECUTION QUEUE COUNT', - 'face_analyser_order_dropdown_label': 'FACE ANALYSER ORDER', - 'face_analyser_age_dropdown_label': 'FACE ANALYSER AGE', - 'face_analyser_gender_dropdown_label': 'FACE ANALYSER GENDER', - 'face_detector_model_dropdown_label': 'FACE DETECTOR MODEL', - 'face_detector_size_dropdown_label': 'FACE DETECTOR SIZE', - 'face_detector_score_slider_label': 'FACE DETECTOR SCORE', - 'face_selector_mode_dropdown_label': 'FACE SELECTOR MODE', - 'reference_face_gallery_label': 'REFERENCE FACE', - 'reference_face_distance_slider_label': 'REFERENCE FACE DISTANCE', - 'face_mask_types_checkbox_group_label': 'FACE MASK TYPES', - 'face_mask_blur_slider_label': 'FACE MASK BLUR', - 'face_mask_padding_top_slider_label': 'FACE MASK PADDING TOP',
- 'face_mask_padding_bottom_slider_label': 'FACE MASK PADDING BOTTOM', - 'face_mask_padding_left_slider_label': 'FACE MASK PADDING LEFT', - 'face_mask_padding_right_slider_label': 'FACE MASK PADDING RIGHT', - 'face_mask_region_checkbox_group_label': 'FACE MASK REGIONS', - 'max_memory_slider_label': 'MAX MEMORY', - 'output_image_or_video_label': 'OUTPUT', - 'output_path_textbox_label': 'OUTPUT PATH', - 'output_image_quality_slider_label': 'OUTPUT IMAGE QUALITY', - 'output_video_encoder_dropdown_label': 'OUTPUT VIDEO ENCODER', - 'output_video_quality_slider_label': 'OUTPUT VIDEO QUALITY', - 'preview_image_label': 'PREVIEW', - 'preview_frame_slider_label': 'PREVIEW FRAME', - 'frame_processors_checkbox_group_label': 'FRAME PROCESSORS', - 'face_swapper_model_dropdown_label': 'FACE SWAPPER MODEL', - 'face_enhancer_model_dropdown_label': 'FACE ENHANCER MODEL', - 'face_enhancer_blend_slider_label': 'FACE ENHANCER BLEND', - 'frame_enhancer_model_dropdown_label': 'FRAME ENHANCER MODEL', - 'frame_enhancer_blend_slider_label': 'FRAME ENHANCER BLEND', - 'face_debugger_items_checkbox_group_label': 'FACE DEBUGGER ITEMS', - 'common_options_checkbox_group_label': 'OPTIONS', - 'temp_frame_format_dropdown_label': 'TEMP FRAME FORMAT', - 'temp_frame_quality_slider_label': 'TEMP FRAME QUALITY', - 'trim_frame_start_slider_label': 'TRIM FRAME START', - 'trim_frame_end_slider_label': 'TRIM FRAME END', - 'source_file_label': 'SOURCE', - 'target_file_label': 'TARGET', - 'webcam_image_label': 'WEBCAM', - 'webcam_mode_radio_label': 'WEBCAM MODE', - 'webcam_resolution_dropdown': 'WEBCAM RESOLUTION', - 'webcam_fps_slider': 'WEBCAM FPS', - 'point': '.', - 'comma': ',', - 'colon': ':', - 'question_mark': '?', - 'exclamation_mark': '!' -} - - -def get(key : str) -> str: - return WORDING[key]
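Editor's note: the wording table is a flat str-to-str dict, and get() raises KeyError on a missing key rather than returning a fallback, so a typo in a label key fails loudly at render time. Callers combine get() with str.format for placeholder entries, as core.py does with 'ui_layout_not_loaded'. A minimal self-contained illustration of that pattern:

WORDING =\
{
	'ui_layout_not_loaded': 'UI layout {ui_layout} could not be loaded'
}

def get(key : str) -> str:
	return WORDING[key] # KeyError on typos, by design

print(get('ui_layout_not_loaded').format(ui_layout = 'default'))
# UI layout default could not be loaded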