from time import sleep
from typing import Optional, Tuple

import cv2
import gradio
import numpy

from ffff import logger, process_manager, state_manager, wording
from ffff.audio import create_empty_audio_frame, get_audio_frame
from ffff.common_helper import get_first
from ffff.content_analyser import analyse_frame
from ffff.core import conditional_append_reference_faces
from ffff.face_analyser import get_average_face, get_many_faces
from ffff.face_store import clear_reference_faces, clear_static_faces, get_reference_faces
from ffff.filesystem import filter_audio_paths, is_image, is_video
from ffff.processors.core import get_processors_modules
from ffff.typing import AudioFrame, Face, FaceSet, VisionFrame
from ffff.uis.core import get_ui_component, get_ui_components, register_ui_component
from ffff.vision import count_video_frame_total, detect_frame_orientation, get_video_frame, normalize_frame_color, read_static_image, read_static_images, resize_frame_resolution

PREVIEW_IMAGE: Optional[gradio.Image] = None
PREVIEW_FRAME_SLIDER: Optional[gradio.Slider] = None
PREVIEW_ORIGINAL_IMAGE: Optional[gradio.Image] = None


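# render() builds the preview column: the processed preview image, the frame slider and the untouched original frame, seeded from the current state.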
def render() -> None:
    global PREVIEW_IMAGE
    global PREVIEW_FRAME_SLIDER
    global PREVIEW_ORIGINAL_IMAGE

    preview_image_args = {
        'label': wording.get('uis.preview_image'),
        'interactive': False
    }
    preview_frame_slider_args = {
        'label': wording.get('uis.preview_frame_slider'),
        'step': 1,
        'minimum': 0,
        'maximum': 100,
        'visible': False
    }
    original_image_args = {
        'label': 'Original Frame',
        'interactive': False,
        'value': None
    }

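    # Gather reference faces, source faces and a source audio frame for the initial preview.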
    conditional_append_reference_faces()
    reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None
    source_frames = read_static_images(state_manager.get_item('source_paths'))
    source_faces = get_many_faces(source_frames)
    source_face = get_average_face(source_faces)
    source_audio_path = get_first(filter_audio_paths(state_manager.get_item('source_paths')))
    source_audio_frame = create_empty_audio_frame()

    if source_audio_path and state_manager.get_item('output_video_fps') and state_manager.get_item('reference_frame_number'):
        temp_audio_frame = get_audio_frame(source_audio_path, state_manager.get_item('output_video_fps'), state_manager.get_item('reference_frame_number'))
        if numpy.any(temp_audio_frame):
            source_audio_frame = temp_audio_frame

    if is_image(state_manager.get_item('target_path')):
        target_vision_frame = read_static_image(state_manager.get_item('target_path'))
        preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, target_vision_frame)
        preview_image_args['value'] = normalize_frame_color(preview_vision_frame)
        original_image_args['value'] = normalize_frame_color(target_vision_frame)
    elif is_video(state_manager.get_item('target_path')):
        temp_vision_frame = get_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number'))
        preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame)
        preview_image_args['value'] = normalize_frame_color(preview_vision_frame)
        original_image_args['value'] = normalize_frame_color(temp_vision_frame)
        preview_image_args['visible'] = True
        preview_frame_slider_args['value'] = state_manager.get_item('reference_frame_number')
        preview_frame_slider_args['maximum'] = count_video_frame_total(state_manager.get_item('target_path'))
        preview_frame_slider_args['visible'] = True

    PREVIEW_IMAGE = gradio.Image(**preview_image_args)
    PREVIEW_FRAME_SLIDER = gradio.Slider(**preview_frame_slider_args)
    PREVIEW_ORIGINAL_IMAGE = gradio.Image(**original_image_args)

    register_ui_component('preview_image', PREVIEW_IMAGE)
    register_ui_component('preview_frame_slider', PREVIEW_FRAME_SLIDER)
    register_ui_component('preview_original_image', PREVIEW_ORIGINAL_IMAGE)


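# listen() wires the frame slider, source, target and processor controls to the preview update handlers.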
def listen() -> None:
    PREVIEW_FRAME_SLIDER.release(update_images, inputs=PREVIEW_FRAME_SLIDER, outputs=[PREVIEW_ORIGINAL_IMAGE, PREVIEW_IMAGE])

    reference_face_position_gallery = get_ui_component('reference_face_position_gallery')
    if reference_face_position_gallery:
        reference_face_position_gallery.select(update_preview_image, inputs=PREVIEW_FRAME_SLIDER, outputs=PREVIEW_IMAGE)

    for ui_component in get_ui_components(['source_audio', 'source_image', 'target_image', 'target_video']):
        for method in ['upload', 'change', 'clear']:
            getattr(ui_component, method)(update_images, inputs=PREVIEW_FRAME_SLIDER, outputs=[PREVIEW_ORIGINAL_IMAGE, PREVIEW_IMAGE])

    for ui_component in get_ui_components(['target_image', 'target_video']):
        for method in ['upload', 'change', 'clear']:
            getattr(ui_component, method)(update_preview_frame_slider, outputs=PREVIEW_FRAME_SLIDER)

    for ui_component in get_ui_components([
        'face_debugger_items_checkbox_group',
        'frame_colorizer_size_dropdown',
        'face_mask_types_checkbox_group',
        'face_mask_regions_checkbox_group'
    ]):
        ui_component.change(update_preview_image, inputs=PREVIEW_FRAME_SLIDER, outputs=PREVIEW_IMAGE)

    for ui_component in get_ui_components([
        'age_modifier_model_dropdown',
        'expression_restorer_model_dropdown',
        'processors_checkbox_group',
        'face_editor_model_dropdown',
        'face_enhancer_model_dropdown',
        'face_swapper_model_dropdown',
        'face_swapper_pixel_boost_dropdown',
        'frame_colorizer_model_dropdown',
        'frame_enhancer_model_dropdown',
        'lip_syncer_model_dropdown',
        'face_selector_mode_dropdown',
        'face_selector_order_dropdown',
        'face_selector_gender_dropdown',
        'face_selector_race_dropdown',
        'face_detector_model_dropdown',
        'face_detector_size_dropdown',
        'face_detector_angles_checkbox_group',
        'face_landmarker_model_dropdown'
    ]):
        ui_component.change(clear_and_update_preview_image, inputs=PREVIEW_FRAME_SLIDER, outputs=PREVIEW_IMAGE)

    for ui_component in get_ui_components([
        'face_detector_score_slider',
        'face_landmarker_score_slider'
    ]):
        ui_component.release(clear_and_update_preview_image, inputs=PREVIEW_FRAME_SLIDER, outputs=PREVIEW_IMAGE)


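# Refreshes both the original frame and the processed preview for the given frame number.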
def update_images(frame_number: int = 0) -> Tuple[gradio.Image, gradio.Image]:
    preview_image = update_preview_image(frame_number)
    original_image = update_original_frame(frame_number)
    return original_image, preview_image


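# Drops cached reference and static faces before re-rendering, so detector and model changes take effect.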
def clear_and_update_preview_image(frame_number: int = 0) -> gradio.Image:
    clear_reference_faces()
    clear_static_faces()
    return update_preview_image(frame_number)


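# Renders a single processed preview frame for the current state, waiting for any running check to finish first.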
def update_preview_image(frame_number: int = 0) -> gradio.Image:
    while process_manager.is_checking():
        sleep(0.5)
    conditional_append_reference_faces()
    reference_faces = get_reference_faces() if 'reference' in state_manager.get_item('face_selector_mode') else None
    source_frames = read_static_images(state_manager.get_item('source_paths'))
    source_faces = get_many_faces(source_frames)
    source_face = get_average_face(source_faces)
    source_audio_path = get_first(filter_audio_paths(state_manager.get_item('source_paths')))
    source_audio_frame = create_empty_audio_frame()

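    # Shift the reference frame number by the trim start so the audio frame stays aligned with the trimmed video.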
    if source_audio_path and state_manager.get_item('output_video_fps') and state_manager.get_item('reference_frame_number'):
        reference_audio_frame_number = state_manager.get_item('reference_frame_number')
        if state_manager.get_item('trim_frame_start'):
            reference_audio_frame_number -= state_manager.get_item('trim_frame_start')
        temp_audio_frame = get_audio_frame(source_audio_path, state_manager.get_item('output_video_fps'), reference_audio_frame_number)
        if numpy.any(temp_audio_frame):
            source_audio_frame = temp_audio_frame

    if is_image(state_manager.get_item('target_path')):
        target_vision_frame = read_static_image(state_manager.get_item('target_path'))
        preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, target_vision_frame)
        preview_vision_frame = normalize_frame_color(preview_vision_frame)
        return gradio.Image(value=preview_vision_frame)

    if is_video(state_manager.get_item('target_path')):
        temp_vision_frame = get_video_frame(state_manager.get_item('target_path'), frame_number)
        preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame)
        preview_vision_frame = normalize_frame_color(preview_vision_frame)
        return gradio.Image(value=preview_vision_frame)

    return gradio.Image(value=None)


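# Returns the unprocessed target frame so it can be shown next to the processed preview.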
def update_original_frame(frame_number: int = 0) -> gradio.Image:
    if is_image(state_manager.get_item('target_path')):
        target_vision_frame = read_static_image(state_manager.get_item('target_path'))
        return gradio.Image(value=normalize_frame_color(target_vision_frame))
    if is_video(state_manager.get_item('target_path')):
        temp_vision_frame = get_video_frame(state_manager.get_item('target_path'), frame_number)
        return gradio.Image(value=normalize_frame_color(temp_vision_frame))
    return gradio.Image(value=None)


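# Fits the frame slider to the target video length and hides it when the target is not a video.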
def update_preview_frame_slider() -> gradio.Slider:
    if is_video(state_manager.get_item('target_path')):
        video_frame_total = count_video_frame_total(state_manager.get_item('target_path'))
        return gradio.Slider(maximum=video_frame_total, visible=True)
    return gradio.Slider(value=0, visible=False)


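# Runs the enabled processors over the target frame; frames flagged by the content analyser are blurred instead of processed.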
def process_preview_frame(reference_faces: FaceSet, source_face: Face, source_audio_frame: AudioFrame, target_vision_frame: VisionFrame) -> VisionFrame:
    source_vision_frame = target_vision_frame.copy()
    if analyse_frame(target_vision_frame):
        return cv2.GaussianBlur(target_vision_frame, (99, 99), 0)

    for processor_module in get_processors_modules(state_manager.get_item('processors')):
        logger.disable()
        if processor_module.pre_process('preview'):
            target_vision_frame = processor_module.process_frame({
                'reference_faces': reference_faces,
                'source_face': source_face,
                'source_audio_frame': source_audio_frame,
                'source_vision_frame': source_vision_frame,
                'target_vision_frame': target_vision_frame
            })
        logger.enable()
    return target_vision_frame