import argparse
import os

import cv2
import gradio as gr
import insightface
import numpy as np
from insightface.app import FaceAnalysis

from dofaker import FaceSwapper, PoseSwapper
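
# FaceClone: a Gradio web UI around dofaker's FaceSwapper and PoseSwapper.
# The insightface FaceAnalysis instance `app` is created in main() and shared at
# module level so that the face-selection helpers below can call app.get().
#
# Typical invocation (the filename app.py is assumed here):
#   python app.py --server_port 7860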

def parse_args():
    parser = argparse.ArgumentParser(description='Run the FaceClone face swap web UI.')
    parser.add_argument(
        '--inbrowser',
        help='Automatically launch the interface in a new tab on the default browser.',
        dest='inbrowser',
        default=True)
    parser.add_argument(
        '--server_port',
        help='Start gradio app on this port (if available).',
        dest='server_port',
        type=int,
        default=None)
    return parser.parse_args()
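
# Gradio click handler for the FaceSwapper tabs: builds a dofaker FaceSwapper and
# replaces the face in input_path (an image or video file) that matches the face
# in dst_path with the face from src_path, returning the path of the output file.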
def swap_face(input_path, dst_path, src_path, use_enhancer, use_sr, scale, face_sim_thre):
    faker = FaceSwapper(use_enhancer=use_enhancer, use_sr=use_sr, scale=scale, face_sim_thre=face_sim_thre)
    output_path = faker.run(input_path, dst_path, src_path)
    return output_path
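
# Gradio click handler for the PoseSwapper tab: builds a dofaker PoseSwapper and
# applies the pose from target_path to the source media at input_path, returning
# the path of the output file.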
def swap_pose(input_path, target_path, use_enhancer, use_sr, scale):
    faker = PoseSwapper(use_enhancer=use_enhancer, use_sr=use_sr, scale=scale)
    output_path = faker.run(input_path, target_path)
    return output_path
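
# The helpers below (add_bbox_padding, select_handler, point_in_box, get_faces)
# implement click-to-select face cropping on top of the module-level insightface
# `app` created in main(). They are defined here but not attached to any Gradio
# event in the UI below; see the wiring sketch after get_faces().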
def add_bbox_padding(bbox, margin=5):
    # Expand an (x1, y1, x2, y2) box by `margin` pixels on every side.
    return [
        bbox[0] - margin,
        bbox[1] - margin,
        bbox[2] + margin,
        bbox[3] + margin
    ]

def select_handler(img, evt: gr.SelectData):
    # `img` is expected to be a numpy image; `evt.index` is the clicked (x, y).
    faces = app.get(img)
    faces = sorted(faces, key=lambda x: x.bbox[0])
    cropped_image = []
    face_index = -1
    sel_face_index = 0
    for face in faces:
        box = face.bbox.astype(np.int32)
        face_index += 1
        if point_in_box((box[0], box[1]), (box[2], box[3]), (evt.index[0], evt.index[1])):
            # Pad the detected box by 35% of its width, clamp to the image, and crop.
            margin = int((box[2] - box[0]) * 0.35)
            box = add_bbox_padding(box, margin)
            box = np.clip(box, 0, None)
            sel_face_index = face_index
            cropped_image = img[box[1]:box[3], box[0]:box[2]]
    return cropped_image, sel_face_index

def point_in_box(bl, tr, p):
    # True if point p lies strictly between the two box corners on both axes.
    return bl[0] < p[0] < tr[0] and bl[1] < p[1] < tr[1]

def get_faces(img):
    # Detect faces with the module-level insightface `app`, sorted left to right.
    faces = app.get(img)
    faces = sorted(faces, key=lambda x: x.bbox[0])
    return img, len(faces)
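
# The selection helpers above are not wired into the UI in main(). A minimal
# sketch of one possible wiring is shown below, assuming a numpy-typed image
# component plus two extra output components were added inside the Blocks
# context (all names here are hypothetical, not part of the current UI):
#
#     pick_image = gr.Image(type='numpy')
#     face_crop = gr.Image()
#     face_index = gr.Number(precision=0)
#     pick_image.select(select_handler, inputs=pick_image,
#                       outputs=[face_crop, face_index])
#
# Gradio injects the click position through the gr.SelectData argument of
# select_handler (evt.index is the clicked (x, y) for an Image component).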

def main():
    global app  # shared with the face-selection helpers defined above
    args = parse_args()
    app = FaceAnalysis(name='buffalo_l')
    app.prepare(ctx_id=0, det_size=(640, 640))
    # inswapper model; downloaded on first use but not referenced by the UI handlers below.
    swapper = insightface.model_zoo.get_model('inswapper_128.onnx', download=True, download_zip=True)

    with gr.Blocks(theme='ysharma/huggingface@=0.0.4') as web_ui:
        gr.Markdown('# FaceClone')
        gr.Markdown('### Deepfake face and pose swapping.')
        with gr.Tab('FaceSwapper'):
            gr.Markdown('FaceClone: face swapping powered by Gradio.')
            with gr.Tab('Face'):
                with gr.Row():
                    with gr.Column():
                        gr.Markdown('The source image to be swapped.')
                        image_input = gr.Image(type='filepath')
                        with gr.Row():
                            with gr.Column():
                                gr.Markdown('The target face contained in the source image.')
                                dst_face_image = gr.Image(type='filepath')
                            with gr.Column():
                                gr.Markdown('The source face that will replace the target face.')
                                src_face_image = gr.Image(type='filepath')
                    with gr.Column():
                        output_image = gr.Image(type='filepath')
                        use_enhancer = gr.Checkbox(label="Face enhance", info="Whether to use the face enhancement model.")
                        with gr.Row():
                            use_sr = gr.Checkbox(label="Super resolution", info="Whether to use the image super resolution model.")
                            scale = gr.Number(value=1, label='Image super resolution scale')
                        with gr.Row():
                            face_sim_thre = gr.Number(value=0.6, label='Face similarity threshold', minimum=0.0, maximum=1.0)
                convert_button = gr.Button('Swap')
                convert_button.click(
                    fn=swap_face,
                    inputs=[image_input, dst_face_image, src_face_image,
                            use_enhancer, use_sr, scale, face_sim_thre],
                    outputs=[output_image])
            with gr.Tab('Video'):
                with gr.Row():
                    with gr.Column():
                        gr.Markdown('The source video to be swapped.')
                        video_input = gr.Video()
                        with gr.Row():
                            with gr.Column():
                                gr.Markdown('The target face contained in the source video.')
                                dst_face_image = gr.Image(type='filepath')
                            with gr.Column():
                                gr.Markdown('The source face that will replace the target face.')
                                src_face_image = gr.Image(type='filepath')
                    with gr.Column():
                        output_video = gr.Video()
                        use_enhancer = gr.Checkbox(label="Face enhance", info="Whether to use the face enhancement model.")
                        with gr.Row():
                            use_sr = gr.Checkbox(label="Super resolution", info="Whether to use the image super resolution model.")
                            scale = gr.Number(value=1, label='Image super resolution scale')
                        with gr.Row():
                            face_sim_thre = gr.Number(value=0.6, label='Face similarity threshold', minimum=0.0, maximum=1.0)
                convert_button = gr.Button('Swap')
                convert_button.click(
                    fn=swap_face,
                    inputs=[video_input, dst_face_image, src_face_image,
                            use_enhancer, use_sr, scale, face_sim_thre],
                    outputs=[output_video])
        with gr.Tab('PoseSwapper'):
            gr.Markdown('FaceClone: pose swapping powered by Gradio.')
            with gr.Tab('Image'):
                with gr.Row():
                    with gr.Column():
                        gr.Markdown('The source image to be swapped.')
                        image_input = gr.Image(type='filepath')
                        gr.Markdown('The target image with the desired pose.')
                        target = gr.Image(type='filepath')
                    with gr.Column():
                        output_image = gr.Image(type='filepath')
                        use_enhancer = gr.Checkbox(label="Face enhance", info="Whether to use the face enhancement model.")
                        with gr.Row():
                            use_sr = gr.Checkbox(label="Super resolution", info="Whether to use the image super resolution model.")
                            scale = gr.Number(value=1, label='Image super resolution scale')
                convert_button = gr.Button('Swap')
                convert_button.click(
                    fn=swap_pose,
                    inputs=[image_input, target, use_enhancer, use_sr, scale],
                    outputs=[output_image])

    web_ui.launch(inbrowser=args.inbrowser, server_port=args.server_port)

if __name__ == '__main__':
    main()