michaelj committed on
Commit
8fb085a
1 Parent(s): 43cdf68

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .editorconfig +8 -0
  2. .flake8 +3 -0
  3. .gitattributes +5 -0
  4. .github/FUNDING.yml +2 -0
  5. .github/preview.png +3 -0
  6. .github/workflows/ci.yml +35 -0
  7. .gitignore +10 -0
  8. Dockerfile +15 -0
  9. LICENSE.md +3 -0
  10. README.md +5 -6
  11. facefusion/__init__.py +0 -0
  12. facefusion/api/__init__.py +0 -0
  13. facefusion/api/core.py +81 -0
  14. facefusion/api/model.py +99 -0
  15. facefusion/api/test/test.py +48 -0
  16. facefusion/choices.py +26 -0
  17. facefusion/content_analyser.py +102 -0
  18. facefusion/core.py +362 -0
  19. facefusion/face_analyser.py +309 -0
  20. facefusion/face_cache.py +29 -0
  21. facefusion/face_helper.py +149 -0
  22. facefusion/face_reference.py +21 -0
  23. facefusion/globals.py +50 -0
  24. facefusion/installer.py +63 -0
  25. facefusion/metadata.py +13 -0
  26. facefusion/processors/__init__.py +0 -0
  27. facefusion/processors/frame/__init__.py +0 -0
  28. facefusion/processors/frame/choices.py +13 -0
  29. facefusion/processors/frame/core.py +105 -0
  30. facefusion/processors/frame/globals.py +10 -0
  31. facefusion/processors/frame/modules/__init__.py +0 -0
  32. facefusion/processors/frame/modules/face_blur.py +277 -0
  33. facefusion/processors/frame/modules/face_debugger.py +123 -0
  34. facefusion/processors/frame/modules/face_enhancer.py +222 -0
  35. facefusion/processors/frame/modules/face_swapper.py +283 -0
  36. facefusion/processors/frame/modules/frame_enhancer.py +165 -0
  37. facefusion/processors/frame/typings.py +7 -0
  38. facefusion/typing.py +41 -0
  39. facefusion/uis/__init__.py +0 -0
  40. facefusion/uis/assets/fixes.css +7 -0
  41. facefusion/uis/assets/overrides.css +44 -0
  42. facefusion/uis/choices.py +7 -0
  43. facefusion/uis/components/__init__.py +0 -0
  44. facefusion/uis/components/about.py +23 -0
  45. facefusion/uis/components/benchmark.py +131 -0
  46. facefusion/uis/components/benchmark_options.py +29 -0
  47. facefusion/uis/components/common_options.py +38 -0
  48. facefusion/uis/components/execution.py +34 -0
  49. facefusion/uis/components/execution_queue_count.py +28 -0
  50. facefusion/uis/components/execution_thread_count.py +29 -0
.editorconfig ADDED
@@ -0,0 +1,8 @@
+ root = true
+
+ [*]
+ end_of_line = lf
+ insert_final_newline = true
+ indent_size = 4
+ indent_style = tab
+ trim_trailing_whitespace = true
.flake8 ADDED
@@ -0,0 +1,3 @@
+ [flake8]
+ select = E3, E4, F
+ per-file-ignores = facefusion/core.py:E402, facefusion/installer.py:E402
.gitattributes CHANGED
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ .github/preview.png filter=lfs diff=lfs merge=lfs -text
+ temp/target/test-1703080901.png filter=lfs diff=lfs merge=lfs -text
+ temp/target/test-1703080957.png filter=lfs diff=lfs merge=lfs -text
+ temp/target/test-1703080999.png filter=lfs diff=lfs merge=lfs -text
+ temp/target/test-1703081020.png filter=lfs diff=lfs merge=lfs -text
.github/FUNDING.yml ADDED
@@ -0,0 +1,2 @@
+ github: henryruhs
+ custom: https://paypal.me/henryruhs
.github/preview.png ADDED

Git LFS Details

  • SHA256: 18b390233d30eb7a755fae40fcce26aec0936c2d5ab2cb2787a97c6dd436b063
  • Pointer size: 132 Bytes
  • Size of remote file: 1.2 MB
.github/workflows/ci.yml ADDED
@@ -0,0 +1,35 @@
+ name: ci
+
+ on: [ push, pull_request ]
+
+ jobs:
+   lint:
+     runs-on: ubuntu-latest
+     steps:
+       - name: Checkout
+         uses: actions/checkout@v2
+       - name: Set up Python 3.10
+         uses: actions/setup-python@v2
+         with:
+           python-version: '3.10'
+       - run: pip install flake8
+       - run: pip install mypy
+       - run: flake8 run.py facefusion tests
+       - run: mypy run.py facefusion tests
+   test:
+     strategy:
+       matrix:
+         os: [ macos-latest, ubuntu-latest, windows-latest ]
+     runs-on: ${{ matrix.os }}
+     steps:
+       - name: Checkout
+         uses: actions/checkout@v2
+       - name: Set up ffmpeg
+         uses: FedericoCarboni/setup-ffmpeg@v2
+       - name: Set up Python 3.10
+         uses: actions/setup-python@v2
+         with:
+           python-version: '3.10'
+       - run: python install.py --torch cpu --onnxruntime default
+       - run: pip install pytest
+       - run: pytest
.gitignore ADDED
@@ -0,0 +1,10 @@
+ .assets
+ .idea
+ .vscode
+ **/venv/
+ **/__pycache__/
+ **/pyvenv.cfg
+ **/local_cache/
+ **/*.DS_Store
+ **/*.jpg
+ **/*.mp4
Dockerfile ADDED
@@ -0,0 +1,15 @@
+ FROM ubuntu:20.04
+
+ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y ffmpeg python3 python3-pip python-is-python3
+
+ # Copy the project code into the container
+ COPY . /app
+
+ # Set the working directory
+ WORKDIR /app
+
+ # Install the project dependencies (requirements.txt must already be inside the image)
+ RUN pip3 install -r requirements.txt
+
+ # Start the FaceFusion API
+ CMD ["python", "run.py", "--api"]
LICENSE.md ADDED
@@ -0,0 +1,3 @@
+ MIT license
+
+ Copyright (c) 2023 Henry Ruhs
README.md CHANGED
@@ -1,10 +1,9 @@
  ---
- title: Facefusionapi
- emoji: 📈
- colorFrom: pink
- colorTo: yellow
+ title: facefusionapi
+ emoji: 🐳
+ colorFrom: purple
+ colorTo: gray
  sdk: docker
- pinned: false
+ app_port: 7860
  ---
-
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
facefusion/__init__.py ADDED
File without changes
facefusion/api/__init__.py ADDED
File without changes
facefusion/api/core.py ADDED
@@ -0,0 +1,81 @@
+ import os
+ import base64
+ import time
+ from fastapi import FastAPI, APIRouter, Body
+
+ from facefusion.api.model import Params, print_globals
+ import facefusion.globals as globals
+ import facefusion.processors.frame.globals as frame_processors_globals
+ from facefusion import core
+ from facefusion.utilities import normalize_output_path
+
+ app = FastAPI()
+ router = APIRouter()
+
+ @router.post("/")
+ async def process_frames(params: Params = Body(...)) -> dict:
+     delete_files_in_directory('/workspaces/facefusion-api/facefusion/api/temp/source')
+     delete_files_in_directory('/workspaces/facefusion-api/facefusion/api/temp/target')
+     delete_files_in_directory('/workspaces/facefusion-api/facefusion/api/temp/output')
+
+     if not (params.source or params.target):
+         return {"message": "Source image or path is required"}
+
+     update_global_variables(params)
+
+     globals.source_path = f"/workspaces/facefusion-api/facefusion/api/temp/source/{params.user_id}-{int(time.time())}.{params.source_type}"
+     globals.target_path = f"/workspaces/facefusion-api/facefusion/api/temp/target/{params.user_id}-{int(time.time())}.{params.target_type}"
+     globals.output_path = f"/workspaces/facefusion-api/facefusion/api/temp/output/{params.user_id}-{int(time.time())}.{params.target_type}"
+
+     print(globals.output_path)
+     print_globals()
+
+     # write the incoming base64 payloads to disk so core can pick them up
+     save_file(globals.source_path, params.source)
+     save_file(globals.target_path, params.target)
+
+     try:
+         core.api_conditional_process()
+     except Exception as e:
+         print(e)
+         return {"message": "Error"}
+     output = image_to_base64_str(globals.output_path)
+     return {'output': output}
+
+ def update_global_variables(params: Params):
+     for var_name, value in vars(params).items():
+         if value is not None:
+             if hasattr(globals, var_name):
+                 setattr(globals, var_name, value)
+             elif hasattr(frame_processors_globals, var_name):
+                 setattr(frame_processors_globals, var_name, value)
+
+ def image_to_base64_str(image_path):
+     with open(image_path, "rb") as image_file:
+         encoded_string = base64.b64encode(image_file.read())
+     return encoded_string.decode('utf-8')
+
+ def save_file(file_path: str, encoded_image: str):
+     data = base64.b64decode(encoded_image)
+     directory = os.path.dirname(file_path)
+     if not os.path.exists(directory):
+         os.makedirs(directory)
+     with open(file_path, "wb") as file:
+         file.write(data)
+
+ def delete_files_in_directory(directory_path):
+     # create the directory on first use so os.listdir cannot fail
+     os.makedirs(directory_path, exist_ok = True)
+     for filename in os.listdir(directory_path):
+         file_path = os.path.join(directory_path, filename)
+         if os.path.isfile(file_path):
+             os.remove(file_path)
+             print(f"Deleted {file_path}")
+
+
+ app.include_router(router)
+
+ def launch():
+     import uvicorn
+     uvicorn.run(app, host="0.0.0.0", port=7860)
facefusion/api/model.py ADDED
@@ -0,0 +1,99 @@
+ from pydantic import BaseModel
+ from typing import Optional, List
+ from facefusion.typing import FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, OutputVideoEncoder, FaceDetectorModel, FaceRecognizerModel, TempFrameFormat, Padding
+ from facefusion.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem
+ import facefusion.globals as globals
+ import facefusion.processors.frame.globals as frame_processors_globals
+
+ class Params(BaseModel):
+     user_id : str
+     source : Optional[str]
+     target : str
+     source_type: str
+     target_type: str
+
+     # execution
+     execution_providers: Optional[List[str]] = ['CPUExecutionProvider']
+     execution_thread_count: Optional[int] = 4
+     execution_queue_count: Optional[int] = 1
+     max_memory: Optional[int] = 0
+
+     # face analyser
+     face_analyser_order: Optional[FaceAnalyserOrder] = 'left-right'
+     face_analyser_age: Optional[FaceAnalyserAge] = None
+     face_analyser_gender: Optional[FaceAnalyserGender] = None
+     face_detector_model: Optional[FaceDetectorModel] = 'retinaface'
+     face_detector_size: Optional[str] = '640x640'
+     face_detector_score: Optional[float] = 0.5
+     face_recognizer_model: Optional[FaceRecognizerModel] = 'arcface_inswapper'
+
+     # face selector
+     face_selector_mode: Optional[FaceSelectorMode] = 'reference'
+     reference_face_position: Optional[int] = 0
+     reference_face_distance: Optional[float] = 0.6
+     reference_frame_number: Optional[int] = 0
+
+     # face mask
+     face_mask_blur: Optional[float] = 0.3
+     face_mask_padding: Optional[Padding] = (0, 0, 0, 0)
+
+     # frame extraction
+     trim_frame_start: Optional[int] = None
+     trim_frame_end: Optional[int] = None
+     temp_frame_format: Optional[TempFrameFormat] = 'jpg'
+     temp_frame_quality: Optional[int] = 100
+     keep_temp: Optional[bool] = False
+
+     # output creation
+     output_image_quality: Optional[int] = 80
+     output_video_encoder: Optional[OutputVideoEncoder] = 'libx264'
+     output_video_quality: Optional[int] = 80
+     keep_fps: Optional[bool] = False
+     skip_audio: Optional[bool] = False
+
+     # frame processors
+     frame_processors: List[str] = ['face_blur']
+
+     face_swapper_model: Optional[FaceSwapperModel] = 'inswapper_128'
+     face_enhancer_model: Optional[FaceEnhancerModel] = 'gfpgan_1.4'
+     face_enhancer_blend: Optional[int] = 80
+     frame_enhancer_model: Optional[FrameEnhancerModel] = 'real_esrgan_x2plus'
+     frame_enhancer_blend: Optional[int] = 80
+     face_debugger_items: Optional[List[FaceDebuggerItem]] = ['kps', 'face-mask']
+
+
+ def print_globals():
+     print(f'execution_providers: {globals.execution_providers}')
+     print(f'execution_thread_count: {globals.execution_thread_count}')
+     print(f'execution_queue_count: {globals.execution_queue_count}')
+     print(f'max_memory: {globals.max_memory}')
+     print(f'face_analyser_order: {globals.face_analyser_order}')
+     print(f'face_analyser_age: {globals.face_analyser_age}')
+     print(f'face_analyser_gender: {globals.face_analyser_gender}')
+     print(f'face_detector_model: {globals.face_detector_model}')
+     print(f'face_detector_size: {globals.face_detector_size}')
+     print(f'face_detector_score: {globals.face_detector_score}')
+     print(f'face_recognizer_model: {globals.face_recognizer_model}')
+     print(f'face_selector_mode: {globals.face_selector_mode}')
+     print(f'reference_face_position: {globals.reference_face_position}')
+     print(f'reference_face_distance: {globals.reference_face_distance}')
+     print(f'reference_frame_number: {globals.reference_frame_number}')
+     print(f'face_mask_blur: {globals.face_mask_blur}')
+     print(f'face_mask_padding: {globals.face_mask_padding}')
+     print(f'trim_frame_start: {globals.trim_frame_start}')
+     print(f'trim_frame_end: {globals.trim_frame_end}')
+     print(f'temp_frame_format: {globals.temp_frame_format}')
+     print(f'temp_frame_quality: {globals.temp_frame_quality}')
+     print(f'keep_temp: {globals.keep_temp}')
+     print(f'output_image_quality: {globals.output_image_quality}')
+     print(f'output_video_encoder: {globals.output_video_encoder}')
+     print(f'output_video_quality: {globals.output_video_quality}')
+     print(f'keep_fps: {globals.keep_fps}')
+     print(f'skip_audio: {globals.skip_audio}')
+     print(f'frame_processors: {globals.frame_processors}')
+     print(f'face_swapper_model: {frame_processors_globals.face_swapper_model}')
+     print(f'face_enhancer_model: {frame_processors_globals.face_enhancer_model}')
+     print(f'face_enhancer_blend: {frame_processors_globals.face_enhancer_blend}')
+     print(f'frame_enhancer_model: {frame_processors_globals.frame_enhancer_model}')
+     print(f'frame_enhancer_blend: {frame_processors_globals.frame_enhancer_blend}')
+     print(f'face_debugger_items: {frame_processors_globals.face_debugger_items}')
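Editor's note: a minimal request body only needs the identity and file-type fields declared at the top of Params; everything else falls back to the defaults on the model. The sketch below is hedged: it assumes the server from facefusion/api/core.py is running locally on port 7860, and the fuller client in facefusion/api/test/test.py (next file) exercises more of these options.

import base64
import requests

def to_base64(path: str) -> str:
    with open(path, 'rb') as f:
        return base64.b64encode(f.read()).decode('utf-8')

payload = {
    'user_id': 'demo',                  # required
    'source': to_base64('source.jpg'),  # face to take identity from
    'target': to_base64('target.jpg'),  # required
    'source_type': 'jpg',               # required
    'target_type': 'jpg',               # required
    # all other fields (models, thresholds, quality) use the Params defaults
}
response = requests.post('http://127.0.0.1:7860/', json = payload)
print(response.json().keys())           # expect 'output' on success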
facefusion/api/test/test.py ADDED
@@ -0,0 +1,48 @@
+ import base64
+ import requests
+ import time
+
+ def image_to_base64_str(image_path):
+     with open(image_path, "rb") as image_file:
+         encoded_string = base64.b64encode(image_file.read())
+     return encoded_string.decode('utf-8')
+
+ source_image_path = 'source.jpg'
+ target_image_path = 'target.jpg'
+
+ source_str = image_to_base64_str(source_image_path)
+ target_str = image_to_base64_str(target_image_path)
+
+ params = {
+     'user_id': 'test',
+     'source': source_str,
+     'target': target_str,
+     'source_type': 'jpg',
+     'target_type': 'jpg',
+     'frame_processors': ['face_swapper', 'face_enhancer'],
+     # 'face_mask_blur': 0.5,
+     # 'face_mask_padding': [5, 5, 5, 5],
+     'face_enhancer_model': 'gfpgan_1.4',
+     'keep_fps': True,
+     'output_image_quality': 100,
+     'execution_thread_count': 40,
+     'face_selector_mode': 'one',
+ }
+
+ url = 'http://0.0.0.0:7860/'
+ response = requests.post(url, json=params)
+
+ # Check the status code and the response body
+ print("Status Code:", response.status_code)
+ print("Response Body:")
+ start = time.time()
+ # Only proceed when the status code is 200
+ if response.status_code == 200:
+     output_data = base64.b64decode(response.json()['output'])
+     # print("response.json()", response.json())
+     with open(f'/workspaces/facefusion-api/facefusion/api/temp/output/{int(time.time())}a.jpg', 'wb') as f:
+         f.write(output_data)
+     end = time.time()
+     print("elapsed", end - start)
+ else:
+     print("Error: The request did not succeed.")
facefusion/choices.py ADDED
@@ -0,0 +1,26 @@
+ from typing import List
+
+ import numpy
+
+ from facefusion.typing import FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder
+
+
+ face_analyser_orders : List[FaceAnalyserOrder] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best' ]
+ face_analyser_ages : List[FaceAnalyserAge] = [ 'child', 'teen', 'adult', 'senior' ]
+ face_analyser_genders : List[FaceAnalyserGender] = [ 'male', 'female' ]
+ face_detector_models : List[str] = [ 'retinaface', 'yunet' ]
+ face_detector_sizes : List[str] = [ '160x160', '320x320', '480x480', '512x512', '640x640', '768x768', '960x960', '1024x1024' ]
+ face_selector_modes : List[FaceSelectorMode] = [ 'reference', 'one', 'many' ]
+ temp_frame_formats : List[TempFrameFormat] = [ 'jpg', 'png' ]
+ output_video_encoders : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ]
+
+ execution_thread_count_range : List[int] = numpy.arange(1, 129, 1).tolist()
+ execution_queue_count_range : List[int] = numpy.arange(1, 33, 1).tolist()
+ max_memory_range : List[int] = numpy.arange(0, 129, 1).tolist()
+ face_detector_score_range : List[float] = numpy.arange(0.0, 1.05, 0.05).tolist()
+ face_mask_blur_range : List[float] = numpy.arange(0.0, 1.05, 0.05).tolist()
+ face_mask_padding_range : List[float] = numpy.arange(0, 101, 1).tolist()
+ reference_face_distance_range : List[float] = numpy.arange(0.0, 1.55, 0.05).tolist()
+ temp_frame_quality_range : List[int] = numpy.arange(0, 101, 1).tolist()
+ output_image_quality_range : List[int] = numpy.arange(0, 101, 1).tolist()
+ output_video_quality_range : List[int] = numpy.arange(0, 101, 1).tolist()
facefusion/content_analyser.py ADDED
@@ -0,0 +1,102 @@
+ from typing import Any, Dict
+ from functools import lru_cache
+ import threading
+ import cv2
+ import numpy
+ import onnxruntime
+ from tqdm import tqdm
+
+ import facefusion.globals
+ from facefusion import wording
+ from facefusion.typing import Frame, ModelValue
+ from facefusion.vision import get_video_frame, count_video_frame_total, read_image, detect_fps
+ from facefusion.utilities import resolve_relative_path, conditional_download
+
+ CONTENT_ANALYSER = None
+ THREAD_LOCK : threading.Lock = threading.Lock()
+ MODELS : Dict[str, ModelValue] =\
+ {
+     'open_nsfw':
+     {
+         'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/open_nsfw.onnx',
+         'path': resolve_relative_path('../.assets/models/open_nsfw.onnx')
+     }
+ }
+ MAX_PROBABILITY = 0.80
+ MAX_RATE = 5
+ STREAM_COUNTER = 0
+
+
+ def get_content_analyser() -> Any:
+     global CONTENT_ANALYSER
+
+     with THREAD_LOCK:
+         if CONTENT_ANALYSER is None:
+             model_path = MODELS.get('open_nsfw').get('path')
+             CONTENT_ANALYSER = onnxruntime.InferenceSession(model_path, providers = facefusion.globals.execution_providers)
+     return CONTENT_ANALYSER
+
+
+ def clear_content_analyser() -> None:
+     global CONTENT_ANALYSER
+
+     CONTENT_ANALYSER = None
+
+
+ def pre_check() -> bool:
+     if not facefusion.globals.skip_download:
+         download_directory_path = resolve_relative_path('../.assets/models')
+         model_url = MODELS.get('open_nsfw').get('url')
+         conditional_download(download_directory_path, [ model_url ])
+     return True
+
+
+ def analyse_stream(frame : Frame, fps : float) -> bool:
+     global STREAM_COUNTER
+
+     STREAM_COUNTER = STREAM_COUNTER + 1
+     if STREAM_COUNTER % int(fps) == 0:
+         return analyse_frame(frame)
+     return False
+
+
+ def prepare_frame(frame : Frame) -> Frame:
+     frame = cv2.resize(frame, (224, 224)).astype(numpy.float32)
+     frame -= numpy.array([ 104, 117, 123 ]).astype(numpy.float32)
+     frame = numpy.expand_dims(frame, axis = 0)
+     return frame
+
+
+ def analyse_frame(frame : Frame) -> bool:
+     content_analyser = get_content_analyser()
+     frame = prepare_frame(frame)
+     probability = content_analyser.run(None,
+     {
+         'input:0': frame
+     })[0][0][1]
+     return probability > MAX_PROBABILITY
+
+
+ @lru_cache(maxsize = None)
+ def analyse_image(image_path : str) -> bool:
+     frame = read_image(image_path)
+     return analyse_frame(frame)
+
+
+ @lru_cache(maxsize = None)
+ def analyse_video(video_path : str, start_frame : int, end_frame : int) -> bool:
+     video_frame_total = count_video_frame_total(video_path)
+     fps = detect_fps(video_path)
+     frame_range = range(start_frame or 0, end_frame or video_frame_total)
+     rate = 0.0
+     counter = 0
+     with tqdm(total = len(frame_range), desc = wording.get('analysing'), unit = 'frame', ascii = ' =') as progress:
+         for frame_number in frame_range:
+             if frame_number % int(fps) == 0:
+                 frame = get_video_frame(video_path, frame_number)
+                 if analyse_frame(frame):
+                     counter += 1
+             rate = counter * int(fps) / len(frame_range) * 100
+             progress.update()
+             progress.set_postfix(rate = rate)
+     return rate > MAX_RATE
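Editor's note: the sampling math in analyse_video is easy to misread, so here is a worked check with hypothetical numbers. Only one frame per second is scored, and the flagged count is scaled back up by fps before the MAX_RATE comparison, so a single flagged sample can mark a short clip.

fps, frame_total = 25, 300          # hypothetical 12-second clip at 25 fps
sampled = [n for n in range(frame_total) if n % int(fps) == 0]
assert len(sampled) == 12           # frames 0, 25, ..., 275 are scored
flagged = 1
rate = flagged * int(fps) / frame_total * 100
assert round(rate, 2) == 8.33       # already above MAX_RATE = 5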
facefusion/core.py ADDED
@@ -0,0 +1,362 @@
+ import os
+
+ os.environ['OMP_NUM_THREADS'] = '1'
+
+ import signal
+ import sys
+ import warnings
+ import platform
+ import shutil
+ import onnxruntime
+ from argparse import ArgumentParser, HelpFormatter
+
+ import facefusion.choices
+ import facefusion.globals
+ from facefusion.face_analyser import get_one_face
+ from facefusion.face_reference import get_face_reference, set_face_reference
+ from facefusion.vision import get_video_frame, read_image
+ from facefusion import face_analyser, content_analyser, metadata, wording
+ from facefusion.content_analyser import analyse_image, analyse_video
+ from facefusion.processors.frame.core import get_frame_processors_modules, load_frame_processor_module, api_get_frame_processors_modules
+ from facefusion.utilities import is_image, is_video, detect_fps, compress_image, merge_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clear_temp, list_module_names, encode_execution_providers, decode_execution_providers, normalize_output_path, normalize_padding, create_metavar, update_status
+
+ onnxruntime.set_default_logger_severity(3)
+ warnings.filterwarnings('ignore', category = UserWarning, module = 'gradio')
+ warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision')
+
+
+ def cli() -> None:
+     signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
+     program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120), add_help = False)
+     # api
+     program.add_argument('--api', help = 'Run in API mode', action = 'store_true', dest = 'api_mode')
+     # general
+     program.add_argument('-s', '--source', help = wording.get('source_help'), dest = 'source_path')
+     program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path')
+     program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path')
+     program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
+     # misc
+     group_misc = program.add_argument_group('misc')
+     group_misc.add_argument('--skip-download', help = wording.get('skip_download_help'), dest = 'skip_download', action = 'store_true')
+     group_misc.add_argument('--headless', help = wording.get('headless_help'), dest = 'headless', action = 'store_true')
+     # execution
+     group_execution = program.add_argument_group('execution')
+     group_execution.add_argument('--execution-providers', help = wording.get('execution_providers_help'), dest = 'execution_providers', default = [ 'cpu' ], choices = encode_execution_providers(onnxruntime.get_available_providers()), nargs = '+')
+     group_execution.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), dest = 'execution_thread_count', type = int, default = 4, choices = facefusion.choices.execution_thread_count_range, metavar = create_metavar(facefusion.choices.execution_thread_count_range))
+     group_execution.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), dest = 'execution_queue_count', type = int, default = 1, choices = facefusion.choices.execution_queue_count_range, metavar = create_metavar(facefusion.choices.execution_queue_count_range))
+     group_execution.add_argument('--max-memory', help = wording.get('max_memory_help'), dest = 'max_memory', type = int, choices = facefusion.choices.max_memory_range, metavar = create_metavar(facefusion.choices.max_memory_range))
+     # face analyser
+     group_face_analyser = program.add_argument_group('face analyser')
+     group_face_analyser.add_argument('--face-analyser-order', help = wording.get('face_analyser_order_help'), dest = 'face_analyser_order', default = 'left-right', choices = facefusion.choices.face_analyser_orders)
+     group_face_analyser.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), dest = 'face_analyser_age', choices = facefusion.choices.face_analyser_ages)
+     group_face_analyser.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), dest = 'face_analyser_gender', choices = facefusion.choices.face_analyser_genders)
+     group_face_analyser.add_argument('--face-detector-model', help = wording.get('face_detector_model_help'), dest = 'face_detector_model', default = 'retinaface', choices = facefusion.choices.face_detector_models)
+     group_face_analyser.add_argument('--face-detector-size', help = wording.get('face_detector_size_help'), dest = 'face_detector_size', default = '640x640', choices = facefusion.choices.face_detector_sizes)
+     group_face_analyser.add_argument('--face-detector-score', help = wording.get('face_detector_score_help'), dest = 'face_detector_score', type = float, default = 0.5, choices = facefusion.choices.face_detector_score_range, metavar = create_metavar(facefusion.choices.face_detector_score_range))
+     # face selector
+     group_face_selector = program.add_argument_group('face selector')
+     group_face_selector.add_argument('--face-selector-mode', help = wording.get('face_selector_mode_help'), dest = 'face_selector_mode', default = 'reference', choices = facefusion.choices.face_selector_modes)
+     group_face_selector.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), dest = 'reference_face_position', type = int, default = 0)
+     group_face_selector.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), dest = 'reference_face_distance', type = float, default = 0.6, choices = facefusion.choices.reference_face_distance_range, metavar = create_metavar(facefusion.choices.reference_face_distance_range))
+     group_face_selector.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), dest = 'reference_frame_number', type = int, default = 0)
+     # face mask
+     group_face_mask = program.add_argument_group('face mask')
+     group_face_mask.add_argument('--face-mask-blur', help = wording.get('face_mask_blur_help'), dest = 'face_mask_blur', type = float, default = 0.3, choices = facefusion.choices.face_mask_blur_range, metavar = create_metavar(facefusion.choices.face_mask_blur_range))
+     group_face_mask.add_argument('--face-mask-padding', help = wording.get('face_mask_padding_help'), dest = 'face_mask_padding', type = int, default = [ 0, 0, 0, 0 ], nargs = '+')
+     # frame extraction
+     group_frame_extraction = program.add_argument_group('frame extraction')
+     group_frame_extraction.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), dest = 'trim_frame_start', type = int)
+     group_frame_extraction.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), dest = 'trim_frame_end', type = int)
+     group_frame_extraction.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), dest = 'temp_frame_format', default = 'jpg', choices = facefusion.choices.temp_frame_formats)
+     group_frame_extraction.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), dest = 'temp_frame_quality', type = int, default = 100, choices = facefusion.choices.temp_frame_quality_range, metavar = create_metavar(facefusion.choices.temp_frame_quality_range))
+     group_frame_extraction.add_argument('--keep-temp', help = wording.get('keep_temp_help'), dest = 'keep_temp', action = 'store_true')
+     # output creation
+     group_output_creation = program.add_argument_group('output creation')
+     group_output_creation.add_argument('--output-image-quality', help = wording.get('output_image_quality_help'), dest = 'output_image_quality', type = int, default = 80, choices = facefusion.choices.output_image_quality_range, metavar = create_metavar(facefusion.choices.output_image_quality_range))
+     group_output_creation.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), dest = 'output_video_encoder', default = 'libx264', choices = facefusion.choices.output_video_encoders)
+     group_output_creation.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), dest = 'output_video_quality', type = int, default = 80, choices = facefusion.choices.output_video_quality_range, metavar = create_metavar(facefusion.choices.output_video_quality_range))
+     group_output_creation.add_argument('--keep-fps', help = wording.get('keep_fps_help'), dest = 'keep_fps', action = 'store_true')
+     group_output_creation.add_argument('--skip-audio', help = wording.get('skip_audio_help'), dest = 'skip_audio', action = 'store_true')
+     # frame processors
+     available_frame_processors = list_module_names('facefusion/processors/frame/modules')
+     program = ArgumentParser(parents = [ program ], formatter_class = program.formatter_class, add_help = True)
+     group_frame_processors = program.add_argument_group('frame processors')
+     group_frame_processors.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(available_frame_processors)), dest = 'frame_processors', default = [ 'face_swapper' ], nargs = '+')
+     for frame_processor in available_frame_processors:
+         frame_processor_module = load_frame_processor_module(frame_processor)
+         frame_processor_module.register_args(group_frame_processors)
+     # uis
+     group_uis = program.add_argument_group('uis')
+     group_uis.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(list_module_names('facefusion/uis/layouts'))), dest = 'ui_layouts', default = [ 'default' ], nargs = '+')
+     run(program)
+
+
+ def apply_args(program : ArgumentParser) -> None:
+     args = program.parse_args()
+     # api
+     facefusion.globals.api_mode = args.api_mode
+     # general
+     facefusion.globals.source_path = args.source_path
+     facefusion.globals.target_path = args.target_path
+     facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_path, facefusion.globals.target_path, args.output_path)
+     # misc
+     facefusion.globals.skip_download = args.skip_download
+     facefusion.globals.headless = args.headless
+     # execution
+     facefusion.globals.execution_providers = decode_execution_providers(args.execution_providers)
+     facefusion.globals.execution_thread_count = args.execution_thread_count
+     facefusion.globals.execution_queue_count = args.execution_queue_count
+     facefusion.globals.max_memory = args.max_memory
+     # face analyser
+     facefusion.globals.face_analyser_order = args.face_analyser_order
+     facefusion.globals.face_analyser_age = args.face_analyser_age
+     facefusion.globals.face_analyser_gender = args.face_analyser_gender
+     facefusion.globals.face_detector_model = args.face_detector_model
+     facefusion.globals.face_detector_size = args.face_detector_size
+     facefusion.globals.face_detector_score = args.face_detector_score
+     # face selector
+     facefusion.globals.face_selector_mode = args.face_selector_mode
+     facefusion.globals.reference_face_position = args.reference_face_position
+     facefusion.globals.reference_face_distance = args.reference_face_distance
+     facefusion.globals.reference_frame_number = args.reference_frame_number
+     # face mask
+     facefusion.globals.face_mask_blur = args.face_mask_blur
+     facefusion.globals.face_mask_padding = normalize_padding(args.face_mask_padding)
+     # frame extraction
+     facefusion.globals.trim_frame_start = args.trim_frame_start
+     facefusion.globals.trim_frame_end = args.trim_frame_end
+     facefusion.globals.temp_frame_format = args.temp_frame_format
+     facefusion.globals.temp_frame_quality = args.temp_frame_quality
+     facefusion.globals.keep_temp = args.keep_temp
+     # output creation
+     facefusion.globals.output_image_quality = args.output_image_quality
+     facefusion.globals.output_video_encoder = args.output_video_encoder
+     facefusion.globals.output_video_quality = args.output_video_quality
+     facefusion.globals.keep_fps = args.keep_fps
+     facefusion.globals.skip_audio = args.skip_audio
+     # frame processors
+     available_frame_processors = list_module_names('facefusion/processors/frame/modules')
+     # facefusion.globals.frame_processors = args.frame_processors
+     facefusion.globals.frame_processors = ['face_swapper', 'face_enhancer']
+     print('which parameters are set')
+     for frame_processor in available_frame_processors:
+         frame_processor_module = load_frame_processor_module(frame_processor)
+         frame_processor_module.apply_args(program)
+     # uis
+     facefusion.globals.ui_layouts = args.ui_layouts
+
+
+ def run(program : ArgumentParser) -> None:
+     apply_args(program)
+     limit_resources()
+     if not pre_check() or not content_analyser.pre_check() or not face_analyser.pre_check():
+         return
+     for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+         if not frame_processor_module.pre_check():
+             return
+     if facefusion.globals.headless:
+         conditional_process()
+     elif facefusion.globals.api_mode:
+         import facefusion.api.core as api
+         for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+             print('start checking', frame_processor_module)
+             if not frame_processor_module.pre_check():
+                 return
+         api.launch()
+     else:
+         import facefusion.uis.core as ui
+
+         for ui_layout in ui.get_ui_layouts_modules(facefusion.globals.ui_layouts):
+             if not ui_layout.pre_check():
+                 return
+         ui.launch()
+
+
+ def destroy() -> None:
+     if facefusion.globals.target_path:
+         clear_temp(facefusion.globals.target_path)
+     sys.exit()
+
+
+ def limit_resources() -> None:
+     if facefusion.globals.max_memory:
+         memory = facefusion.globals.max_memory * 1024 ** 3
+         if platform.system().lower() == 'darwin':
+             memory = facefusion.globals.max_memory * 1024 ** 6
+         if platform.system().lower() == 'windows':
+             import ctypes
+             kernel32 = ctypes.windll.kernel32 # type: ignore[attr-defined]
+             kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
+         else:
+             import resource
+             resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
+
+
+ def pre_check() -> bool:
+     if sys.version_info < (3, 9):
+         update_status(wording.get('python_not_supported').format(version = '3.9'))
+         return False
+     if not shutil.which('ffmpeg'):
+         update_status(wording.get('ffmpeg_not_installed'))
+         return False
+     return True
+
+
+ def conditional_process() -> None:
+     conditional_set_face_reference()
+     for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+         if not frame_processor_module.pre_process('output'):
+             return
+     if is_image(facefusion.globals.target_path):
+         process_image()
+     if is_video(facefusion.globals.target_path):
+         process_video()
+
+
+ def api_conditional_process() -> None:
+     conditional_set_face_reference()
+     for frame_processor_module in api_get_frame_processors_modules(facefusion.globals.frame_processors):
+         if not frame_processor_module.pre_process('output'):
+             return
+     if is_image(facefusion.globals.target_path):
+         api_process_image()
+     if is_video(facefusion.globals.target_path):
+         api_process_video()
+
+
+ def conditional_set_face_reference() -> None:
+     if 'reference' in facefusion.globals.face_selector_mode and not get_face_reference():
+         if is_video(facefusion.globals.target_path):
+             reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
+         else:
+             reference_frame = read_image(facefusion.globals.target_path)
+         reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
+         set_face_reference(reference_face)
+
+
+ def process_image() -> None:
+     # if analyse_image(facefusion.globals.target_path):
+     #     return
+     shutil.copy2(facefusion.globals.target_path, facefusion.globals.output_path)
+     # process frame
+     for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+         update_status(wording.get('processing'), frame_processor_module.NAME)
+         frame_processor_module.process_image(facefusion.globals.source_path, facefusion.globals.output_path, facefusion.globals.output_path)
+         frame_processor_module.post_process()
+     # compress image
+     update_status(wording.get('compressing_image'))
+     if not compress_image(facefusion.globals.output_path):
+         update_status(wording.get('compressing_image_failed'))
+     # validate image
+     if is_image(facefusion.globals.output_path):
+         update_status(wording.get('processing_image_succeed'))
+     else:
+         update_status(wording.get('processing_image_failed'))
+
+
+ def process_video() -> None:
+     # if analyse_video(facefusion.globals.target_path, facefusion.globals.trim_frame_start, facefusion.globals.trim_frame_end):
+     #     return
+     fps = detect_fps(facefusion.globals.target_path) if facefusion.globals.keep_fps else 25.0
+     # create temp
+     update_status(wording.get('creating_temp'))
+     create_temp(facefusion.globals.target_path)
+     # extract frames
+     update_status(wording.get('extracting_frames_fps').format(fps = fps))
+     extract_frames(facefusion.globals.target_path, fps)
+     # process frame
+     temp_frame_paths = get_temp_frame_paths(facefusion.globals.target_path)
+     if temp_frame_paths:
+         for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+             update_status(wording.get('processing'), frame_processor_module.NAME)
+             frame_processor_module.process_video(facefusion.globals.source_path, temp_frame_paths)
+             frame_processor_module.post_process()
+     else:
+         update_status(wording.get('temp_frames_not_found'))
+         return
+     # merge video
+     update_status(wording.get('merging_video_fps').format(fps = fps))
+     if not merge_video(facefusion.globals.target_path, fps):
+         update_status(wording.get('merging_video_failed'))
+         return
+     # handle audio
+     if facefusion.globals.skip_audio:
+         update_status(wording.get('skipping_audio'))
+         move_temp(facefusion.globals.target_path, facefusion.globals.output_path)
+     else:
+         update_status(wording.get('restoring_audio'))
+         if not restore_audio(facefusion.globals.target_path, facefusion.globals.output_path):
+             update_status(wording.get('restoring_audio_failed'))
+             move_temp(facefusion.globals.target_path, facefusion.globals.output_path)
+     # clear temp
+     update_status(wording.get('clearing_temp'))
+     clear_temp(facefusion.globals.target_path)
+     # validate video
+     if is_video(facefusion.globals.output_path):
+         update_status(wording.get('processing_video_succeed'))
+     else:
+         update_status(wording.get('processing_video_failed'))
+
+
+ def api_process_image() -> None:
+     # if analyse_image(facefusion.globals.target_path):
+     #     return
+     shutil.copy2(facefusion.globals.target_path, facefusion.globals.output_path)
+     # process frame
+     for frame_processor_module in api_get_frame_processors_modules(facefusion.globals.frame_processors):
+         update_status(wording.get('processing'), frame_processor_module.NAME)
+         frame_processor_module.process_image(facefusion.globals.source_path, facefusion.globals.output_path, facefusion.globals.output_path)
+         frame_processor_module.post_process()
+     # compress image
+     update_status(wording.get('compressing_image'))
+     if not compress_image(facefusion.globals.output_path):
+         update_status(wording.get('compressing_image_failed'))
+     # validate image
+     if is_image(facefusion.globals.output_path):
+         update_status(wording.get('processing_image_succeed'))
+     else:
+         update_status(wording.get('processing_image_failed'))
+
+
+ def api_process_video() -> None:
+     # if analyse_video(facefusion.globals.target_path, facefusion.globals.trim_frame_start, facefusion.globals.trim_frame_end):
+     #     return
+     fps = detect_fps(facefusion.globals.target_path) if facefusion.globals.keep_fps else 25.0
+     # create temp
+     update_status(wording.get('creating_temp'))
+     create_temp(facefusion.globals.target_path)
+     # extract frames
+     update_status(wording.get('extracting_frames_fps').format(fps = fps))
+     extract_frames(facefusion.globals.target_path, fps)
+     # process frame
+     temp_frame_paths = get_temp_frame_paths(facefusion.globals.target_path)
+     if temp_frame_paths:
+         for frame_processor_module in api_get_frame_processors_modules(facefusion.globals.frame_processors):
+             update_status(wording.get('processing'), frame_processor_module.NAME)
+             frame_processor_module.process_video(facefusion.globals.source_path, temp_frame_paths)
+             frame_processor_module.post_process()
+     else:
+         update_status(wording.get('temp_frames_not_found'))
+         return
+     # merge video
+     update_status(wording.get('merging_video_fps').format(fps = fps))
+     if not merge_video(facefusion.globals.target_path, fps):
+         update_status(wording.get('merging_video_failed'))
+         return
+     # handle audio
+     if facefusion.globals.skip_audio:
+         update_status(wording.get('skipping_audio'))
+         move_temp(facefusion.globals.target_path, facefusion.globals.output_path)
+     else:
+         update_status(wording.get('restoring_audio'))
+         if not restore_audio(facefusion.globals.target_path, facefusion.globals.output_path):
+             update_status(wording.get('restoring_audio_failed'))
+             move_temp(facefusion.globals.target_path, facefusion.globals.output_path)
+     # clear temp
+     update_status(wording.get('clearing_temp'))
+     clear_temp(facefusion.globals.target_path)
+     # validate video
+     if is_video(facefusion.globals.output_path):
+         update_status(wording.get('processing_video_succeed'))
+     else:
+         update_status(wording.get('processing_video_failed'))
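Editor's note: one detail worth calling out in cli() above is that the parser is built twice. A first ArgumentParser (add_help = False) collects the core options, then a second parser inherits it via parents so the dynamically loaded frame processor modules can register their own arguments before the final parse. A stripped-down sketch of that pattern (argument names here are illustrative, not taken from the codebase):

from argparse import ArgumentParser

base = ArgumentParser(add_help = False)
base.add_argument('-s', '--source', dest = 'source_path')

# second stage: inherit everything from base, then append dynamic args
program = ArgumentParser(parents = [ base ], add_help = True)
program.add_argument('--frame-processors', dest = 'frame_processors', default = [ 'face_swapper' ], nargs = '+')

args = program.parse_args([ '-s', 'face.jpg', '--frame-processors', 'face_swapper', 'face_enhancer' ])
print(args.source_path, args.frame_processors)  # face.jpg ['face_swapper', 'face_enhancer']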
facefusion/face_analyser.py ADDED
@@ -0,0 +1,309 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Optional, List, Dict, Tuple
2
+ import threading
3
+ import cv2
4
+ import numpy
5
+ import onnxruntime
6
+
7
+ import facefusion.globals
8
+ from facefusion.face_cache import get_faces_cache, set_faces_cache
9
+ from facefusion.face_helper import warp_face, create_static_anchors, distance_to_kps, distance_to_bbox, apply_nms
10
+ from facefusion.typing import Frame, Face, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, ModelValue, Bbox, Kps, Score, Embedding
11
+ from facefusion.utilities import resolve_relative_path, conditional_download
12
+ from facefusion.vision import resize_frame_dimension
13
+
14
+ FACE_ANALYSER = None
15
+ THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
16
+ THREAD_LOCK : threading.Lock = threading.Lock()
17
+ MODELS : Dict[str, ModelValue] =\
18
+ {
19
+ 'face_detector_retinaface':
20
+ {
21
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/retinaface_10g.onnx',
22
+ 'path': resolve_relative_path('../.assets/models/retinaface_10g.onnx')
23
+ },
24
+ 'face_detector_yunet':
25
+ {
26
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/yunet_2023mar.onnx',
27
+ 'path': resolve_relative_path('../.assets/models/yunet_2023mar.onnx')
28
+ },
29
+ 'face_recognizer_arcface_blendface':
30
+ {
31
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_w600k_r50.onnx',
32
+ 'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx')
33
+ },
34
+ 'face_recognizer_arcface_inswapper':
35
+ {
36
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_w600k_r50.onnx',
37
+ 'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx')
38
+ },
39
+ 'face_recognizer_arcface_simswap':
40
+ {
41
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_simswap.onnx',
42
+ 'path': resolve_relative_path('../.assets/models/arcface_simswap.onnx')
43
+ },
44
+ 'gender_age':
45
+ {
46
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gender_age.onnx',
47
+ 'path': resolve_relative_path('../.assets/models/gender_age.onnx')
48
+ }
49
+ }
50
+
51
+
52
+ def get_face_analyser() -> Any:
53
+ global FACE_ANALYSER
54
+
55
+ with THREAD_LOCK:
56
+ if FACE_ANALYSER is None:
57
+ if facefusion.globals.face_detector_model == 'retinaface':
58
+ face_detector = onnxruntime.InferenceSession(MODELS.get('face_detector_retinaface').get('path'), providers = facefusion.globals.execution_providers)
59
+ if facefusion.globals.face_detector_model == 'yunet':
60
+ face_detector = cv2.FaceDetectorYN.create(MODELS.get('face_detector_yunet').get('path'), '', (0, 0))
61
+ if facefusion.globals.face_recognizer_model == 'arcface_blendface':
62
+ face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_blendface').get('path'), providers = facefusion.globals.execution_providers)
63
+ if facefusion.globals.face_recognizer_model == 'arcface_inswapper':
64
+ face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_inswapper').get('path'), providers = facefusion.globals.execution_providers)
65
+ if facefusion.globals.face_recognizer_model == 'arcface_simswap':
66
+ face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_simswap').get('path'), providers = facefusion.globals.execution_providers)
67
+ gender_age = onnxruntime.InferenceSession(MODELS.get('gender_age').get('path'), providers = facefusion.globals.execution_providers)
68
+ FACE_ANALYSER =\
69
+ {
70
+ 'face_detector': face_detector,
71
+ 'face_recognizer': face_recognizer,
72
+ 'gender_age': gender_age
73
+ }
74
+ return FACE_ANALYSER
75
+
76
+
77
+ def clear_face_analyser() -> Any:
78
+ global FACE_ANALYSER
79
+
80
+ FACE_ANALYSER = None
81
+
82
+
83
+ def pre_check() -> bool:
84
+ if not facefusion.globals.skip_download:
85
+ download_directory_path = resolve_relative_path('../.assets/models')
86
+ model_urls =\
87
+ [
88
+ MODELS.get('face_detector_retinaface').get('url'),
89
+ MODELS.get('face_detector_yunet').get('url'),
90
+ MODELS.get('face_recognizer_arcface_inswapper').get('url'),
91
+ MODELS.get('face_recognizer_arcface_simswap').get('url'),
92
+ MODELS.get('gender_age').get('url')
93
+ ]
94
+ conditional_download(download_directory_path, model_urls)
95
+ return True
96
+
97
+
98
+ def extract_faces(frame: Frame) -> List[Face]:
99
+ face_detector_width, face_detector_height = map(int, facefusion.globals.face_detector_size.split('x'))
100
+ frame_height, frame_width, _ = frame.shape
101
+ temp_frame = resize_frame_dimension(frame, face_detector_width, face_detector_height)
102
+ temp_frame_height, temp_frame_width, _ = temp_frame.shape
103
+ ratio_height = frame_height / temp_frame_height
104
+ ratio_width = frame_width / temp_frame_width
105
+ if facefusion.globals.face_detector_model == 'retinaface':
106
+ bbox_list, kps_list, score_list = detect_with_retinaface(temp_frame, temp_frame_height, temp_frame_width, face_detector_height, face_detector_width, ratio_height, ratio_width)
107
+ return create_faces(frame, bbox_list, kps_list, score_list)
108
+ elif facefusion.globals.face_detector_model == 'yunet':
109
+ bbox_list, kps_list, score_list = detect_with_yunet(temp_frame, temp_frame_height, temp_frame_width, ratio_height, ratio_width)
110
+ return create_faces(frame, bbox_list, kps_list, score_list)
111
+ return []
112
+
113
+
114
+ def detect_with_retinaface(temp_frame : Frame, temp_frame_height : int, temp_frame_width : int, face_detector_height : int, face_detector_width : int, ratio_height : float, ratio_width : float) -> Tuple[List[Bbox], List[Kps], List[Score]]:
115
+ face_detector = get_face_analyser().get('face_detector')
116
+ bbox_list = []
117
+ kps_list = []
118
+ score_list = []
119
+ feature_strides = [ 8, 16, 32 ]
120
+ feature_map_channel = 3
121
+ anchor_total = 2
122
+ prepare_frame = numpy.zeros((face_detector_height, face_detector_width, 3))
123
+ prepare_frame[:temp_frame_height, :temp_frame_width, :] = temp_frame
124
+ temp_frame = (prepare_frame - 127.5) / 128.0
125
+ temp_frame = numpy.expand_dims(temp_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
126
+ with THREAD_SEMAPHORE:
127
+ detections = face_detector.run(None,
128
+ {
129
+ face_detector.get_inputs()[0].name: temp_frame
130
+ })
131
+ for index, feature_stride in enumerate(feature_strides):
132
+ keep_indices = numpy.where(detections[index] >= facefusion.globals.face_detector_score)[0]
133
+ if keep_indices.any():
134
+ stride_height = face_detector_height // feature_stride
135
+ stride_width = face_detector_width // feature_stride
136
+ anchors = create_static_anchors(feature_stride, anchor_total, stride_height, stride_width)
137
+ bbox_raw = (detections[index + feature_map_channel] * feature_stride)
138
+ kps_raw = detections[index + feature_map_channel * 2] * feature_stride
139
+ for bbox in distance_to_bbox(anchors, bbox_raw)[keep_indices]:
140
+ bbox_list.append(numpy.array(
141
+ [
142
+ bbox[0] * ratio_width,
143
+ bbox[1] * ratio_height,
144
+ bbox[2] * ratio_width,
145
+ bbox[3] * ratio_height
146
+ ]))
147
+ for kps in distance_to_kps(anchors, kps_raw)[keep_indices]:
148
+ kps_list.append(kps * [ ratio_width, ratio_height ])
149
+ for score in detections[index][keep_indices]:
150
+ score_list.append(score[0])
151
+ return bbox_list, kps_list, score_list
152
+
153
+
154
+ def detect_with_yunet(temp_frame : Frame, temp_frame_height : int, temp_frame_width : int, ratio_height : float, ratio_width : float) -> Tuple[List[Bbox], List[Kps], List[Score]]:
155
+ face_detector = get_face_analyser().get('face_detector')
156
+ face_detector.setInputSize((temp_frame_width, temp_frame_height))
157
+ face_detector.setScoreThreshold(facefusion.globals.face_detector_score)
158
+ bbox_list = []
159
+ kps_list = []
160
+ score_list = []
161
+ with THREAD_SEMAPHORE:
162
+ _, detections = face_detector.detect(temp_frame)
163
+ if detections.any():
164
+ for detection in detections:
165
+ bbox_list.append(numpy.array(
166
+ [
167
+ detection[0] * ratio_width,
168
+ detection[1] * ratio_height,
169
+ (detection[0] + detection[2]) * ratio_width,
170
+ (detection[1] + detection[3]) * ratio_height
171
+ ]))
172
+ kps_list.append(detection[4:14].reshape((5, 2)) * [ ratio_width, ratio_height])
173
+ score_list.append(detection[14])
174
+ return bbox_list, kps_list, score_list
175
+
176
+
177
+ def create_faces(frame : Frame, bbox_list : List[Bbox], kps_list : List[Kps], score_list : List[Score]) -> List[Face] :
178
+ faces : List[Face] = []
179
+ if facefusion.globals.face_detector_score > 0:
180
+ keep_indices = apply_nms(bbox_list, 0.4)
181
+ for index in keep_indices:
182
+ bbox = bbox_list[index]
183
+ kps = kps_list[index]
184
+ score = score_list[index]
185
+ embedding, normed_embedding = calc_embedding(frame, kps)
186
+ gender, age = detect_gender_age(frame, kps)
187
+ faces.append(Face(
188
+ bbox = bbox,
189
+ kps = kps,
190
+ score = score,
191
+ embedding = embedding,
192
+ normed_embedding = normed_embedding,
193
+ gender = gender,
194
+ age = age
195
+ ))
196
+ return faces
197
+
198
+
199
+ def calc_embedding(temp_frame : Frame, kps : Kps) -> Tuple[Embedding, Embedding]:
200
+ face_recognizer = get_face_analyser().get('face_recognizer')
201
+ crop_frame, matrix = warp_face(temp_frame, kps, 'arcface_v2', (112, 112))
202
+ crop_frame = crop_frame.astype(numpy.float32) / 127.5 - 1
203
+ crop_frame = crop_frame[:, :, ::-1].transpose(2, 0, 1)
204
+ crop_frame = numpy.expand_dims(crop_frame, axis = 0)
205
+ embedding = face_recognizer.run(None,
206
+ {
207
+ face_recognizer.get_inputs()[0].name: crop_frame
208
+ })[0]
209
+ embedding = embedding.ravel()
210
+ normed_embedding = embedding / numpy.linalg.norm(embedding)
211
+ return embedding, normed_embedding
212
+
213
+
214
+ def detect_gender_age(frame : Frame, kps : Kps) -> Tuple[int, int]:
215
+ gender_age = get_face_analyser().get('gender_age')
216
+ crop_frame, affine_matrix = warp_face(frame, kps, 'arcface_v2', (96, 96))
217
+ crop_frame = numpy.expand_dims(crop_frame, axis = 0).transpose(0, 3, 1, 2).astype(numpy.float32)
218
+ prediction = gender_age.run(None,
219
+ {
220
+ gender_age.get_inputs()[0].name: crop_frame
221
+ })[0][0]
222
+ gender = int(numpy.argmax(prediction[:2]))
223
+ age = int(numpy.round(prediction[2] * 100))
224
+ return gender, age
225
+
226
+
227
+ def get_one_face(frame : Frame, position : int = 0) -> Optional[Face]:
228
+ many_faces = get_many_faces(frame)
229
+ if many_faces:
230
+ try:
231
+ return many_faces[position]
232
+ except IndexError:
233
+ return many_faces[-1]
234
+ return None
235
+
236
+
237
+ def get_many_faces(frame : Frame) -> List[Face]:
238
+ try:
239
+ faces_cache = get_faces_cache(frame)
240
+ if faces_cache:
241
+ faces = faces_cache
242
+ else:
243
+ faces = extract_faces(frame)
244
+ set_faces_cache(frame, faces)
245
+ if facefusion.globals.face_analyser_order:
246
+ faces = sort_by_order(faces, facefusion.globals.face_analyser_order)
247
+ if facefusion.globals.face_analyser_age:
248
+ faces = filter_by_age(faces, facefusion.globals.face_analyser_age)
249
+ if facefusion.globals.face_analyser_gender:
250
+ faces = filter_by_gender(faces, facefusion.globals.face_analyser_gender)
251
+ return faces
252
+ except (AttributeError, ValueError):
253
+ return []
254
+
255
+
256
+ def find_similar_faces(frame : Frame, reference_face : Face, face_distance : float) -> List[Face]:
257
+ many_faces = get_many_faces(frame)
258
+ similar_faces = []
259
+ if many_faces:
260
+ for face in many_faces:
261
+ if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'):
262
+ current_face_distance = 1 - numpy.dot(face.normed_embedding, reference_face.normed_embedding)
263
+ if current_face_distance < face_distance:
264
+ similar_faces.append(face)
265
+ return similar_faces
266
+
267
+
268
+ def sort_by_order(faces : List[Face], order : FaceAnalyserOrder) -> List[Face]:
269
+ if order == 'left-right':
270
+ return sorted(faces, key = lambda face: face.bbox[0])
271
+ if order == 'right-left':
272
+ return sorted(faces, key = lambda face: face.bbox[0], reverse = True)
273
+ if order == 'top-bottom':
274
+ return sorted(faces, key = lambda face: face.bbox[1])
275
+ if order == 'bottom-top':
276
+ return sorted(faces, key = lambda face: face.bbox[1], reverse = True)
277
+ if order == 'small-large':
278
+ return sorted(faces, key = lambda face: (face.bbox[2] - face.bbox[0]) * (face.bbox[3] - face.bbox[1]))
279
+ if order == 'large-small':
280
+ return sorted(faces, key = lambda face: (face.bbox[2] - face.bbox[0]) * (face.bbox[3] - face.bbox[1]), reverse = True)
281
+ if order == 'best-worst':
282
+ return sorted(faces, key = lambda face: face.score, reverse = True)
283
+ if order == 'worst-best':
284
+ return sorted(faces, key = lambda face: face.score)
285
+ return faces
286
+
287
+
288
+ def filter_by_age(faces : List[Face], age : FaceAnalyserAge) -> List[Face]:
289
+ filter_faces = []
290
+ for face in faces:
291
+ if face.age < 13 and age == 'child':
292
+ filter_faces.append(face)
293
+ elif face.age < 19 and age == 'teen':
294
+ filter_faces.append(face)
295
+ elif face.age < 60 and age == 'adult':
296
+ filter_faces.append(face)
297
+ elif face.age > 59 and age == 'senior':
298
+ filter_faces.append(face)
299
+ return filter_faces
300
+
301
+
302
+ def filter_by_gender(faces : List[Face], gender : FaceAnalyserGender) -> List[Face]:
303
+ filter_faces = []
304
+ for face in faces:
305
+ if face.gender == 0 and gender == 'female':
306
+ filter_faces.append(face)
307
+ if face.gender == 1 and gender == 'male':
308
+ filter_faces.append(face)
309
+ return filter_faces
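
For orientation: the matching in find_similar_faces above is plain cosine distance on the unit-normalised embeddings produced by calc_embedding, so 0 means an identical face and values near 1 mean unrelated. A minimal self-contained sketch with random stand-in vectors (not real model output):

    import numpy

    def cosine_distance(normed_a, normed_b):
        # both vectors are L2-normalised, so the dot product equals the cosine similarity
        return 1 - numpy.dot(normed_a, normed_b)

    embedding = numpy.random.rand(512).astype(numpy.float32)
    other = numpy.random.rand(512).astype(numpy.float32)
    embedding /= numpy.linalg.norm(embedding)
    other /= numpy.linalg.norm(other)
    print(cosine_distance(embedding, embedding))  # ~0.0, a perfect match
    print(cosine_distance(embedding, other))      # larger; dropped once it exceeds face_distance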
facefusion/face_cache.py ADDED
@@ -0,0 +1,29 @@
1
+ from typing import Optional, List, Dict
2
+ import hashlib
3
+
4
+ from facefusion.typing import Frame, Face
5
+
6
+ FACES_CACHE : Dict[str, List[Face]] = {}
7
+
8
+
9
+ def get_faces_cache(frame : Frame) -> Optional[List[Face]]:
10
+ frame_hash = create_frame_hash(frame)
11
+ if frame_hash in FACES_CACHE:
12
+ return FACES_CACHE[frame_hash]
13
+ return None
14
+
15
+
16
+ def set_faces_cache(frame : Frame, faces : List[Face]) -> None:
17
+ frame_hash = create_frame_hash(frame)
18
+ if frame_hash:
19
+ FACES_CACHE[frame_hash] = faces
20
+
21
+
22
+ def clear_faces_cache() -> None:
23
+ global FACES_CACHE
24
+
25
+ FACES_CACHE = {}
26
+
27
+
28
+ def create_frame_hash(frame : Frame) -> Optional[str]:
29
+ return hashlib.sha1(frame.tobytes()).hexdigest() if frame.any() else None
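
The cache above memoises face detection per frame content: the key is a SHA-1 over the raw pixel buffer, and an all-black frame (where frame.any() is False) deliberately hashes to None and is never cached. A quick stand-alone check of the keying behaviour:

    import hashlib
    import numpy

    frame_a = numpy.zeros((4, 4, 3), numpy.uint8)
    frame_a[0, 0] = 255
    frame_b = frame_a.copy()
    # identical pixel data yields identical keys, so a re-read of the same frame is a cache hit
    print(hashlib.sha1(frame_a.tobytes()).hexdigest() == hashlib.sha1(frame_b.tobytes()).hexdigest())  # True
    print(numpy.zeros((4, 4, 3), numpy.uint8).any())  # False -> create_frame_hash returns None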
facefusion/face_helper.py ADDED
@@ -0,0 +1,149 @@
1
+ from typing import Any, Dict, Tuple, List
2
+ from functools import lru_cache
3
+ from cv2.typing import Size
4
+ import cv2
5
+ import numpy
6
+
7
+ from facefusion.typing import Bbox, Kps, Frame, Matrix, Template, Padding
8
+
9
+ TEMPLATES : Dict[Template, numpy.ndarray[Any, Any]] =\
10
+ {
11
+ 'arcface_v1': numpy.array(
12
+ [
13
+ [ 39.7300, 51.1380 ],
14
+ [ 72.2700, 51.1380 ],
15
+ [ 56.0000, 68.4930 ],
16
+ [ 42.4630, 87.0100 ],
17
+ [ 69.5370, 87.0100 ]
18
+ ]),
19
+ 'arcface_v2': numpy.array(
20
+ [
21
+ [ 38.2946, 51.6963 ],
22
+ [ 73.5318, 51.5014 ],
23
+ [ 56.0252, 71.7366 ],
24
+ [ 41.5493, 92.3655 ],
25
+ [ 70.7299, 92.2041 ]
26
+ ]),
27
+ 'ffhq': numpy.array(
28
+ [
29
+ [ 192.98138, 239.94708 ],
30
+ [ 318.90277, 240.1936 ],
31
+ [ 256.63416, 314.01935 ],
32
+ [ 201.26117, 371.41043 ],
33
+ [ 313.08905, 371.15118 ]
34
+ ])
35
+ }
36
+
37
+
38
+ def warp_face(temp_frame : Frame, kps : Kps, template : Template, size : Size) -> Tuple[Frame, Matrix]:
39
+ normed_template = TEMPLATES.get(template) * size[1] / size[0]
40
+ affine_matrix = cv2.estimateAffinePartial2D(kps, normed_template, method = cv2.LMEDS)[0]
41
+ crop_frame = cv2.warpAffine(temp_frame, affine_matrix, (size[1], size[1]), borderMode = cv2.BORDER_REPLICATE)
42
+ return crop_frame, affine_matrix
43
+
44
+
45
+ def paste_back(temp_frame : Frame, crop_frame: Frame, affine_matrix : Matrix, face_mask_blur : float, face_mask_padding : Padding) -> Frame:
46
+ inverse_matrix = cv2.invertAffineTransform(affine_matrix)
47
+ temp_frame_size = temp_frame.shape[:2][::-1]
48
+ mask_size = tuple(crop_frame.shape[:2])
49
+ mask_frame = create_static_mask_frame(mask_size, face_mask_blur, face_mask_padding)
50
+ inverse_mask_frame = cv2.warpAffine(mask_frame, inverse_matrix, temp_frame_size).clip(0, 1)
51
+ inverse_crop_frame = cv2.warpAffine(crop_frame, inverse_matrix, temp_frame_size, borderMode = cv2.BORDER_REPLICATE)
52
+ paste_frame = temp_frame.copy()
53
+ paste_frame[:, :, 0] = inverse_mask_frame * inverse_crop_frame[:, :, 0] + (1 - inverse_mask_frame) * temp_frame[:, :, 0]
54
+ paste_frame[:, :, 1] = inverse_mask_frame * inverse_crop_frame[:, :, 1] + (1 - inverse_mask_frame) * temp_frame[:, :, 1]
55
+ paste_frame[:, :, 2] = inverse_mask_frame * inverse_crop_frame[:, :, 2] + (1 - inverse_mask_frame) * temp_frame[:, :, 2]
56
+ return paste_frame
57
+
58
+
59
+ def paste_back_ellipse(temp_frame : Frame, crop_frame: Frame, affine_matrix : Matrix, face_mask_blur : float, face_mask_padding : Padding) -> Frame:
60
+ inverse_matrix = cv2.invertAffineTransform(affine_matrix)
61
+ temp_frame_size = temp_frame.shape[:2][::-1]
62
+ mask_size = tuple(crop_frame.shape[:2])
63
+ mask_frame = create_ellipse_mask_frame(mask_size, face_mask_blur, face_mask_padding)
64
+ inverse_mask_frame = cv2.warpAffine(mask_frame, inverse_matrix, temp_frame_size).clip(0, 1)
65
+ inverse_crop_frame = cv2.warpAffine(crop_frame, inverse_matrix, temp_frame_size, borderMode = cv2.BORDER_REPLICATE)
66
+ paste_frame = temp_frame.copy()
67
+ paste_frame[:, :, 0] = inverse_mask_frame * inverse_crop_frame[:, :, 0] + (1 - inverse_mask_frame) * temp_frame[:, :, 0]
68
+ paste_frame[:, :, 1] = inverse_mask_frame * inverse_crop_frame[:, :, 1] + (1 - inverse_mask_frame) * temp_frame[:, :, 1]
69
+ paste_frame[:, :, 2] = inverse_mask_frame * inverse_crop_frame[:, :, 2] + (1 - inverse_mask_frame) * temp_frame[:, :, 2]
70
+ return paste_frame
71
+
72
+
73
+ @lru_cache(maxsize = None)
74
+ def create_static_mask_frame(mask_size : Size, face_mask_blur : float, face_mask_padding : Padding) -> Frame:
75
+ mask_frame = numpy.ones(mask_size, numpy.float32)
76
+ blur_amount = int(mask_size[0] * 0.5 * face_mask_blur)
77
+ blur_area = max(blur_amount // 2, 1)
78
+ mask_frame[:max(blur_area, int(mask_size[1] * face_mask_padding[0] / 100)), :] = 0
79
+ mask_frame[-max(blur_area, int(mask_size[1] * face_mask_padding[2] / 100)):, :] = 0
80
+ mask_frame[:, :max(blur_area, int(mask_size[0] * face_mask_padding[3] / 100))] = 0
81
+ mask_frame[:, -max(blur_area, int(mask_size[0] * face_mask_padding[1] / 100)):] = 0
82
+ if blur_amount > 0:
83
+ mask_frame = cv2.GaussianBlur(mask_frame, (0, 0), blur_amount * 0.25)
84
+ return mask_frame
85
+
86
+
87
+ @lru_cache(maxsize = None)
88
+ def create_ellipse_mask_frame(mask_size : Size, face_mask_blur : float, face_mask_padding : Padding) -> Frame:
89
+ mask_frame = numpy.zeros(mask_size, numpy.float32)
90
+ center = (mask_size[1] // 2, mask_size[0] // 2)
91
+ axes = (max(1, mask_size[1] // 2 - int(mask_size[1] * face_mask_padding[1] / 100)),
92
+ max(1, mask_size[0] // 2 - int(mask_size[0] * face_mask_padding[0] / 100)))
93
+ cv2.ellipse(mask_frame, center, axes, 0, 0, 360, 1, -1)
94
+
95
+ if face_mask_blur > 0:
96
+ blur_amount = int(mask_size[0] * 0.5 * face_mask_blur)
97
+ mask_frame = cv2.GaussianBlur(mask_frame, (0, 0), blur_amount * 0.25)
98
+
99
+ return mask_frame
100
+
101
+
102
+
103
+ @lru_cache(maxsize = None)
104
+ def create_static_anchors(feature_stride : int, anchor_total : int, stride_height : int, stride_width : int) -> numpy.ndarray[Any, Any]:
105
+ y, x = numpy.mgrid[:stride_height, :stride_width][::-1]
106
+ anchors = numpy.stack((y, x), axis = -1)
107
+ anchors = (anchors * feature_stride).reshape((-1, 2))
108
+ anchors = numpy.stack([ anchors ] * anchor_total, axis = 1).reshape((-1, 2))
109
+ return anchors
110
+
111
+
112
+ def distance_to_bbox(points : numpy.ndarray[Any, Any], distance : numpy.ndarray[Any, Any]) -> Bbox:
113
+ x1 = points[:, 0] - distance[:, 0]
114
+ y1 = points[:, 1] - distance[:, 1]
115
+ x2 = points[:, 0] + distance[:, 2]
116
+ y2 = points[:, 1] + distance[:, 3]
117
+ bbox = numpy.column_stack([ x1, y1, x2, y2 ])
118
+ return bbox
119
+
120
+
121
+ def distance_to_kps(points : numpy.ndarray[Any, Any], distance : numpy.ndarray[Any, Any]) -> Kps:
122
+ x = points[:, 0::2] + distance[:, 0::2]
123
+ y = points[:, 1::2] + distance[:, 1::2]
124
+ kps = numpy.stack((x, y), axis = -1)
125
+ return kps
126
+
127
+
128
+ def apply_nms(bbox_list : List[Bbox], iou_threshold : float) -> List[int]:
129
+ keep_indices = []
130
+ dimension_list = numpy.reshape(bbox_list, (-1, 4))
131
+ x1 = dimension_list[:, 0]
132
+ y1 = dimension_list[:, 1]
133
+ x2 = dimension_list[:, 2]
134
+ y2 = dimension_list[:, 3]
135
+ areas = (x2 - x1 + 1) * (y2 - y1 + 1)
136
+ indices = numpy.arange(len(bbox_list))
137
+ while indices.size > 0:
138
+ index = indices[0]
139
+ remain_indices = indices[1:]
140
+ keep_indices.append(index)
141
+ xx1 = numpy.maximum(x1[index], x1[remain_indices])
142
+ yy1 = numpy.maximum(y1[index], y1[remain_indices])
143
+ xx2 = numpy.minimum(x2[index], x2[remain_indices])
144
+ yy2 = numpy.minimum(y2[index], y2[remain_indices])
145
+ width = numpy.maximum(0, xx2 - xx1 + 1)
146
+ height = numpy.maximum(0, yy2 - yy1 + 1)
147
+ iou = width * height / (areas[index] + areas[remain_indices] - width * height)
148
+ indices = indices[numpy.where(iou <= iou_threshold)[0] + 1]
149
+ return keep_indices
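
apply_nms above keeps detections in list order and suppresses any later box whose IoU with a kept box exceeds the threshold (0.4 at the call site in create_faces). A worked check of the same pixel-inclusive IoU convention with two made-up, shifted boxes:

    import numpy

    def iou(box_a, box_b):
        xx1, yy1 = numpy.maximum(box_a[:2], box_b[:2])
        xx2, yy2 = numpy.minimum(box_a[2:], box_b[2:])
        intersection = max(0, xx2 - xx1 + 1) * max(0, yy2 - yy1 + 1)
        area_a = (box_a[2] - box_a[0] + 1) * (box_a[3] - box_a[1] + 1)
        area_b = (box_b[2] - box_b[0] + 1) * (box_b[3] - box_b[1] + 1)
        return intersection / (area_a + area_b - intersection)

    box_a = numpy.array([ 0, 0, 99, 99 ])
    box_b = numpy.array([ 10, 10, 109, 109 ])
    print(round(iou(box_a, box_b), 3))  # 0.681 > 0.4, so box_b would be suppressed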
facefusion/face_reference.py ADDED
@@ -0,0 +1,21 @@
1
+ from typing import Optional
2
+
3
+ from facefusion.typing import Face
4
+
5
+ FACE_REFERENCE = None
6
+
7
+
8
+ def get_face_reference() -> Optional[Face]:
9
+ return FACE_REFERENCE
10
+
11
+
12
+ def set_face_reference(face : Face) -> None:
13
+ global FACE_REFERENCE
14
+
15
+ FACE_REFERENCE = face
16
+
17
+
18
+ def clear_face_reference() -> None:
19
+ global FACE_REFERENCE
20
+
21
+ FACE_REFERENCE = None
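
This module is a process-wide singleton that pins the reference face between frames. A minimal lifecycle sketch, assuming the facefusion package is importable; 'reference.jpg' is a placeholder path:

    from facefusion.face_analyser import get_one_face, find_similar_faces
    from facefusion.face_reference import set_face_reference, get_face_reference, clear_face_reference
    from facefusion.vision import read_static_image

    reference_frame = read_static_image('reference.jpg')   # placeholder path
    set_face_reference(get_one_face(reference_frame, 0))   # pin the face at position 0
    matches = find_similar_faces(reference_frame, get_face_reference(), 0.6)  # distance threshold
    clear_face_reference()  # reset between runs so a stale face is not reused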
facefusion/globals.py ADDED
@@ -0,0 +1,50 @@
1
+ from typing import List, Optional
2
+
3
+ from facefusion.typing import FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, OutputVideoEncoder, FaceDetectorModel, FaceRecognizerModel, TempFrameFormat, Padding
4
+
5
+ # api
6
+ api_mode : Optional[bool] = None
7
+ # general
8
+ source_path : Optional[str] = None
9
+ target_path : Optional[str] = None
10
+ output_path : Optional[str] = None
11
+ # misc
12
+ skip_download : Optional[bool] = None
13
+ headless : Optional[bool] = None
14
+ # execution
15
+ execution_providers : List[str] = []
16
+ execution_thread_count : Optional[int] = None
17
+ execution_queue_count : Optional[int] = None
18
+ max_memory : Optional[int] = None
19
+ # face analyser
20
+ face_analyser_order : Optional[FaceAnalyserOrder] = None
21
+ face_analyser_age : Optional[FaceAnalyserAge] = None
22
+ face_analyser_gender : Optional[FaceAnalyserGender] = None
23
+ face_detector_model : Optional[FaceDetectorModel] = None
24
+ face_detector_size : Optional[str] = None
25
+ face_detector_score : Optional[float] = None
26
+ face_recognizer_model : Optional[FaceRecognizerModel] = None
27
+ # face selector
28
+ face_selector_mode : Optional[FaceSelectorMode] = None
29
+ reference_face_position : Optional[int] = None
30
+ reference_face_distance : Optional[float] = None
31
+ reference_frame_number : Optional[int] = None
32
+ # face mask
33
+ face_mask_blur : Optional[float] = None
34
+ face_mask_padding : Optional[Padding] = None
35
+ # frame extraction
36
+ trim_frame_start : Optional[int] = None
37
+ trim_frame_end : Optional[int] = None
38
+ temp_frame_format : Optional[TempFrameFormat] = None
39
+ temp_frame_quality : Optional[int] = None
40
+ keep_temp : Optional[bool] = None
41
+ # output creation
42
+ output_image_quality : Optional[int] = None
43
+ output_video_encoder : Optional[OutputVideoEncoder] = None
44
+ output_video_quality : Optional[int] = None
45
+ keep_fps : Optional[bool] = None
46
+ skip_audio : Optional[bool] = None
47
+ # frame processors
48
+ frame_processors : List[str] = []
49
+ # uis
50
+ ui_layouts : List[str] = []
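
These globals form the single shared configuration namespace; every module reads them directly instead of receiving parameters. A minimal headless setup might look like this (all paths and values are placeholders):

    import facefusion.globals

    facefusion.globals.source_path = 'source.jpg'
    facefusion.globals.target_path = 'target.mp4'
    facefusion.globals.output_path = 'output.mp4'
    facefusion.globals.frame_processors = [ 'face_swapper', 'face_enhancer' ]
    facefusion.globals.execution_providers = [ 'CPUExecutionProvider' ]
    facefusion.globals.execution_thread_count = 4
    facefusion.globals.execution_queue_count = 1
    facefusion.globals.headless = True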
facefusion/installer.py ADDED
@@ -0,0 +1,63 @@
1
+ from typing import Dict, Tuple
2
+ import subprocess
3
+ from argparse import ArgumentParser, HelpFormatter
4
+
5
+ subprocess.call([ 'pip', 'install', 'inquirer', '-q' ])
6
+
7
+ import inquirer
8
+
9
+ from facefusion import metadata, wording
10
+
11
+ TORCH : Dict[str, str] =\
12
+ {
13
+ 'default': 'default',
14
+ 'cpu': 'cpu',
15
+ 'cuda': 'cu118',
16
+ 'rocm': 'rocm5.6'
17
+ }
18
+ ONNXRUNTIMES : Dict[str, Tuple[str, str]] =\
19
+ {
20
+ 'default': ('onnxruntime', '1.16.3'),
21
+ 'cuda': ('onnxruntime-gpu', '1.16.3'),
22
+ 'coreml-legacy': ('onnxruntime-coreml', '1.13.1'),
23
+ 'coreml-silicon': ('onnxruntime-silicon', '1.16.0'),
24
+ 'directml': ('onnxruntime-directml', '1.16.3'),
25
+ 'openvino': ('onnxruntime-openvino', '1.16.0')
26
+ }
27
+
28
+
29
+ def cli() -> None:
30
+ program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120))
31
+ program.add_argument('--torch', help = wording.get('install_dependency_help').format(dependency = 'torch'), dest = 'torch', choices = TORCH.keys())
32
+ program.add_argument('--onnxruntime', help = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), dest = 'onnxruntime', choices = ONNXRUNTIMES.keys())
33
+ program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
34
+ run(program)
35
+
36
+
37
+ def run(program : ArgumentParser) -> None:
38
+ args = program.parse_args()
39
+
40
+ if args.torch and args.onnxruntime:
41
+ answers =\
42
+ {
43
+ 'torch': args.torch,
44
+ 'onnxruntime': args.onnxruntime
45
+ }
46
+ else:
47
+ answers = inquirer.prompt(
48
+ [
49
+ inquirer.List('torch', message = wording.get('install_dependency_help').format(dependency = 'torch'), choices = list(TORCH.keys())),
50
+ inquirer.List('onnxruntime', message = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), choices = list(ONNXRUNTIMES.keys()))
51
+ ])
52
+ if answers:
53
+ torch = answers['torch']
54
+ torch_wheel = TORCH[torch]
55
+ onnxruntime = answers['onnxruntime']
56
+ onnxruntime_name, onnxruntime_version = ONNXRUNTIMES[onnxruntime]
57
+ subprocess.call([ 'pip', 'uninstall', 'torch', '-y' ])
58
+ if torch_wheel == 'default':
59
+ subprocess.call([ 'pip', 'install', '-r', 'requirements.txt' ])
60
+ else:
61
+ subprocess.call([ 'pip', 'install', '-r', 'requirements.txt', '--extra-index-url', 'https://download.pytorch.org/whl/' + torch_wheel ])
62
+ subprocess.call([ 'pip', 'uninstall', 'onnxruntime', onnxruntime_name, '-y' ])
63
+ subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version ])
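
When both flags are supplied, the installer skips the inquirer prompt entirely. A non-interactive invocation sketch; the install.py entry-point name is an assumption, and note that cli() really shells out to pip (importing the module alone pip-installs inquirer):

    import sys
    from facefusion import installer  # import already runs 'pip install inquirer -q'

    sys.argv = [ 'install.py', '--torch', 'cuda', '--onnxruntime', 'cuda' ]  # simulate the command line
    installer.cli()  # installs the cu118 torch wheel and onnxruntime-gpu==1.16.3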
facefusion/metadata.py ADDED
@@ -0,0 +1,13 @@
1
+ METADATA =\
2
+ {
3
+ 'name': 'FaceFusion',
4
+ 'description': 'Next generation face swapper and enhancer',
5
+ 'version': '2.0.0',
6
+ 'license': 'MIT',
7
+ 'author': 'Henry Ruhs',
8
+ 'url': 'https://facefusion.io'
9
+ }
10
+
11
+
12
+ def get(key : str) -> str:
13
+ return METADATA[key]
facefusion/processors/__init__.py ADDED
File without changes
facefusion/processors/frame/__init__.py ADDED
File without changes
facefusion/processors/frame/choices.py ADDED
@@ -0,0 +1,13 @@
1
+ from typing import List
2
+ import numpy
3
+
4
+ from facefusion.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem
5
+
6
+ face_swapper_models : List[FaceSwapperModel] = [ 'blendface_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial' ]
7
+ face_enhancer_models : List[FaceEnhancerModel] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer' ]
8
+ frame_enhancer_models : List[FrameEnhancerModel] = [ 'real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus' ]
9
+
10
+ face_enhancer_blend_range : List[int] = numpy.arange(0, 101, 1).tolist()
11
+ frame_enhancer_blend_range : List[int] = numpy.arange(0, 101, 1).tolist()
12
+
13
+ face_debugger_items : List[FaceDebuggerItem] = [ 'bbox', 'kps', 'face-mask', 'score' ]
facefusion/processors/frame/core.py ADDED
@@ -0,0 +1,105 @@
1
+ import sys
2
+ import importlib
3
+ from concurrent.futures import ThreadPoolExecutor, as_completed
4
+ from queue import Queue
5
+ from types import ModuleType
6
+ from typing import Any, List
7
+ from tqdm import tqdm
8
+
9
+ import facefusion.globals
10
+ from facefusion.typing import Process_Frames
11
+ from facefusion import wording
12
+ from facefusion.utilities import encode_execution_providers
13
+
14
+ FRAME_PROCESSORS_MODULES : List[ModuleType] = []
15
+ FRAME_PROCESSORS_METHODS =\
16
+ [
17
+ 'get_frame_processor',
18
+ 'clear_frame_processor',
19
+ 'get_options',
20
+ 'set_options',
21
+ 'register_args',
22
+ 'apply_args',
23
+ 'pre_check',
24
+ 'pre_process',
25
+ 'process_frame',
26
+ 'process_frames',
27
+ 'process_image',
28
+ 'process_video',
29
+ 'post_process'
30
+ ]
31
+
32
+
33
+ def load_frame_processor_module(frame_processor : str) -> Any:
34
+ try:
35
+ frame_processor_module = importlib.import_module('facefusion.processors.frame.modules.' + frame_processor)
36
+ for method_name in FRAME_PROCESSORS_METHODS:
37
+ if not hasattr(frame_processor_module, method_name):
38
+ raise NotImplementedError
39
+ except ModuleNotFoundError:
40
+ sys.exit(wording.get('frame_processor_not_loaded').format(frame_processor = frame_processor))
41
+ except NotImplementedError:
42
+ sys.exit(wording.get('frame_processor_not_implemented').format(frame_processor = frame_processor))
43
+ return frame_processor_module
44
+
45
+
46
+ def get_frame_processors_modules(frame_processors : List[str]) -> List[ModuleType]:
47
+ global FRAME_PROCESSORS_MODULES
48
+
49
+ if not FRAME_PROCESSORS_MODULES:
50
+ for frame_processor in frame_processors:
51
+ frame_processor_module = load_frame_processor_module(frame_processor)
52
+ FRAME_PROCESSORS_MODULES.append(frame_processor_module)
53
+ return FRAME_PROCESSORS_MODULES
54
+
55
+
56
+ def api_get_frame_processors_modules(frame_processors : List[str]) -> List[ModuleType]:
57
+ frame_processors_modules = [] # use a local list so the cached module-level registry stays untouched
58
+ for frame_processor in frame_processors:
59
+ frame_processor_module = load_frame_processor_module(frame_processor)
60
+ frame_processors_modules.append(frame_processor_module)
61
+ return frame_processors_modules
62
+
63
+
64
+
65
+ def clear_frame_processors_modules() -> None:
66
+ global FRAME_PROCESSORS_MODULES
67
+
68
+ for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
69
+ frame_processor_module.clear_frame_processor()
70
+ FRAME_PROCESSORS_MODULES = []
71
+
72
+
73
+ def multi_process_frames(source_path : str, temp_frame_paths : List[str], process_frames : Process_Frames) -> None:
74
+ with tqdm(total = len(temp_frame_paths), desc = wording.get('processing'), unit = 'frame', ascii = ' =') as progress:
75
+ progress.set_postfix(
76
+ {
77
+ 'execution_providers': encode_execution_providers(facefusion.globals.execution_providers),
78
+ 'execution_thread_count': facefusion.globals.execution_thread_count,
79
+ 'execution_queue_count': facefusion.globals.execution_queue_count
80
+ })
81
+ with ThreadPoolExecutor(max_workers = facefusion.globals.execution_thread_count) as executor:
82
+ futures = []
83
+ queue_temp_frame_paths : Queue[str] = create_queue(temp_frame_paths)
84
+ queue_per_future = max(len(temp_frame_paths) // facefusion.globals.execution_thread_count * facefusion.globals.execution_queue_count, 1)
85
+ while not queue_temp_frame_paths.empty():
86
+ payload_temp_frame_paths = pick_queue(queue_temp_frame_paths, queue_per_future)
87
+ future = executor.submit(process_frames, source_path, payload_temp_frame_paths, progress.update)
88
+ futures.append(future)
89
+ for future_done in as_completed(futures):
90
+ future_done.result()
91
+
92
+
93
+ def create_queue(temp_frame_paths : List[str]) -> Queue[str]:
94
+ queue : Queue[str] = Queue()
95
+ for frame_path in temp_frame_paths:
96
+ queue.put(frame_path)
97
+ return queue
98
+
99
+
100
+ def pick_queue(queue : Queue[str], queue_per_future : int) -> List[str]:
101
+ queues = []
102
+ for _ in range(queue_per_future):
103
+ if not queue.empty():
104
+ queues.append(queue.get())
105
+ return queues
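
The batch handed to each future is len(temp_frame_paths) // execution_thread_count * execution_queue_count frames, floored at 1, so short queues still make progress. The split can be checked in isolation with the same two helpers (frame names are made up):

    from queue import Queue

    def create_queue(paths):
        queue = Queue()
        for path in paths:
            queue.put(path)
        return queue

    def pick_queue(queue, queue_per_future):
        picked = []
        for _ in range(queue_per_future):
            if not queue.empty():
                picked.append(queue.get())
        return picked

    paths = [ 'frame-%04d.jpg' % index for index in range(10) ]
    queue = create_queue(paths)
    queue_per_future = max(len(paths) // 4 * 1, 1)  # 10 frames, 4 threads, queue count 1 -> 2 per future
    batches = []
    while not queue.empty():
        batches.append(pick_queue(queue, queue_per_future))
    print([ len(batch) for batch in batches ])  # [2, 2, 2, 2, 2] -> five futures of two frames each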
facefusion/processors/frame/globals.py ADDED
@@ -0,0 +1,10 @@
1
+ from typing import List, Optional
2
+
3
+ from facefusion.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem
4
+
5
+ face_swapper_model : Optional[FaceSwapperModel] = None
6
+ face_enhancer_model : Optional[FaceEnhancerModel] = None
7
+ face_enhancer_blend : Optional[int] = None
8
+ frame_enhancer_model : Optional[FrameEnhancerModel] = None
9
+ frame_enhancer_blend : Optional[int] = None
10
+ face_debugger_items : Optional[List[FaceDebuggerItem]] = None
facefusion/processors/frame/modules/__init__.py ADDED
File without changes
facefusion/processors/frame/modules/face_blur.py ADDED
@@ -0,0 +1,277 @@
1
+ from typing import Any, List, Dict, Literal, Optional
2
+ from argparse import ArgumentParser
3
+ import threading
4
+ import numpy
5
+ import onnx
6
+ import onnxruntime
7
+ from onnx import numpy_helper
8
+ import cv2
9
+
10
+ import facefusion.globals
11
+ import facefusion.processors.frame.core as frame_processors
12
+ from facefusion import wording
13
+ from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
14
+ from facefusion.face_helper import warp_face, paste_back_ellipse
15
+ from facefusion.face_reference import get_face_reference
16
+ from facefusion.content_analyser import clear_content_analyser
17
+ from facefusion.typing import Face, Frame, Update_Process, ProcessMode, ModelValue, OptionsWithModel, Embedding
18
+ from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video, is_file, is_download_done, update_status
19
+ from facefusion.vision import read_image, read_static_image, write_image
20
+ from facefusion.processors.frame import globals as frame_processors_globals
21
+ from facefusion.processors.frame import choices as frame_processors_choices
22
+
23
+ FRAME_PROCESSOR = None
24
+ MODEL_MATRIX = None
25
+ THREAD_LOCK : threading.Lock = threading.Lock()
26
+ NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_BLUR'
27
+ MODELS : Dict[str, ModelValue] =\
28
+ {
29
+ 'blendface_256':
30
+ {
31
+ 'type': 'blendface',
32
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/blendface_256.onnx',
33
+ 'path': resolve_relative_path('../.assets/models/blendface_256.onnx'),
34
+ 'template': 'ffhq',
35
+ 'size': (512, 256),
36
+ 'mean': [ 0.0, 0.0, 0.0 ],
37
+ 'standard_deviation': [ 1.0, 1.0, 1.0 ]
38
+ },
39
+ 'inswapper_128':
40
+ {
41
+ 'type': 'inswapper',
42
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128.onnx',
43
+ 'path': resolve_relative_path('../.assets/models/inswapper_128.onnx'),
44
+ 'template': 'arcface_v2',
45
+ 'size': (128, 128),
46
+ 'mean': [ 0.0, 0.0, 0.0 ],
47
+ 'standard_deviation': [ 1.0, 1.0, 1.0 ]
48
+ },
49
+ 'inswapper_128_fp16':
50
+ {
51
+ 'type': 'inswapper',
52
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128_fp16.onnx',
53
+ 'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.onnx'),
54
+ 'template': 'arcface_v2',
55
+ 'size': (128, 128),
56
+ 'mean': [ 0.0, 0.0, 0.0 ],
57
+ 'standard_deviation': [ 1.0, 1.0, 1.0 ]
58
+ },
59
+ 'simswap_256':
60
+ {
61
+ 'type': 'simswap',
62
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/simswap_256.onnx',
63
+ 'path': resolve_relative_path('../.assets/models/simswap_256.onnx'),
64
+ 'template': 'arcface_v1',
65
+ 'size': (112, 256),
66
+ 'mean': [ 0.485, 0.456, 0.406 ],
67
+ 'standard_deviation': [ 0.229, 0.224, 0.225 ]
68
+ },
69
+ 'simswap_512_unofficial':
70
+ {
71
+ 'type': 'simswap',
72
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/simswap_512_unofficial.onnx',
73
+ 'path': resolve_relative_path('../.assets/models/simswap_512_unofficial.onnx'),
74
+ 'template': 'arcface_v1',
75
+ 'size': (112, 512),
76
+ 'mean': [ 0.0, 0.0, 0.0 ],
77
+ 'standard_deviation': [ 1.0, 1.0, 1.0 ]
78
+ }
79
+ }
80
+ OPTIONS : Optional[OptionsWithModel] = None
81
+
82
+
83
+ def get_frame_processor() -> Any:
84
+ global FRAME_PROCESSOR
85
+
86
+ with THREAD_LOCK:
87
+ if FRAME_PROCESSOR is None:
88
+ model_path = get_options('model').get('path')
89
+ FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = facefusion.globals.execution_providers)
90
+ return FRAME_PROCESSOR
91
+
92
+
93
+ def clear_frame_processor() -> None:
94
+ global FRAME_PROCESSOR
95
+
96
+ FRAME_PROCESSOR = None
97
+
98
+
99
+ def get_model_matrix() -> Any:
100
+ global MODEL_MATRIX
101
+
102
+ with THREAD_LOCK:
103
+ if MODEL_MATRIX is None:
104
+ model_path = get_options('model').get('path')
105
+ model = onnx.load(model_path)
106
+ MODEL_MATRIX = numpy_helper.to_array(model.graph.initializer[-1])
107
+ return MODEL_MATRIX
108
+
109
+
110
+ def clear_model_matrix() -> None:
111
+ global MODEL_MATRIX
112
+
113
+ MODEL_MATRIX = None
114
+
115
+
116
+ def get_options(key : Literal['model']) -> Any:
117
+ global OPTIONS
118
+
119
+ if OPTIONS is None:
120
+ OPTIONS =\
121
+ {
122
+ 'model': MODELS[frame_processors_globals.face_swapper_model]
123
+ }
124
+ return OPTIONS.get(key)
125
+
126
+
127
+ def set_options(key : Literal['model'], value : Any) -> None:
128
+ global OPTIONS
129
+
130
+ OPTIONS[key] = value
131
+
132
+
133
+ def register_args(program : ArgumentParser) -> None:
134
+ pass
135
+
136
+
137
+ def apply_args(program : ArgumentParser) -> None:
138
+ args = program.parse_args()
139
+ frame_processors_globals.face_swapper_model = args.face_swapper_model
140
+ if args.face_swapper_model == 'blendface_256':
141
+ facefusion.globals.face_recognizer_model = 'arcface_blendface'
142
+ if args.face_swapper_model == 'inswapper_128' or args.face_swapper_model == 'inswapper_128_fp16':
143
+ facefusion.globals.face_recognizer_model = 'arcface_inswapper'
144
+ if args.face_swapper_model == 'simswap_256' or args.face_swapper_model == 'simswap_512_unofficial':
145
+ facefusion.globals.face_recognizer_model = 'arcface_simswap'
146
+
147
+
148
+ def pre_check() -> bool:
149
+ if not facefusion.globals.skip_download:
150
+ download_directory_path = resolve_relative_path('../.assets/models')
151
+ model_url = get_options('model').get('url')
152
+ conditional_download(download_directory_path, [ model_url ])
153
+ return True
154
+
155
+
156
+ def pre_process(mode : ProcessMode) -> bool:
157
+ model_url = get_options('model').get('url')
158
+ model_path = get_options('model').get('path')
159
+ if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
160
+ update_status(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
161
+ return False
162
+ elif not is_file(model_path):
163
+ update_status(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
164
+ return False
165
+ if not is_image(facefusion.globals.source_path):
166
+ update_status(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME)
167
+ return False
168
+ elif not get_one_face(read_static_image(facefusion.globals.source_path)):
169
+ update_status(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME)
170
+ return False
171
+ if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
172
+ update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
173
+ return False
174
+ if mode == 'output' and not facefusion.globals.output_path:
175
+ update_status(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
176
+ return False
177
+ return True
178
+
179
+
180
+ def post_process() -> None:
181
+ clear_frame_processor()
182
+ clear_model_matrix()
183
+ clear_face_analyser()
184
+ clear_content_analyser()
185
+ read_static_image.cache_clear()
186
+
187
+
188
+ def apply_blur_to_face(target_face : Face, temp_frame : Frame) -> Frame:
189
+ # blur the matched face crop, then paste it back under the elliptical mask
190
+ model_template = get_options('model').get('template')
191
+ model_size = get_options('model').get('size')
192
+ crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, model_template, model_size)
193
+ blurred_face = apply_blur(crop_frame)
194
+ temp_frame = paste_back_ellipse(temp_frame, blurred_face, affine_matrix, facefusion.globals.face_mask_blur, facefusion.globals.face_mask_padding)
195
+ return temp_frame
196
+
197
+
198
+ def apply_blur(crop_frame: Frame) -> Frame:
199
+ blurred_frame = cv2.GaussianBlur(crop_frame, (45, 45), 0)
200
+ return blurred_frame
201
+
202
+
203
+ def prepare_source_frame(source_face : Face) -> numpy.ndarray[Any, Any]:
204
+ source_frame = read_static_image(facefusion.globals.source_path)
205
+ source_frame, _ = warp_face(source_frame, source_face.kps, 'arcface_v2', (112, 112))
206
+ source_frame = source_frame[:, :, ::-1] / 255.0
207
+ source_frame = source_frame.transpose(2, 0, 1)
208
+ source_frame = numpy.expand_dims(source_frame, axis = 0).astype(numpy.float32)
209
+ return source_frame
210
+
211
+
212
+ def prepare_source_embedding(source_face : Face) -> Embedding:
213
+ model_type = get_options('model').get('type')
214
+ if model_type == 'inswapper':
215
+ model_matrix = get_model_matrix()
216
+ source_embedding = source_face.embedding.reshape((1, -1))
217
+ source_embedding = numpy.dot(source_embedding, model_matrix) / numpy.linalg.norm(source_embedding)
218
+ else:
219
+ source_embedding = source_face.normed_embedding.reshape(1, -1)
220
+ return source_embedding
221
+
222
+
223
+ def prepare_crop_frame(crop_frame : Frame) -> Frame:
224
+ model_mean = get_options('model').get('mean')
225
+ model_standard_deviation = get_options('model').get('standard_deviation')
226
+ crop_frame = crop_frame[:, :, ::-1] / 255.0
227
+ crop_frame = (crop_frame - model_mean) / model_standard_deviation
228
+ crop_frame = crop_frame.transpose(2, 0, 1)
229
+ crop_frame = numpy.expand_dims(crop_frame, axis = 0).astype(numpy.float32)
230
+ return crop_frame
231
+
232
+
233
+ def normalize_crop_frame(crop_frame : Frame) -> Frame:
234
+ crop_frame = crop_frame.transpose(1, 2, 0)
235
+ crop_frame = (crop_frame * 255.0).round()
236
+ crop_frame = crop_frame[:, :, ::-1].astype(numpy.uint8)
237
+ return crop_frame
238
+
239
+
240
+ def process_frame(source_face: Face, reference_face: Face, temp_frame: Frame) -> Frame:
241
+ if 'reference' in facefusion.globals.face_selector_mode:
242
+ similar_faces = find_similar_faces(temp_frame, reference_face, facefusion.globals.reference_face_distance)
243
+ if similar_faces:
244
+ for similar_face in similar_faces:
245
+ temp_frame = apply_blur_to_face(similar_face, temp_frame)
246
+ if 'one' in facefusion.globals.face_selector_mode:
247
+ target_face = get_one_face(temp_frame)
248
+ if target_face:
249
+ temp_frame = apply_blur_to_face(target_face, temp_frame)
250
+ if 'many' in facefusion.globals.face_selector_mode:
251
+ many_faces = get_many_faces(temp_frame)
252
+ if many_faces:
253
+ for target_face in many_faces:
254
+ temp_frame = apply_blur_to_face(target_face, temp_frame)
255
+ return temp_frame
256
+
257
+
258
+ def process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Update_Process) -> None:
259
+ source_face = get_one_face(read_static_image(source_path))
260
+ reference_face = get_face_reference() if 'reference' in facefusion.globals.face_selector_mode else None
261
+ for temp_frame_path in temp_frame_paths:
262
+ temp_frame = read_image(temp_frame_path)
263
+ result_frame = process_frame(source_face, reference_face, temp_frame)
264
+ write_image(temp_frame_path, result_frame)
265
+ update_progress()
266
+
267
+
268
+ def process_image(source_path : str, target_path : str, output_path : str) -> None:
269
+ source_face = get_one_face(read_static_image(source_path))
270
+ target_frame = read_static_image(target_path)
271
+ reference_face = get_one_face(target_frame, facefusion.globals.reference_face_position) if 'reference' in facefusion.globals.face_selector_mode else None
272
+ result_frame = process_frame(source_face, reference_face, target_frame)
273
+ write_image(output_path, result_frame)
274
+
275
+
276
+ def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
277
+ frame_processors.multi_process_frames(source_path, temp_frame_paths, process_frames)
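
The blur path warps the detected face to the model template, blurs the crop with a fixed 45 x 45 Gaussian kernel, and pastes it back under the feathered elliptical mask. The blur step in isolation, on a random stand-in crop:

    import cv2
    import numpy

    crop_frame = numpy.random.randint(0, 255, (256, 256, 3)).astype(numpy.uint8)  # stand-in for warp_face output
    blurred_frame = cv2.GaussianBlur(crop_frame, (45, 45), 0)  # sigma 0 lets OpenCV derive it from the kernel size
    print(blurred_frame.shape)  # (256, 256, 3) -- same geometry, heavily smoothed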
facefusion/processors/frame/modules/face_debugger.py ADDED
@@ -0,0 +1,123 @@
1
+ from typing import Any, List, Literal
2
+ from argparse import ArgumentParser
3
+ import cv2
4
+ import numpy
5
+
6
+ import facefusion.globals
7
+ import facefusion.processors.frame.core as frame_processors
8
+ from facefusion import wording
9
+ from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
10
+ from facefusion.face_reference import get_face_reference
11
+ from facefusion.content_analyser import clear_content_analyser
12
+ from facefusion.typing import Face, Frame, Update_Process, ProcessMode
13
+ from facefusion.vision import read_image, read_static_image, write_image
14
+ from facefusion.face_helper import warp_face, create_static_mask_frame
15
+ from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices
16
+
17
+ NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_DEBUGGER'
18
+
19
+
20
+ def get_frame_processor() -> None:
21
+ pass
22
+
23
+
24
+ def clear_frame_processor() -> None:
25
+ pass
26
+
27
+
28
+ def get_options(key : Literal['model']) -> None:
29
+ pass
30
+
31
+
32
+ def set_options(key : Literal['model'], value : Any) -> None:
33
+ pass
34
+
35
+
36
+ def register_args(program : ArgumentParser) -> None:
37
+ program.add_argument('--face-debugger-items', help = wording.get('face_debugger_items_help'), dest = 'face_debugger_items', default = [ 'kps', 'face-mask' ], choices = frame_processors_choices.face_debugger_items, nargs = '+')
38
+
39
+
40
+ def apply_args(program : ArgumentParser) -> None:
41
+ args = program.parse_args()
42
+ frame_processors_globals.face_debugger_items = args.face_debugger_items
43
+
44
+
45
+ def pre_check() -> bool:
46
+ return True
47
+
48
+
49
+ def pre_process(mode : ProcessMode) -> bool:
50
+ return True
51
+
52
+
53
+ def post_process() -> None:
54
+ clear_frame_processor()
55
+ clear_face_analyser()
56
+ clear_content_analyser()
57
+
58
+
59
+ def debug_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
60
+ primary_color = (0, 0, 255)
61
+ secondary_color = (0, 255, 0)
62
+ bounding_box = target_face.bbox.astype(numpy.int32)
63
+ if 'bbox' in frame_processors_globals.face_debugger_items:
64
+ cv2.rectangle(temp_frame, (bounding_box[0], bounding_box[1]), (bounding_box[2], bounding_box[3]), secondary_color, 2)
65
+ if 'face-mask' in frame_processors_globals.face_debugger_items:
66
+ crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, 'arcface_v2', (128, 128))
67
+ inverse_matrix = cv2.invertAffineTransform(affine_matrix)
68
+ temp_frame_size = temp_frame.shape[:2][::-1]
69
+ mask_frame = create_static_mask_frame(crop_frame.shape[:2], 0, facefusion.globals.face_mask_padding)
70
+ mask_frame[mask_frame > 0] = 255
71
+ inverse_mask_frame = cv2.warpAffine(mask_frame.astype(numpy.uint8), inverse_matrix, temp_frame_size)
72
+ inverse_mask_contours = cv2.findContours(inverse_mask_frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
73
+ cv2.drawContours(temp_frame, inverse_mask_contours, 0, primary_color, 2)
74
+ if bounding_box[3] - bounding_box[1] > 60 and bounding_box[2] - bounding_box[0] > 60:
75
+ if 'kps' in frame_processors_globals.face_debugger_items:
76
+ kps = target_face.kps.astype(numpy.int32)
77
+ for index in range(kps.shape[0]):
78
+ cv2.circle(temp_frame, (kps[index][0], kps[index][1]), 3, primary_color, -1)
79
+ if 'score' in frame_processors_globals.face_debugger_items:
80
+ score_text = str(round(target_face.score, 2))
81
+ score_position = (bounding_box[0] + 10, bounding_box[1] + 20)
82
+ cv2.putText(temp_frame, score_text, score_position, cv2.FONT_HERSHEY_SIMPLEX, 0.5, secondary_color, 2)
83
+ return temp_frame
84
+
85
+
86
+ def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
87
+ if 'reference' in facefusion.globals.face_selector_mode:
88
+ similar_faces = find_similar_faces(temp_frame, reference_face, facefusion.globals.reference_face_distance)
89
+ if similar_faces:
90
+ for similar_face in similar_faces:
91
+ temp_frame = debug_face(source_face, similar_face, temp_frame)
92
+ if 'one' in facefusion.globals.face_selector_mode:
93
+ target_face = get_one_face(temp_frame)
94
+ if target_face:
95
+ temp_frame = debug_face(source_face, target_face, temp_frame)
96
+ if 'many' in facefusion.globals.face_selector_mode:
97
+ many_faces = get_many_faces(temp_frame)
98
+ if many_faces:
99
+ for target_face in many_faces:
100
+ temp_frame = debug_face(source_face, target_face, temp_frame)
101
+ return temp_frame
102
+
103
+
104
+ def process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Update_Process) -> None:
105
+ source_face = get_one_face(read_static_image(source_path))
106
+ reference_face = get_face_reference() if 'reference' in facefusion.globals.face_selector_mode else None
107
+ for temp_frame_path in temp_frame_paths:
108
+ temp_frame = read_image(temp_frame_path)
109
+ result_frame = process_frame(source_face, reference_face, temp_frame)
110
+ write_image(temp_frame_path, result_frame)
111
+ update_progress()
112
+
113
+
114
+ def process_image(source_path : str, target_path : str, output_path : str) -> None:
115
+ source_face = get_one_face(read_static_image(source_path))
116
+ target_frame = read_static_image(target_path)
117
+ reference_face = get_one_face(target_frame, facefusion.globals.reference_face_position) if 'reference' in facefusion.globals.face_selector_mode else None
118
+ result_frame = process_frame(source_face, reference_face, target_frame)
119
+ write_image(output_path, result_frame)
120
+
121
+
122
+ def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
123
+ frame_processors.multi_process_frames(source_path, temp_frame_paths, process_frames)
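
debug_face draws its overlays with plain OpenCV primitives; nothing model-specific is involved. The same rectangle, keypoint, and score drawing on a dummy frame with hand-picked coordinates:

    import cv2
    import numpy

    frame = numpy.zeros((240, 320, 3), numpy.uint8)
    bounding_box = [ 80, 60, 240, 200 ]  # made-up detection
    kps = [ (120, 110), (200, 110), (160, 150), (130, 180), (190, 180) ]  # made-up landmarks
    cv2.rectangle(frame, (bounding_box[0], bounding_box[1]), (bounding_box[2], bounding_box[3]), (0, 255, 0), 2)
    for x, y in kps:
        cv2.circle(frame, (x, y), 3, (0, 0, 255), -1)
    cv2.putText(frame, '0.99', (bounding_box[0] + 10, bounding_box[1] + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)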
facefusion/processors/frame/modules/face_enhancer.py ADDED
@@ -0,0 +1,222 @@
1
+ from typing import Any, List, Dict, Literal, Optional
2
+ from argparse import ArgumentParser
3
+ import cv2
4
+ import threading
5
+ import numpy
6
+ import onnxruntime
7
+
8
+ import facefusion.globals
9
+ import facefusion.processors.frame.core as frame_processors
10
+ from facefusion import wording
11
+ from facefusion.face_analyser import get_many_faces, clear_face_analyser
12
+ from facefusion.face_helper import warp_face, paste_back
13
+ from facefusion.content_analyser import clear_content_analyser
14
+ from facefusion.typing import Face, Frame, Update_Process, ProcessMode, ModelValue, OptionsWithModel
15
+ from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video, is_file, is_download_done, create_metavar, update_status
16
+ from facefusion.vision import read_image, read_static_image, write_image
17
+ from facefusion.processors.frame import globals as frame_processors_globals
18
+ from facefusion.processors.frame import choices as frame_processors_choices
19
+
20
+ FRAME_PROCESSOR = None
21
+ THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
22
+ THREAD_LOCK : threading.Lock = threading.Lock()
23
+ NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_ENHANCER'
24
+ MODELS : Dict[str, ModelValue] =\
25
+ {
26
+ 'codeformer':
27
+ {
28
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/codeformer.onnx',
29
+ 'path': resolve_relative_path('../.assets/models/codeformer.onnx'),
30
+ 'template': 'ffhq',
31
+ 'size': (512, 512)
32
+ },
33
+ 'gfpgan_1.2':
34
+ {
35
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.2.onnx',
36
+ 'path': resolve_relative_path('../.assets/models/gfpgan_1.2.onnx'),
37
+ 'template': 'ffhq',
38
+ 'size': (512, 512)
39
+ },
40
+ 'gfpgan_1.3':
41
+ {
42
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.3.onnx',
43
+ 'path': resolve_relative_path('../.assets/models/gfpgan_1.3.onnx'),
44
+ 'template': 'ffhq',
45
+ 'size': (512, 512)
46
+ },
47
+ 'gfpgan_1.4':
48
+ {
49
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.4.onnx',
50
+ 'path': resolve_relative_path('../.assets/models/gfpgan_1.4.onnx'),
51
+ 'template': 'ffhq',
52
+ 'size': (512, 512)
53
+ },
54
+ 'gpen_bfr_256':
55
+ {
56
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_256.onnx',
57
+ 'path': resolve_relative_path('../.assets/models/gpen_bfr_256.onnx'),
58
+ 'template': 'arcface_v2',
59
+ 'size': (128, 256)
60
+ },
61
+ 'gpen_bfr_512':
62
+ {
63
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_512.onnx',
64
+ 'path': resolve_relative_path('../.assets/models/gpen_bfr_512.onnx'),
65
+ 'template': 'ffhq',
66
+ 'size': (512, 512)
67
+ },
68
+ 'restoreformer':
69
+ {
70
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/restoreformer.onnx',
71
+ 'path': resolve_relative_path('../.assets/models/restoreformer.onnx'),
72
+ 'template': 'ffhq',
73
+ 'size': (512, 512)
74
+ }
75
+ }
76
+ OPTIONS : Optional[OptionsWithModel] = None
77
+
78
+
79
+ def get_frame_processor() -> Any:
80
+ global FRAME_PROCESSOR
81
+
82
+ with THREAD_LOCK:
83
+ if FRAME_PROCESSOR is None:
84
+ model_path = get_options('model').get('path')
85
+ FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = facefusion.globals.execution_providers)
86
+ return FRAME_PROCESSOR
87
+
88
+
89
+ def clear_frame_processor() -> None:
90
+ global FRAME_PROCESSOR
91
+
92
+ FRAME_PROCESSOR = None
93
+
94
+
95
+ def get_options(key : Literal['model']) -> Any:
96
+ global OPTIONS
97
+
98
+ if OPTIONS is None:
99
+ OPTIONS =\
100
+ {
101
+ 'model': MODELS[frame_processors_globals.face_enhancer_model]
102
+ }
103
+ return OPTIONS.get(key)
104
+
105
+
106
+ def set_options(key : Literal['model'], value : Any) -> None:
107
+ global OPTIONS
108
+
109
+ OPTIONS[key] = value
110
+
111
+
112
+ def register_args(program : ArgumentParser) -> None:
113
+ program.add_argument('--face-enhancer-model', help = wording.get('frame_processor_model_help'), dest = 'face_enhancer_model', default = 'gfpgan_1.4', choices = frame_processors_choices.face_enhancer_models)
114
+ program.add_argument('--face-enhancer-blend', help = wording.get('frame_processor_blend_help'), dest = 'face_enhancer_blend', type = int, default = 80, choices = frame_processors_choices.face_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.face_enhancer_blend_range))
115
+
116
+
117
+ def apply_args(program : ArgumentParser) -> None:
118
+ args = program.parse_args()
119
+ frame_processors_globals.face_enhancer_model = args.face_enhancer_model
120
+ frame_processors_globals.face_enhancer_blend = args.face_enhancer_blend
121
+
122
+
123
+ def pre_check() -> bool:
124
+ if not facefusion.globals.skip_download:
125
+ download_directory_path = resolve_relative_path('../.assets/models')
126
+ model_url = get_options('model').get('url')
127
+ print("下载文件",download_directory_path,model_url)
128
+ conditional_download(download_directory_path, [ model_url ])
129
+ return True
130
+
131
+
132
+ def pre_process(mode : ProcessMode) -> bool:
133
+ model_url = get_options('model').get('url')
134
+ model_path = get_options('model').get('path')
135
+ if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
136
+ update_status(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
137
+ return False
138
+ elif not is_file(model_path):
139
+ update_status(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
140
+ return False
141
+ if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
142
+ update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
143
+ return False
144
+ if mode == 'output' and not facefusion.globals.output_path:
145
+ update_status(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
146
+ return False
147
+ return True
148
+
149
+
150
+ def post_process() -> None:
151
+ clear_frame_processor()
152
+ clear_face_analyser()
153
+ clear_content_analyser()
154
+ read_static_image.cache_clear()
155
+
156
+
157
+ def enhance_face(target_face: Face, temp_frame: Frame) -> Frame:
158
+ frame_processor = get_frame_processor()
159
+ model_template = get_options('model').get('template')
160
+ model_size = get_options('model').get('size')
161
+ crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, model_template, model_size)
162
+ crop_frame = prepare_crop_frame(crop_frame)
163
+ frame_processor_inputs = {}
164
+ for frame_processor_input in frame_processor.get_inputs():
165
+ if frame_processor_input.name == 'input':
166
+ frame_processor_inputs[frame_processor_input.name] = crop_frame
167
+ if frame_processor_input.name == 'weight':
168
+ frame_processor_inputs[frame_processor_input.name] = numpy.array([ 1 ], dtype = numpy.double)
169
+ with THREAD_SEMAPHORE:
170
+ crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
171
+ crop_frame = normalize_crop_frame(crop_frame)
172
+ paste_frame = paste_back(temp_frame, crop_frame, affine_matrix, facefusion.globals.face_mask_blur, (0, 0, 0, 0))
173
+ temp_frame = blend_frame(temp_frame, paste_frame)
174
+ return temp_frame
175
+
176
+
177
+ def prepare_crop_frame(crop_frame : Frame) -> Frame:
178
+ crop_frame = crop_frame[:, :, ::-1] / 255.0
179
+ crop_frame = (crop_frame - 0.5) / 0.5
180
+ crop_frame = numpy.expand_dims(crop_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
181
+ return crop_frame
182
+
183
+
184
+ def normalize_crop_frame(crop_frame : Frame) -> Frame:
185
+ crop_frame = numpy.clip(crop_frame, -1, 1)
186
+ crop_frame = (crop_frame + 1) / 2
187
+ crop_frame = crop_frame.transpose(1, 2, 0)
188
+ crop_frame = (crop_frame * 255.0).round()
189
+ crop_frame = crop_frame.astype(numpy.uint8)[:, :, ::-1]
190
+ return crop_frame
191
+
192
+
193
+ def blend_frame(temp_frame : Frame, paste_frame : Frame) -> Frame:
194
+ face_enhancer_blend = 1 - (frame_processors_globals.face_enhancer_blend / 100)
195
+ temp_frame = cv2.addWeighted(temp_frame, face_enhancer_blend, paste_frame, 1 - face_enhancer_blend, 0)
196
+ return temp_frame
197
+
198
+
199
+ def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
200
+ many_faces = get_many_faces(temp_frame)
201
+ if many_faces:
202
+ for target_face in many_faces:
203
+ temp_frame = enhance_face(target_face, temp_frame)
204
+ return temp_frame
205
+
206
+
207
+ def process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Update_Process) -> None:
208
+ for temp_frame_path in temp_frame_paths:
209
+ temp_frame = read_image(temp_frame_path)
210
+ result_frame = process_frame(None, None, temp_frame)
211
+ write_image(temp_frame_path, result_frame)
212
+ update_progress()
213
+
214
+
215
+ def process_image(source_path : str, target_path : str, output_path : str) -> None:
216
+ target_frame = read_static_image(target_path)
217
+ result_frame = process_frame(None, None, target_frame)
218
+ write_image(output_path, result_frame)
219
+
220
+
221
+ def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
222
+ frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
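
blend_frame above inverts the user value: a face_enhancer_blend of 80 puts weight 0.2 on the original frame and 0.8 on the enhanced paste. A quick numeric check of that weighting on dummy data:

    import cv2
    import numpy

    temp_frame = numpy.full((2, 2, 3), 100, numpy.uint8)   # stand-in original
    paste_frame = numpy.full((2, 2, 3), 200, numpy.uint8)  # stand-in enhanced result
    weight = 1 - (80 / 100)  # user value 80 -> 0.2 for the original
    blended_frame = cv2.addWeighted(temp_frame, weight, paste_frame, 1 - weight, 0)
    print(blended_frame[0, 0])  # [180 180 180] = 0.2 * 100 + 0.8 * 200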
facefusion/processors/frame/modules/face_swapper.py ADDED
@@ -0,0 +1,283 @@
1
+ from typing import Any, List, Dict, Literal, Optional
2
+ from argparse import ArgumentParser
3
+ import threading
4
+ import numpy
5
+ import onnx
6
+ import onnxruntime
7
+ from onnx import numpy_helper
8
+
9
+ import facefusion.globals
10
+ import facefusion.processors.frame.core as frame_processors
11
+ from facefusion import wording
12
+ from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
13
+ from facefusion.face_helper import warp_face, paste_back
14
+ from facefusion.face_reference import get_face_reference
15
+ from facefusion.content_analyser import clear_content_analyser
16
+ from facefusion.typing import Face, Frame, Update_Process, ProcessMode, ModelValue, OptionsWithModel, Embedding
17
+ from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video, is_file, is_download_done, update_status
18
+ from facefusion.vision import read_image, read_static_image, write_image
19
+ from facefusion.processors.frame import globals as frame_processors_globals
20
+ from facefusion.processors.frame import choices as frame_processors_choices
21
+
22
+ FRAME_PROCESSOR = None
23
+ MODEL_MATRIX = None
24
+ THREAD_LOCK : threading.Lock = threading.Lock()
25
+ NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_SWAPPER'
26
+ MODELS : Dict[str, ModelValue] =\
27
+ {
28
+ 'blendface_256':
29
+ {
30
+ 'type': 'blendface',
31
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/blendface_256.onnx',
32
+ 'path': resolve_relative_path('../.assets/models/blendface_256.onnx'),
33
+ 'template': 'ffhq',
34
+ 'size': (512, 256),
35
+ 'mean': [ 0.0, 0.0, 0.0 ],
36
+ 'standard_deviation': [ 1.0, 1.0, 1.0 ]
37
+ },
38
+ 'inswapper_128':
39
+ {
40
+ 'type': 'inswapper',
41
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128.onnx',
42
+ 'path': resolve_relative_path('../.assets/models/inswapper_128.onnx'),
43
+ 'template': 'arcface_v2',
44
+ 'size': (128, 128),
45
+ 'mean': [ 0.0, 0.0, 0.0 ],
46
+ 'standard_deviation': [ 1.0, 1.0, 1.0 ]
47
+ },
48
+ 'inswapper_128_fp16':
49
+ {
50
+ 'type': 'inswapper',
51
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128_fp16.onnx',
52
+ 'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.onnx'),
53
+ 'template': 'arcface_v2',
54
+ 'size': (128, 128),
55
+ 'mean': [ 0.0, 0.0, 0.0 ],
56
+ 'standard_deviation': [ 1.0, 1.0, 1.0 ]
57
+ },
58
+ 'simswap_256':
59
+ {
60
+ 'type': 'simswap',
61
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/simswap_256.onnx',
62
+ 'path': resolve_relative_path('../.assets/models/simswap_256.onnx'),
63
+ 'template': 'arcface_v1',
64
+ 'size': (112, 256),
65
+ 'mean': [ 0.485, 0.456, 0.406 ],
66
+ 'standard_deviation': [ 0.229, 0.224, 0.225 ]
67
+ },
68
+ 'simswap_512_unofficial':
69
+ {
70
+ 'type': 'simswap',
71
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/simswap_512_unofficial.onnx',
72
+ 'path': resolve_relative_path('../.assets/models/simswap_512_unofficial.onnx'),
73
+ 'template': 'arcface_v1',
74
+ 'size': (112, 512),
75
+ 'mean': [ 0.0, 0.0, 0.0 ],
76
+ 'standard_deviation': [ 1.0, 1.0, 1.0 ]
77
+ }
78
+ }
79
+ OPTIONS : Optional[OptionsWithModel] = None
80
+
81
+
82
+ def get_frame_processor() -> Any:
83
+ global FRAME_PROCESSOR
84
+
85
+ with THREAD_LOCK:
86
+ if FRAME_PROCESSOR is None:
87
+ model_path = get_options('model').get('path')
88
+ FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = facefusion.globals.execution_providers)
89
+ return FRAME_PROCESSOR
90
+
91
+
92
+ def clear_frame_processor() -> None:
93
+ global FRAME_PROCESSOR
94
+
95
+ FRAME_PROCESSOR = None
96
+
97
+
98
+ def get_model_matrix() -> Any:
+     global MODEL_MATRIX
+
+     with THREAD_LOCK:
+         if MODEL_MATRIX is None:
+             model_path = get_options('model').get('path')
+             model = onnx.load(model_path)
+             MODEL_MATRIX = numpy_helper.to_array(model.graph.initializer[-1])
+     return MODEL_MATRIX
+
+
+ def clear_model_matrix() -> None:
+     global MODEL_MATRIX
+
+     MODEL_MATRIX = None
+
+
+ def get_options(key : Literal['model']) -> Any:
+     global OPTIONS
+
+     if OPTIONS is None:
+         OPTIONS =\
+         {
+             'model': MODELS[frame_processors_globals.face_swapper_model]
+         }
+     return OPTIONS.get(key)
+
+
+ def set_options(key : Literal['model'], value : Any) -> None:
+     global OPTIONS
+
+     OPTIONS[key] = value
+
+
+ def register_args(program : ArgumentParser) -> None:
+     program.add_argument('--face-swapper-model', help = wording.get('frame_processor_model_help'), dest = 'face_swapper_model', default = 'inswapper_128', choices = frame_processors_choices.face_swapper_models)
+
+
+ def apply_args(program : ArgumentParser) -> None:
+     args = program.parse_args()
+     frame_processors_globals.face_swapper_model = args.face_swapper_model
+     if args.face_swapper_model == 'blendface_256':
+         facefusion.globals.face_recognizer_model = 'arcface_blendface'
+     if args.face_swapper_model == 'inswapper_128' or args.face_swapper_model == 'inswapper_128_fp16':
+         facefusion.globals.face_recognizer_model = 'arcface_inswapper'
+     if args.face_swapper_model == 'simswap_256' or args.face_swapper_model == 'simswap_512_unofficial':
+         facefusion.globals.face_recognizer_model = 'arcface_simswap'
+
+
+ def pre_check() -> bool:
+     if not facefusion.globals.skip_download:
+         download_directory_path = resolve_relative_path('../.assets/models')
+         model_url = get_options('model').get('url')
+         conditional_download(download_directory_path, [ model_url ])
+     return True
+
+
+ def pre_process(mode : ProcessMode) -> bool:
+     model_url = get_options('model').get('url')
+     model_path = get_options('model').get('path')
+     if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
+         update_status(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
+         return False
+     elif not is_file(model_path):
+         update_status(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
+         return False
+     if not is_image(facefusion.globals.source_path):
+         update_status(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME)
+         return False
+     elif not get_one_face(read_static_image(facefusion.globals.source_path)):
+         update_status(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME)
+         return False
+     if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
+         update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
+         return False
+     if mode == 'output' and not facefusion.globals.output_path:
+         update_status(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
+         return False
+     return True
+
+
+ def post_process() -> None:
+     clear_frame_processor()
+     clear_model_matrix()
+     clear_face_analyser()
+     clear_content_analyser()
+     read_static_image.cache_clear()
+
+
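+ # Swap pipeline: align the target face to the model template, normalize the crop,
+ # run the model with the prepared source (frame or embedding), then denormalize
+ # the output and paste it back under the configured face mask.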
+ def swap_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
+     frame_processor = get_frame_processor()
+     model_template = get_options('model').get('template')
+     model_size = get_options('model').get('size')
+     model_type = get_options('model').get('type')
+     crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, model_template, model_size)
+     crop_frame = prepare_crop_frame(crop_frame)
+     frame_processor_inputs = {}
+     for frame_processor_input in frame_processor.get_inputs():
+         if frame_processor_input.name == 'source':
+             if model_type == 'blendface':
+                 frame_processor_inputs[frame_processor_input.name] = prepare_source_frame(source_face)
+             else:
+                 frame_processor_inputs[frame_processor_input.name] = prepare_source_embedding(source_face)
+         if frame_processor_input.name == 'target':
+             frame_processor_inputs[frame_processor_input.name] = crop_frame
+     crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
+     crop_frame = normalize_crop_frame(crop_frame)
+     temp_frame = paste_back(temp_frame, crop_frame, affine_matrix, facefusion.globals.face_mask_blur, facefusion.globals.face_mask_padding)
+     return temp_frame
+
+
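+ # blendface consumes the aligned source face as pixels: flip BGR to RGB, scale to
+ # [0, 1] and reshape to a NCHW float32 batch.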
+ def prepare_source_frame(source_face : Face) -> numpy.ndarray[Any, Any]:
+     source_frame = read_static_image(facefusion.globals.source_path)
+     source_frame, _ = warp_face(source_frame, source_face.kps, 'arcface_v2', (112, 112))
+     source_frame = source_frame[:, :, ::-1] / 255.0
+     source_frame = source_frame.transpose(2, 0, 1)
+     source_frame = numpy.expand_dims(source_frame, axis = 0).astype(numpy.float32)
+     return source_frame
+
+
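+ # inswapper expects the raw embedding projected through the model matrix and scaled
+ # by its norm; simswap consumes the pre-normalized embedding as-is.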
+ def prepare_source_embedding(source_face : Face) -> Embedding:
+     model_type = get_options('model').get('type')
+     if model_type == 'inswapper':
+         model_matrix = get_model_matrix()
+         source_embedding = source_face.embedding.reshape((1, -1))
+         source_embedding = numpy.dot(source_embedding, model_matrix) / numpy.linalg.norm(source_embedding)
+     else:
+         source_embedding = source_face.normed_embedding.reshape(1, -1)
+     return source_embedding
+
+
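+ # prepare_crop_frame and normalize_crop_frame are inverses: into the model's
+ # normalized RGB NCHW space on the way in, back to a BGR uint8 image on the way out.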
+ def prepare_crop_frame(crop_frame : Frame) -> Frame:
+     model_mean = get_options('model').get('mean')
+     model_standard_deviation = get_options('model').get('standard_deviation')
+     crop_frame = crop_frame[:, :, ::-1] / 255.0
+     crop_frame = (crop_frame - model_mean) / model_standard_deviation
+     crop_frame = crop_frame.transpose(2, 0, 1)
+     crop_frame = numpy.expand_dims(crop_frame, axis = 0).astype(numpy.float32)
+     return crop_frame
+
+
+ def normalize_crop_frame(crop_frame : Frame) -> Frame:
+     crop_frame = crop_frame.transpose(1, 2, 0)
+     crop_frame = (crop_frame * 255.0).round()
+     crop_frame = crop_frame[:, :, ::-1].astype(numpy.uint8)
+     return crop_frame
+
+
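+ # Honour the face selector mode: 'reference' swaps faces similar to the reference
+ # face, 'one' swaps the first detected face, 'many' swaps every detected face.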
+ def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
+     if 'reference' in facefusion.globals.face_selector_mode:
+         similar_faces = find_similar_faces(temp_frame, reference_face, facefusion.globals.reference_face_distance)
+         if similar_faces:
+             for similar_face in similar_faces:
+                 temp_frame = swap_face(source_face, similar_face, temp_frame)
+     if 'one' in facefusion.globals.face_selector_mode:
+         target_face = get_one_face(temp_frame)
+         if target_face:
+             temp_frame = swap_face(source_face, target_face, temp_frame)
+     if 'many' in facefusion.globals.face_selector_mode:
+         many_faces = get_many_faces(temp_frame)
+         if many_faces:
+             for target_face in many_faces:
+                 temp_frame = swap_face(source_face, target_face, temp_frame)
+     return temp_frame
+
+
+ def process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Update_Process) -> None:
+     source_face = get_one_face(read_static_image(source_path))
+     reference_face = get_face_reference() if 'reference' in facefusion.globals.face_selector_mode else None
+     for temp_frame_path in temp_frame_paths:
+         temp_frame = read_image(temp_frame_path)
+         result_frame = process_frame(source_face, reference_face, temp_frame)
+         write_image(temp_frame_path, result_frame)
+         update_progress()
+
+
+ def process_image(source_path : str, target_path : str, output_path : str) -> None:
+     source_face = get_one_face(read_static_image(source_path))
+     target_frame = read_static_image(target_path)
+     reference_face = get_one_face(target_frame, facefusion.globals.reference_face_position) if 'reference' in facefusion.globals.face_selector_mode else None
+     result_frame = process_frame(source_face, reference_face, target_frame)
+     write_image(output_path, result_frame)
+
+
+ def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
+     frame_processors.multi_process_frames(source_path, temp_frame_paths, process_frames)
facefusion/processors/frame/modules/frame_enhancer.py ADDED
@@ -0,0 +1,165 @@
+ from typing import Any, List, Dict, Literal, Optional
+ from argparse import ArgumentParser
+ import threading
+ import cv2
+ from basicsr.archs.rrdbnet_arch import RRDBNet
+ from realesrgan import RealESRGANer
+
+ import facefusion.globals
+ import facefusion.processors.frame.core as frame_processors
+ from facefusion import wording
+ from facefusion.face_analyser import clear_face_analyser
+ from facefusion.content_analyser import clear_content_analyser
+ from facefusion.typing import Frame, Face, Update_Process, ProcessMode, ModelValue, OptionsWithModel
+ from facefusion.utilities import conditional_download, resolve_relative_path, is_file, is_download_done, map_device, create_metavar, update_status
+ from facefusion.vision import read_image, read_static_image, write_image
+ from facefusion.processors.frame import globals as frame_processors_globals
+ from facefusion.processors.frame import choices as frame_processors_choices
+
+ FRAME_PROCESSOR = None
+ THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
+ THREAD_LOCK : threading.Lock = threading.Lock()
+ NAME = 'FACEFUSION.FRAME_PROCESSOR.FRAME_ENHANCER'
+ MODELS : Dict[str, ModelValue] =\
+ {
+     'real_esrgan_x2plus':
+     {
+         'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/real_esrgan_x2plus.pth',
+         'path': resolve_relative_path('../.assets/models/real_esrgan_x2plus.pth'),
+         'scale': 2
+     },
+     'real_esrgan_x4plus':
+     {
+         'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/real_esrgan_x4plus.pth',
+         'path': resolve_relative_path('../.assets/models/real_esrgan_x4plus.pth'),
+         'scale': 4
+     },
+     'real_esrnet_x4plus':
+     {
+         'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/real_esrnet_x4plus.pth',
+         'path': resolve_relative_path('../.assets/models/real_esrnet_x4plus.pth'),
+         'scale': 4
+     }
+ }
+ OPTIONS : Optional[OptionsWithModel] = None
+
+
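+ # Build one shared Real-ESRGAN upscaler per process on the device mapped from the
+ # active execution providers; THREAD_LOCK guards the lazy construction.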
+ def get_frame_processor() -> Any:
+     global FRAME_PROCESSOR
+
+     with THREAD_LOCK:
+         if FRAME_PROCESSOR is None:
+             model_path = get_options('model').get('path')
+             model_scale = get_options('model').get('scale')
+             FRAME_PROCESSOR = RealESRGANer(
+                 model_path = model_path,
+                 model = RRDBNet(
+                     num_in_ch = 3,
+                     num_out_ch = 3,
+                     scale = model_scale
+                 ),
+                 device = map_device(facefusion.globals.execution_providers),
+                 scale = model_scale
+             )
+     return FRAME_PROCESSOR
+
+
+ def clear_frame_processor() -> None:
+     global FRAME_PROCESSOR
+
+     FRAME_PROCESSOR = None
+
+
+ def get_options(key : Literal['model']) -> Any:
+     global OPTIONS
+
+     if OPTIONS is None:
+         OPTIONS =\
+         {
+             'model': MODELS[frame_processors_globals.frame_enhancer_model]
+         }
+     return OPTIONS.get(key)
+
+
+ def set_options(key : Literal['model'], value : Any) -> None:
+     global OPTIONS
+
+     OPTIONS[key] = value
+
+
+ def register_args(program : ArgumentParser) -> None:
+     program.add_argument('--frame-enhancer-model', help = wording.get('frame_processor_model_help'), dest = 'frame_enhancer_model', default = 'real_esrgan_x2plus', choices = frame_processors_choices.frame_enhancer_models)
+     program.add_argument('--frame-enhancer-blend', help = wording.get('frame_processor_blend_help'), dest = 'frame_enhancer_blend', type = int, default = 80, choices = frame_processors_choices.frame_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.frame_enhancer_blend_range))
+
+
+ def apply_args(program : ArgumentParser) -> None:
+     args = program.parse_args()
+     frame_processors_globals.frame_enhancer_model = args.frame_enhancer_model
+     frame_processors_globals.frame_enhancer_blend = args.frame_enhancer_blend
+
+
+ def pre_check() -> bool:
+     if not facefusion.globals.skip_download:
+         download_directory_path = resolve_relative_path('../.assets/models')
+         model_url = get_options('model').get('url')
+         conditional_download(download_directory_path, [ model_url ])
+     return True
+
+
+ def pre_process(mode : ProcessMode) -> bool:
+     model_url = get_options('model').get('url')
+     model_path = get_options('model').get('path')
+     if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
+         update_status(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
+         return False
+     elif not is_file(model_path):
+         update_status(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
+         return False
+     if mode == 'output' and not facefusion.globals.output_path:
+         update_status(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
+         return False
+     return True
+
+
+ def post_process() -> None:
+     clear_frame_processor()
+     clear_face_analyser()
+     clear_content_analyser()
+     read_static_image.cache_clear()
+
+
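+ # THREAD_SEMAPHORE serializes enhance() calls so only one frame occupies the
+ # upscaler at a time, even when multiple worker threads are processing frames.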
+ def enhance_frame(temp_frame : Frame) -> Frame:
+     with THREAD_SEMAPHORE:
+         paste_frame, _ = get_frame_processor().enhance(temp_frame)
+         temp_frame = blend_frame(temp_frame, paste_frame)
+     return temp_frame
+
+
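+ # With the default blend of 80, the result is 80% enhanced pixels and 20% original
+ # frame, after resizing the original to the enhanced resolution.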
+ def blend_frame(temp_frame : Frame, paste_frame : Frame) -> Frame:
+     frame_enhancer_blend = 1 - (frame_processors_globals.frame_enhancer_blend / 100)
+     paste_frame_height, paste_frame_width = paste_frame.shape[0:2]
+     temp_frame = cv2.resize(temp_frame, (paste_frame_width, paste_frame_height))
+     temp_frame = cv2.addWeighted(temp_frame, frame_enhancer_blend, paste_frame, 1 - frame_enhancer_blend, 0)
+     return temp_frame
+
+
+ def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
+     return enhance_frame(temp_frame)
+
+
+ def process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Update_Process) -> None:
+     for temp_frame_path in temp_frame_paths:
+         temp_frame = read_image(temp_frame_path)
+         result_frame = process_frame(None, None, temp_frame)
+         write_image(temp_frame_path, result_frame)
+         update_progress()
+
+
+ def process_image(source_path : str, target_path : str, output_path : str) -> None:
+     target_frame = read_static_image(target_path)
+     result = process_frame(None, None, target_frame)
+     write_image(output_path, result)
+
+
+ def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
+     frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
facefusion/processors/frame/typings.py ADDED
@@ -0,0 +1,7 @@
+ from typing import Literal
+
+ FaceSwapperModel = Literal['blendface_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial']
+ FaceEnhancerModel = Literal['codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer']
+ FrameEnhancerModel = Literal['real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus']
+
+ FaceDebuggerItem = Literal['bbox', 'kps', 'face-mask', 'score']
facefusion/typing.py ADDED
@@ -0,0 +1,41 @@
+ from collections import namedtuple
+ from typing import Any, Literal, Callable, List, Tuple, Dict, TypedDict
+ import numpy
+
+ Bbox = numpy.ndarray[Any, Any]
+ Kps = numpy.ndarray[Any, Any]
+ Score = float
+ Embedding = numpy.ndarray[Any, Any]
+ Face = namedtuple('Face',
+ [
+     'bbox',
+     'kps',
+     'score',
+     'embedding',
+     'normed_embedding',
+     'gender',
+     'age'
+ ])
+ Frame = numpy.ndarray[Any, Any]
+ Matrix = numpy.ndarray[Any, Any]
+ Padding = Tuple[int, int, int, int]
+
+ Update_Process = Callable[[], None]
+ Process_Frames = Callable[[str, List[str], Update_Process], None]
+
+ Template = Literal['arcface_v1', 'arcface_v2', 'ffhq']
+ ProcessMode = Literal['output', 'preview', 'stream']
+ FaceSelectorMode = Literal['reference', 'one', 'many']
+ FaceAnalyserOrder = Literal['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best']
+ FaceAnalyserAge = Literal['child', 'teen', 'adult', 'senior']
+ FaceAnalyserGender = Literal['male', 'female']
+ FaceDetectorModel = Literal['retinaface', 'yunet']
+ FaceRecognizerModel = Literal['arcface_blendface', 'arcface_inswapper', 'arcface_simswap']
+ TempFrameFormat = Literal['jpg', 'png']
+ OutputVideoEncoder = Literal['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc']
+
+ ModelValue = Dict[str, Any]
+ OptionsWithModel = TypedDict('OptionsWithModel',
+ {
+     'model' : ModelValue
+ })
facefusion/uis/__init__.py ADDED
File without changes
facefusion/uis/assets/fixes.css ADDED
@@ -0,0 +1,7 @@
+ :root:root:root button:not([class])
+ {
+     border-radius: 0.375rem;
+     float: left;
+     overflow: hidden;
+     width: 100%;
+ }
facefusion/uis/assets/overrides.css ADDED
@@ -0,0 +1,44 @@
+ :root:root:root input[type="number"]
+ {
+     max-width: 6rem;
+ }
+
+ :root:root:root [type="checkbox"],
+ :root:root:root [type="radio"]
+ {
+     border-radius: 50%;
+     height: 1.125rem;
+     width: 1.125rem;
+ }
+
+ :root:root:root input[type="range"]
+ {
+     height: 0.5rem;
+ }
+
+ :root:root:root input[type="range"]::-moz-range-thumb,
+ :root:root:root input[type="range"]::-webkit-slider-thumb
+ {
+     background: var(--neutral-300);
+     border: unset;
+     border-radius: 50%;
+     height: 1.125rem;
+     width: 1.125rem;
+ }
+
+ :root:root:root input[type="range"]::-webkit-slider-thumb
+ {
+     margin-top: 0.375rem;
+ }
+
+ :root:root:root .grid-wrap.fixed-height
+ {
+     min-height: unset;
+ }
+
+ :root:root:root .grid-container
+ {
+     grid-auto-rows: minmax(5em, 1fr);
+     grid-template-columns: repeat(var(--grid-cols), minmax(5em, 1fr));
+     grid-template-rows: repeat(var(--grid-rows), minmax(5em, 1fr));
+ }
facefusion/uis/choices.py ADDED
@@ -0,0 +1,7 @@
+ from typing import List
+
+ from facefusion.uis.typing import WebcamMode
+
+ common_options : List[str] = [ 'keep-fps', 'keep-temp', 'skip-audio', 'skip-download' ]
+ webcam_modes : List[WebcamMode] = [ 'inline', 'udp', 'v4l2' ]
+ webcam_resolutions : List[str] = [ '320x240', '640x480', '800x600', '1024x768', '1280x720', '1280x960', '1920x1080', '2560x1440', '3840x2160' ]
facefusion/uis/components/__init__.py ADDED
File without changes
facefusion/uis/components/about.py ADDED
@@ -0,0 +1,23 @@
+ from typing import Optional
+ import gradio
+
+ from facefusion import metadata, wording
+
+ ABOUT_BUTTON : Optional[gradio.Button] = None
+ DONATE_BUTTON : Optional[gradio.Button] = None
+
+
+ def render() -> None:
+     global ABOUT_BUTTON
+     global DONATE_BUTTON
+
+     ABOUT_BUTTON = gradio.Button(
+         value = metadata.get('name') + ' ' + metadata.get('version'),
+         variant = 'primary',
+         link = metadata.get('url')
+     )
+     DONATE_BUTTON = gradio.Button(
+         value = wording.get('donate_button_label'),
+         link = 'https://donate.facefusion.io',
+         size = 'sm'
+     )
facefusion/uis/components/benchmark.py ADDED
@@ -0,0 +1,131 @@
+ from typing import Any, Optional, List, Dict, Generator
+ import time
+ import tempfile
+ import statistics
+ import gradio
+
+ import facefusion.globals
+ from facefusion import wording
+ from facefusion.face_analyser import get_face_analyser
+ from facefusion.face_cache import clear_faces_cache
+ from facefusion.processors.frame.core import get_frame_processors_modules
+ from facefusion.vision import count_video_frame_total
+ from facefusion.core import limit_resources, conditional_process
+ from facefusion.utilities import normalize_output_path, clear_temp
+ from facefusion.uis.core import get_ui_component
+
+ BENCHMARK_RESULTS_DATAFRAME : Optional[gradio.Dataframe] = None
+ BENCHMARK_START_BUTTON : Optional[gradio.Button] = None
+ BENCHMARK_CLEAR_BUTTON : Optional[gradio.Button] = None
+ BENCHMARKS : Dict[str, str] =\
+ {
+     '240p': '.assets/examples/target-240p.mp4',
+     '360p': '.assets/examples/target-360p.mp4',
+     '540p': '.assets/examples/target-540p.mp4',
+     '720p': '.assets/examples/target-720p.mp4',
+     '1080p': '.assets/examples/target-1080p.mp4',
+     '1440p': '.assets/examples/target-1440p.mp4',
+     '2160p': '.assets/examples/target-2160p.mp4'
+ }
+
+
+ def render() -> None:
+     global BENCHMARK_RESULTS_DATAFRAME
+     global BENCHMARK_START_BUTTON
+     global BENCHMARK_CLEAR_BUTTON
+
+     BENCHMARK_RESULTS_DATAFRAME = gradio.Dataframe(
+         label = wording.get('benchmark_results_dataframe_label'),
+         headers =
+         [
+             'target_path',
+             'benchmark_cycles',
+             'average_run',
+             'fastest_run',
+             'slowest_run',
+             'relative_fps'
+         ],
+         datatype =
+         [
+             'str',
+             'number',
+             'number',
+             'number',
+             'number',
+             'number'
+         ]
+     )
+     BENCHMARK_START_BUTTON = gradio.Button(
+         value = wording.get('start_button_label'),
+         variant = 'primary',
+         size = 'sm'
+     )
+     BENCHMARK_CLEAR_BUTTON = gradio.Button(
+         value = wording.get('clear_button_label'),
+         size = 'sm'
+     )
+
+
+ def listen() -> None:
+     benchmark_runs_checkbox_group = get_ui_component('benchmark_runs_checkbox_group')
+     benchmark_cycles_slider = get_ui_component('benchmark_cycles_slider')
+     if benchmark_runs_checkbox_group and benchmark_cycles_slider:
+         BENCHMARK_START_BUTTON.click(start, inputs = [ benchmark_runs_checkbox_group, benchmark_cycles_slider ], outputs = BENCHMARK_RESULTS_DATAFRAME)
+     BENCHMARK_CLEAR_BUTTON.click(clear, outputs = BENCHMARK_RESULTS_DATAFRAME)
+
+
+ def start(benchmark_runs : List[str], benchmark_cycles : int) -> Generator[List[Any], None, None]:
+     facefusion.globals.source_path = '.assets/examples/source.jpg'
+     target_paths = [ BENCHMARKS[benchmark_run] for benchmark_run in benchmark_runs if benchmark_run in BENCHMARKS ]
+     benchmark_results = []
+     if target_paths:
+         pre_process()
+         for target_path in target_paths:
+             benchmark_results.append(benchmark(target_path, benchmark_cycles))
+             yield benchmark_results
+         post_process()
+
+
+ def pre_process() -> None:
+     limit_resources()
+     get_face_analyser()
+     for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+         frame_processor_module.get_frame_processor()
+
+
+ def post_process() -> None:
+     clear_faces_cache()
+
+
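+ # Each cycle runs the full pipeline over the target clip; relative_fps averages
+ # frames-per-second across cycles, next to average/fastest/slowest wall times.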
+ def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]:
+     process_times = []
+     total_fps = 0.0
+     for i in range(benchmark_cycles):
+         facefusion.globals.target_path = target_path
+         facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_path, facefusion.globals.target_path, tempfile.gettempdir())
+         video_frame_total = count_video_frame_total(facefusion.globals.target_path)
+         start_time = time.perf_counter()
+         conditional_process()
+         end_time = time.perf_counter()
+         process_time = end_time - start_time
+         total_fps += video_frame_total / process_time
+         process_times.append(process_time)
+     average_run = round(statistics.mean(process_times), 2)
+     fastest_run = round(min(process_times), 2)
+     slowest_run = round(max(process_times), 2)
+     relative_fps = round(total_fps / benchmark_cycles, 2)
+     return\
+     [
+         facefusion.globals.target_path,
+         benchmark_cycles,
+         average_run,
+         fastest_run,
+         slowest_run,
+         relative_fps
+     ]
+
+
+ def clear() -> gradio.Dataframe:
+     if facefusion.globals.target_path:
+         clear_temp(facefusion.globals.target_path)
+     return gradio.Dataframe(value = None)
facefusion/uis/components/benchmark_options.py ADDED
@@ -0,0 +1,29 @@
+ from typing import Optional
+ import gradio
+
+ from facefusion import wording
+ from facefusion.uis.core import register_ui_component
+ from facefusion.uis.components.benchmark import BENCHMARKS
+
+ BENCHMARK_RUNS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+ BENCHMARK_CYCLES_SLIDER : Optional[gradio.Slider] = None
+
+
+ def render() -> None:
+     global BENCHMARK_RUNS_CHECKBOX_GROUP
+     global BENCHMARK_CYCLES_SLIDER
+
+     BENCHMARK_RUNS_CHECKBOX_GROUP = gradio.CheckboxGroup(
+         label = wording.get('benchmark_runs_checkbox_group_label'),
+         value = list(BENCHMARKS.keys()),
+         choices = list(BENCHMARKS.keys())
+     )
+     BENCHMARK_CYCLES_SLIDER = gradio.Slider(
+         label = wording.get('benchmark_cycles_slider_label'),
+         value = 3,
+         step = 1,
+         minimum = 1,
+         maximum = 10
+     )
+     register_ui_component('benchmark_runs_checkbox_group', BENCHMARK_RUNS_CHECKBOX_GROUP)
+     register_ui_component('benchmark_cycles_slider', BENCHMARK_CYCLES_SLIDER)
facefusion/uis/components/common_options.py ADDED
@@ -0,0 +1,38 @@
+ from typing import Optional, List
+ import gradio
+
+ import facefusion.globals
+ from facefusion import wording
+ from facefusion.uis import choices as uis_choices
+
+ COMMON_OPTIONS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+
+
+ def render() -> None:
+     global COMMON_OPTIONS_CHECKBOX_GROUP
+
+     value = []
+     if facefusion.globals.keep_fps:
+         value.append('keep-fps')
+     if facefusion.globals.keep_temp:
+         value.append('keep-temp')
+     if facefusion.globals.skip_audio:
+         value.append('skip-audio')
+     if facefusion.globals.skip_download:
+         value.append('skip-download')
+     COMMON_OPTIONS_CHECKBOX_GROUP = gradio.CheckboxGroup(
+         label = wording.get('common_options_checkbox_group_label'),
+         choices = uis_choices.common_options,
+         value = value
+     )
+
+
+ def listen() -> None:
+     COMMON_OPTIONS_CHECKBOX_GROUP.change(update, inputs = COMMON_OPTIONS_CHECKBOX_GROUP)
+
+
+ def update(common_options : List[str]) -> None:
+     facefusion.globals.keep_fps = 'keep-fps' in common_options
+     facefusion.globals.keep_temp = 'keep-temp' in common_options
+     facefusion.globals.skip_audio = 'skip-audio' in common_options
+     facefusion.globals.skip_download = 'skip-download' in common_options
facefusion/uis/components/execution.py ADDED
@@ -0,0 +1,34 @@
+ from typing import List, Optional
+ import gradio
+ import onnxruntime
+
+ import facefusion.globals
+ from facefusion import wording
+ from facefusion.face_analyser import clear_face_analyser
+ from facefusion.processors.frame.core import clear_frame_processors_modules
+ from facefusion.utilities import encode_execution_providers, decode_execution_providers
+
+ EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+
+
+ def render() -> None:
+     global EXECUTION_PROVIDERS_CHECKBOX_GROUP
+
+     EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup(
+         label = wording.get('execution_providers_checkbox_group_label'),
+         choices = encode_execution_providers(onnxruntime.get_available_providers()),
+         value = encode_execution_providers(facefusion.globals.execution_providers)
+     )
+
+
+ def listen() -> None:
+     EXECUTION_PROVIDERS_CHECKBOX_GROUP.change(update_execution_providers, inputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP, outputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP)
+
+
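+ # Changing providers drops all cached models so the next access rebuilds the ONNX
+ # sessions on the new providers; an empty selection falls back to every available one.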
+ def update_execution_providers(execution_providers : List[str]) -> gradio.CheckboxGroup:
+     clear_face_analyser()
+     clear_frame_processors_modules()
+     if not execution_providers:
+         execution_providers = encode_execution_providers(onnxruntime.get_available_providers())
+     facefusion.globals.execution_providers = decode_execution_providers(execution_providers)
+     return gradio.CheckboxGroup(value = execution_providers)
facefusion/uis/components/execution_queue_count.py ADDED
@@ -0,0 +1,28 @@
+ from typing import Optional
+ import gradio
+
+ import facefusion.globals
+ import facefusion.choices
+ from facefusion import wording
+
+ EXECUTION_QUEUE_COUNT_SLIDER : Optional[gradio.Slider] = None
+
+
+ def render() -> None:
+     global EXECUTION_QUEUE_COUNT_SLIDER
+
+     EXECUTION_QUEUE_COUNT_SLIDER = gradio.Slider(
+         label = wording.get('execution_queue_count_slider_label'),
+         value = facefusion.globals.execution_queue_count,
+         step = facefusion.choices.execution_queue_count_range[1] - facefusion.choices.execution_queue_count_range[0],
+         minimum = facefusion.choices.execution_queue_count_range[0],
+         maximum = facefusion.choices.execution_queue_count_range[-1]
+     )
+
+
+ def listen() -> None:
+     EXECUTION_QUEUE_COUNT_SLIDER.change(update_execution_queue_count, inputs = EXECUTION_QUEUE_COUNT_SLIDER)
+
+
+ def update_execution_queue_count(execution_queue_count : int = 1) -> None:
+     facefusion.globals.execution_queue_count = execution_queue_count
facefusion/uis/components/execution_thread_count.py ADDED
@@ -0,0 +1,29 @@
+ from typing import Optional
+ import gradio
+
+ import facefusion.globals
+ import facefusion.choices
+ from facefusion import wording
+
+ EXECUTION_THREAD_COUNT_SLIDER : Optional[gradio.Slider] = None
+
+
+ def render() -> None:
+     global EXECUTION_THREAD_COUNT_SLIDER
+
+     EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider(
+         label = wording.get('execution_thread_count_slider_label'),
+         value = facefusion.globals.execution_thread_count,
+         step = facefusion.choices.execution_thread_count_range[1] - facefusion.choices.execution_thread_count_range[0],
+         minimum = facefusion.choices.execution_thread_count_range[0],
+         maximum = facefusion.choices.execution_thread_count_range[-1]
+     )
+
+
+ def listen() -> None:
+     EXECUTION_THREAD_COUNT_SLIDER.change(update_execution_thread_count, inputs = EXECUTION_THREAD_COUNT_SLIDER)
+
+
+ def update_execution_thread_count(execution_thread_count : int = 1) -> None:
+     facefusion.globals.execution_thread_count = execution_thread_count
+