Rohit Kochikkat Francis committed on
Commit
e7cae83
1 Parent(s): 4b144b9
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .editorconfig +8 -0
  2. .flake8 +3 -0
  3. .gitattributes +1 -0
  4. .github/FUNDING.yml +2 -0
  5. .github/preview.png +3 -0
  6. .github/workflows/ci.yml +35 -0
  7. .gitignore +3 -0
  8. LICENSE.md +3 -0
  9. README.md +112 -13
  10. facefusion.ini +72 -0
  11. facefusion/__init__.py +0 -0
  12. facefusion/__pycache__/__init__.cpython-310.pyc +0 -0
  13. facefusion/__pycache__/__init__.cpython-311.pyc +0 -0
  14. facefusion/__pycache__/audio.cpython-310.pyc +0 -0
  15. facefusion/__pycache__/choices.cpython-310.pyc +0 -0
  16. facefusion/__pycache__/common_helper.cpython-310.pyc +0 -0
  17. facefusion/__pycache__/config.cpython-310.pyc +0 -0
  18. facefusion/__pycache__/content_analyser.cpython-310.pyc +0 -0
  19. facefusion/__pycache__/core.cpython-310.pyc +0 -0
  20. facefusion/__pycache__/download.cpython-310.pyc +0 -0
  21. facefusion/__pycache__/execution.cpython-310.pyc +0 -0
  22. facefusion/__pycache__/face_analyser.cpython-310.pyc +0 -0
  23. facefusion/__pycache__/face_helper.cpython-310.pyc +0 -0
  24. facefusion/__pycache__/face_masker.cpython-310.pyc +0 -0
  25. facefusion/__pycache__/face_store.cpython-310.pyc +0 -0
  26. facefusion/__pycache__/ffmpeg.cpython-310.pyc +0 -0
  27. facefusion/__pycache__/filesystem.cpython-310.pyc +0 -0
  28. facefusion/__pycache__/globals.cpython-310.pyc +0 -0
  29. facefusion/__pycache__/installer.cpython-310.pyc +0 -0
  30. facefusion/__pycache__/installer.cpython-311.pyc +0 -0
  31. facefusion/__pycache__/logger.cpython-310.pyc +0 -0
  32. facefusion/__pycache__/memory.cpython-310.pyc +0 -0
  33. facefusion/__pycache__/metadata.cpython-310.pyc +0 -0
  34. facefusion/__pycache__/metadata.cpython-311.pyc +0 -0
  35. facefusion/__pycache__/normalizer.cpython-310.pyc +0 -0
  36. facefusion/__pycache__/process_manager.cpython-310.pyc +0 -0
  37. facefusion/__pycache__/statistics.cpython-310.pyc +0 -0
  38. facefusion/__pycache__/thread_helper.cpython-310.pyc +0 -0
  39. facefusion/__pycache__/typing.cpython-310.pyc +0 -0
  40. facefusion/__pycache__/vision.cpython-310.pyc +0 -0
  41. facefusion/__pycache__/voice_extractor.cpython-310.pyc +0 -0
  42. facefusion/__pycache__/wording.cpython-310.pyc +0 -0
  43. facefusion/__pycache__/wording.cpython-311.pyc +0 -0
  44. facefusion/audio.py +137 -0
  45. facefusion/choices.py +37 -0
  46. facefusion/common_helper.py +18 -0
  47. facefusion/config.py +92 -0
  48. facefusion/content_analyser.py +112 -0
  49. facefusion/core.py +416 -0
  50. facefusion/download.py +48 -0
.editorconfig ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ root = true
2
+
3
+ [*]
4
+ end_of_line = lf
5
+ insert_final_newline = true
6
+ indent_size = 4
7
+ indent_style = tab
8
+ trim_trailing_whitespace = true
.flake8 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [flake8]
2
+ select = E3, E4, F
3
+ per-file-ignores = facefusion/core.py:E402
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.png filter=lfs diff=lfs merge=lfs -text
.github/FUNDING.yml ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ github: henryruhs
2
+ custom: https://paypal.me/henryruhs
.github/preview.png ADDED

Git LFS Details

  • SHA256: b95e8f371bb61701095b97c76df17ba51f903c613f7ccc9b2195c4b0cef066c7
  • Pointer size: 132 Bytes
  • Size of remote file: 1.22 MB
.github/workflows/ci.yml ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: ci
2
+
3
+ on: [ push, pull_request ]
4
+
5
+ jobs:
6
+ lint:
7
+ runs-on: ubuntu-latest
8
+ steps:
9
+ - name: Checkout
10
+ uses: actions/checkout@v2
11
+ - name: Set up Python 3.10
12
+ uses: actions/setup-python@v2
13
+ with:
14
+ python-version: '3.10'
15
+ - run: pip install flake8
16
+ - run: pip install mypy
17
+ - run: flake8 run.py facefusion tests
18
+ - run: mypy run.py facefusion tests
19
+ test:
20
+ strategy:
21
+ matrix:
22
+ os: [ macos-latest, ubuntu-latest, windows-latest ]
23
+ runs-on: ${{ matrix.os }}
24
+ steps:
25
+ - name: Checkout
26
+ uses: actions/checkout@v2
27
+ - name: Set up ffmpeg
28
+ uses: FedericoCarboni/setup-ffmpeg@v2
29
+ - name: Set up Python 3.10
30
+ uses: actions/setup-python@v2
31
+ with:
32
+ python-version: '3.10'
33
+ - run: python install.py --onnxruntime default --skip-conda
34
+ - run: pip install pytest
35
+ - run: pytest
.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ .assets
2
+ .idea
3
+ .vscode
LICENSE.md ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ MIT license
2
+
3
+ Copyright (c) 2023 Henry Ruhs
README.md CHANGED
@@ -1,13 +1,112 @@
1
- ---
2
- title: Facefusion
3
- emoji: 📊
4
- colorFrom: red
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 4.27.0
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FaceFusion
2
+ ==========
3
+
4
+ > Next generation face swapper and enhancer.
5
+
6
+ [![Build Status](https://img.shields.io/github/actions/workflow/status/facefusion/facefusion/ci.yml.svg?branch=master)](https://github.com/facefusion/facefusion/actions?query=workflow:ci)
7
+ ![License](https://img.shields.io/badge/license-MIT-green)
8
+
9
+
10
+ Preview
11
+ -------
12
+
13
+ ![Preview](https://raw.githubusercontent.com/facefusion/facefusion/master/.github/preview.png?sanitize=true)
14
+
15
+
16
+ Installation
17
+ ------------
18
+
19
+ Be aware, the installation needs technical skills and is not for beginners. Please do not open platform and installation related issues on GitHub. We have a very helpful [Discord](https://join.facefusion.io) community that will guide you to complete the installation.
20
+
21
+ Get started with the [installation](https://docs.facefusion.io/installation) guide.
22
+
23
+
24
+ Usage
25
+ -----
26
+
27
+ Run the command:
28
+
29
+ ```
30
+ python run.py [options]
31
+
32
+ options:
33
+ -h, --help show this help message and exit
34
+ -s SOURCE_PATHS, --source SOURCE_PATHS choose single or multiple source images or audios
35
+ -t TARGET_PATH, --target TARGET_PATH choose single target image or video
36
+ -o OUTPUT_PATH, --output OUTPUT_PATH specify the output file or directory
37
+ -v, --version show program's version number and exit
38
+
39
+ misc:
40
+ --force-download force automate downloads and exit
41
+ --skip-download omit automate downloads and remote lookups
42
+ --headless run the program without a user interface
43
+ --log-level {error,warn,info,debug} adjust the message severity displayed in the terminal
44
+
45
+ execution:
46
+ --execution-providers EXECUTION_PROVIDERS [EXECUTION_PROVIDERS ...] accelerate the model inference using different providers (choices: cpu, ...)
47
+ --execution-thread-count [1-128] specify the amount of parallel threads while processing
48
+ --execution-queue-count [1-32] specify the amount of frames each thread is processing
49
+
50
+ memory:
51
+ --video-memory-strategy {strict,moderate,tolerant} balance fast frame processing and low VRAM usage
52
+ --system-memory-limit [0-128] limit the available RAM that can be used while processing
53
+
54
+ face analyser:
55
+ --face-analyser-order {left-right,right-left,top-bottom,bottom-top,small-large,large-small,best-worst,worst-best} specify the order in which the face analyser detects faces
56
+ --face-analyser-age {child,teen,adult,senior} filter the detected faces based on their age
57
+ --face-analyser-gender {female,male} filter the detected faces based on their gender
58
+ --face-detector-model {many,retinaface,scrfd,yoloface,yunet} choose the model responsible for detecting the face
59
+ --face-detector-size FACE_DETECTOR_SIZE specify the size of the frame provided to the face detector
60
+ --face-detector-score [0.0-1.0] filter the detected faces base on the confidence score
61
+ --face-landmarker-score [0.0-1.0] filter the detected landmarks base on the confidence score
62
+
63
+ face selector:
64
+ --face-selector-mode {many,one,reference} use reference based tracking or simple matching
65
+ --reference-face-position REFERENCE_FACE_POSITION specify the position used to create the reference face
66
+ --reference-face-distance [0.0-1.5] specify the desired similarity between the reference face and target face
67
+ --reference-frame-number REFERENCE_FRAME_NUMBER specify the frame used to create the reference face
68
+
69
+ face mask:
70
+ --face-mask-types FACE_MASK_TYPES [FACE_MASK_TYPES ...] mix and match different face mask types (choices: box, occlusion, region)
71
+ --face-mask-blur [0.0-1.0] specify the degree of blur applied the box mask
72
+ --face-mask-padding FACE_MASK_PADDING [FACE_MASK_PADDING ...] apply top, right, bottom and left padding to the box mask
73
+ --face-mask-regions FACE_MASK_REGIONS [FACE_MASK_REGIONS ...] choose the facial features used for the region mask (choices: skin, left-eyebrow, right-eyebrow, left-eye, right-eye, glasses, nose, mouth, upper-lip, lower-lip)
74
+
75
+ frame extraction:
76
+ --trim-frame-start TRIM_FRAME_START specify the the start frame of the target video
77
+ --trim-frame-end TRIM_FRAME_END specify the the end frame of the target video
78
+ --temp-frame-format {bmp,jpg,png} specify the temporary resources format
79
+ --keep-temp keep the temporary resources after processing
80
+
81
+ output creation:
82
+ --output-image-quality [0-100] specify the image quality which translates to the compression factor
83
+ --output-image-resolution OUTPUT_IMAGE_RESOLUTION specify the image output resolution based on the target image
84
+ --output-video-encoder {libx264,libx265,libvpx-vp9,h264_nvenc,hevc_nvenc,h264_amf,hevc_amf} specify the encoder use for the video compression
85
+ --output-video-preset {ultrafast,superfast,veryfast,faster,fast,medium,slow,slower,veryslow} balance fast video processing and video file size
86
+ --output-video-quality [0-100] specify the video quality which translates to the compression factor
87
+ --output-video-resolution OUTPUT_VIDEO_RESOLUTION specify the video output resolution based on the target video
88
+ --output-video-fps OUTPUT_VIDEO_FPS specify the video output fps based on the target video
89
+ --skip-audio omit the audio from the target video
90
+
91
+ frame processors:
92
+ --frame-processors FRAME_PROCESSORS [FRAME_PROCESSORS ...] load a single or multiple frame processors. (choices: face_debugger, face_enhancer, face_swapper, frame_colorizer, frame_enhancer, lip_syncer, ...)
93
+ --face-debugger-items FACE_DEBUGGER_ITEMS [FACE_DEBUGGER_ITEMS ...] load a single or multiple frame processors (choices: bounding-box, face-landmark-5, face-landmark-5/68, face-landmark-68, face-landmark-68/5, face-mask, face-detector-score, face-landmarker-score, age, gender)
94
+ --face-enhancer-model {codeformer,gfpgan_1.2,gfpgan_1.3,gfpgan_1.4,gpen_bfr_256,gpen_bfr_512,gpen_bfr_1024,gpen_bfr_2048,restoreformer_plus_plus} choose the model responsible for enhancing the face
95
+ --face-enhancer-blend [0-100] blend the enhanced into the previous face
96
+ --face-swapper-model {blendswap_256,inswapper_128,inswapper_128_fp16,simswap_256,simswap_512_unofficial,uniface_256} choose the model responsible for swapping the face
97
+ --frame-colorizer-model {ddcolor,ddcolor_artistic,deoldify,deoldify_artistic,deoldify_stable} choose the model responsible for colorizing the frame
98
+ --frame-colorizer-blend [0-100] blend the colorized into the previous frame
99
+ --frame-colorizer-size {192x192,256x256,384x384,512x512} specify the size of the frame provided to the frame colorizer
100
+ --frame-enhancer-model {lsdir_x4,nomos8k_sc_x4,real_esrgan_x2,real_esrgan_x2_fp16,real_esrgan_x4,real_esrgan_x4_fp16,real_hatgan_x4,span_kendata_x4} choose the model responsible for enhancing the frame
101
+ --frame-enhancer-blend [0-100] blend the enhanced into the previous frame
102
+ --lip-syncer-model {wav2lip_gan} choose the model responsible for syncing the lips
103
+
104
+ uis:
105
+ --ui-layouts UI_LAYOUTS [UI_LAYOUTS ...] launch a single or multiple UI layouts (choices: benchmark, default, webcam, ...)
106
+ ```
107
+
108
+
109
+ Documentation
110
+ -------------
111
+
112
+ Read the [documentation](https://docs.facefusion.io) for a deep dive.
facefusion.ini ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [general]
2
+ source_paths =
3
+ target_path =
4
+ output_path =
5
+
6
+ [misc]
7
+ force_download =
8
+ skip_download =
9
+ headless =
10
+ log_level =
11
+
12
+ [execution]
13
+ execution_providers =
14
+ execution_thread_count =
15
+ execution_queue_count =
16
+
17
+ [memory]
18
+ video_memory_strategy =
19
+ system_memory_limit =
20
+
21
+ [face_analyser]
22
+ face_analyser_order =
23
+ face_analyser_age =
24
+ face_analyser_gender =
25
+ face_detector_model =
26
+ face_detector_size =
27
+ face_detector_score =
28
+ face_landmarker_score =
29
+
30
+ [face_selector]
31
+ face_selector_mode =
32
+ reference_face_position =
33
+ reference_face_distance =
34
+ reference_frame_number =
35
+
36
+ [face_mask]
37
+ face_mask_types =
38
+ face_mask_blur =
39
+ face_mask_padding =
40
+ face_mask_regions =
41
+
42
+ [frame_extraction]
43
+ trim_frame_start =
44
+ trim_frame_end =
45
+ temp_frame_format =
46
+ keep_temp =
47
+
48
+ [output_creation]
49
+ output_image_quality =
50
+ output_image_resolution =
51
+ output_video_encoder =
52
+ output_video_preset =
53
+ output_video_quality =
54
+ output_video_resolution =
55
+ output_video_fps =
56
+ skip_audio =
57
+
58
+ [frame_processors]
59
+ frame_processors =
60
+ face_debugger_items =
61
+ face_enhancer_model =
62
+ face_enhancer_blend =
63
+ face_swapper_model =
64
+ frame_colorizer_model =
65
+ frame_colorizer_blend =
66
+ frame_colorizer_size =
67
+ frame_enhancer_model =
68
+ frame_enhancer_blend =
69
+ lip_syncer_model =
70
+
71
+ [uis]
72
+ ui_layouts =
facefusion/__init__.py ADDED
File without changes
facefusion/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (146 Bytes). View file
 
facefusion/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (162 Bytes). View file
 
facefusion/__pycache__/audio.cpython-310.pyc ADDED
Binary file (4.5 kB). View file
 
facefusion/__pycache__/choices.cpython-310.pyc ADDED
Binary file (2.74 kB). View file
 
facefusion/__pycache__/common_helper.cpython-310.pyc ADDED
Binary file (975 Bytes). View file
 
facefusion/__pycache__/config.cpython-310.pyc ADDED
Binary file (2.49 kB). View file
 
facefusion/__pycache__/content_analyser.cpython-310.pyc ADDED
Binary file (3.98 kB). View file
 
facefusion/__pycache__/core.cpython-310.pyc ADDED
Binary file (17.7 kB). View file
 
facefusion/__pycache__/download.cpython-310.pyc ADDED
Binary file (2.01 kB). View file
 
facefusion/__pycache__/execution.cpython-310.pyc ADDED
Binary file (3.63 kB). View file
 
facefusion/__pycache__/face_analyser.cpython-310.pyc ADDED
Binary file (18.1 kB). View file
 
facefusion/__pycache__/face_helper.cpython-310.pyc ADDED
Binary file (6.37 kB). View file
 
facefusion/__pycache__/face_masker.cpython-310.pyc ADDED
Binary file (5.61 kB). View file
 
facefusion/__pycache__/face_store.cpython-310.pyc ADDED
Binary file (1.64 kB). View file
 
facefusion/__pycache__/ffmpeg.cpython-310.pyc ADDED
Binary file (5.47 kB). View file
 
facefusion/__pycache__/filesystem.cpython-310.pyc ADDED
Binary file (4.59 kB). View file
 
facefusion/__pycache__/globals.cpython-310.pyc ADDED
Binary file (2.42 kB). View file
 
facefusion/__pycache__/installer.cpython-310.pyc ADDED
Binary file (3.08 kB). View file
 
facefusion/__pycache__/installer.cpython-311.pyc ADDED
Binary file (6.1 kB). View file
 
facefusion/__pycache__/logger.cpython-310.pyc ADDED
Binary file (1.65 kB). View file
 
facefusion/__pycache__/memory.cpython-310.pyc ADDED
Binary file (727 Bytes). View file
 
facefusion/__pycache__/metadata.cpython-310.pyc ADDED
Binary file (473 Bytes). View file
 
facefusion/__pycache__/metadata.cpython-311.pyc ADDED
Binary file (567 Bytes). View file
 
facefusion/__pycache__/normalizer.cpython-310.pyc ADDED
Binary file (1.57 kB). View file
 
facefusion/__pycache__/process_manager.cpython-310.pyc ADDED
Binary file (1.71 kB). View file
 
facefusion/__pycache__/statistics.cpython-310.pyc ADDED
Binary file (1.72 kB). View file
 
facefusion/__pycache__/thread_helper.cpython-310.pyc ADDED
Binary file (915 Bytes). View file
 
facefusion/__pycache__/typing.cpython-310.pyc ADDED
Binary file (3.23 kB). View file
 
facefusion/__pycache__/vision.cpython-310.pyc ADDED
Binary file (6.62 kB). View file
 
facefusion/__pycache__/voice_extractor.cpython-310.pyc ADDED
Binary file (4.78 kB). View file
 
facefusion/__pycache__/wording.cpython-310.pyc ADDED
Binary file (11.2 kB). View file
 
facefusion/__pycache__/wording.cpython-311.pyc ADDED
Binary file (13.2 kB). View file
 
facefusion/audio.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional, Any, List
2
+ from functools import lru_cache
3
+ import numpy
4
+ import scipy
5
+
6
+ from facefusion.filesystem import is_audio
7
+ from facefusion.ffmpeg import read_audio_buffer
8
+ from facefusion.typing import Fps, Audio, AudioFrame, Spectrogram, MelFilterBank
9
+ from facefusion.voice_extractor import batch_extract_voice
10
+
11
+
12
@lru_cache(maxsize = 128)
def read_static_audio(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]:
	# Cached variant of read_audio(): repeated calls with the same
	# (audio_path, fps) pair return the previously decoded frames.
	return read_audio(audio_path, fps)
15
+
16
+
17
def read_audio(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]:
	# Decode an audio file into a list of mel spectrogram frames,
	# or None when the path is not an audio file.
	sample_rate = 48000
	channel_total = 2

	if not is_audio(audio_path):
		return None
	raw_buffer = read_audio_buffer(audio_path, sample_rate, channel_total)
	pcm_samples = numpy.frombuffer(raw_buffer, dtype = numpy.int16).reshape(-1, 2)
	normalized_audio = prepare_audio(pcm_samples)
	spectrogram = create_spectrogram(normalized_audio)
	return extract_audio_frames(spectrogram, fps)
29
+
30
+
31
@lru_cache(maxsize = 128)
def read_static_voice(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]:
	# Cached variant of read_voice(): repeated calls with the same
	# (audio_path, fps) pair return the previously extracted voice frames.
	return read_voice(audio_path, fps)
34
+
35
+
36
def read_voice(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]:
	# Decode an audio file, isolate the voice track and return mel
	# spectrogram frames; None when the path is not an audio file.
	sample_rate = 48000
	channel_total = 2
	chunk_size = 1024 * 240
	step_size = 1024 * 180

	if not is_audio(audio_path):
		return None
	raw_buffer = read_audio_buffer(audio_path, sample_rate, channel_total)
	pcm_samples = numpy.frombuffer(raw_buffer, dtype = numpy.int16).reshape(-1, 2)
	voice_audio = batch_extract_voice(pcm_samples, chunk_size, step_size)
	voice_audio = prepare_voice(voice_audio)
	spectrogram = create_spectrogram(voice_audio)
	return extract_audio_frames(spectrogram, fps)
51
+
52
+
53
def get_audio_frame(audio_path : str, fps : Fps, frame_number : int = 0) -> Optional[AudioFrame]:
	# Return the cached audio frame at frame_number, or None when the
	# path is not audio or the index is out of range.
	if not is_audio(audio_path):
		return None
	audio_frames = read_static_audio(audio_path, fps)
	if 0 <= frame_number < len(audio_frames):
		return audio_frames[frame_number]
	return None
59
+
60
+
61
def get_voice_frame(audio_path : str, fps : Fps, frame_number : int = 0) -> Optional[AudioFrame]:
	# Return the cached voice frame at frame_number, or None when the
	# path is not audio or the index is out of range.
	if not is_audio(audio_path):
		return None
	voice_frames = read_static_voice(audio_path, fps)
	if 0 <= frame_number < len(voice_frames):
		return voice_frames[frame_number]
	return None
67
+
68
+
69
def create_empty_audio_frame() -> AudioFrame:
	# Silent placeholder frame: 80 mel filters by 16 steps of int16 zeros,
	# matching the window produced by extract_audio_frames().
	mel_filter_total = 80
	step_size = 16
	return numpy.zeros((mel_filter_total, step_size)).astype(numpy.int16)
74
+
75
+
76
def prepare_audio(audio : numpy.ndarray[Any, Any]) -> Audio:
	# Downmix to mono, peak-normalize, then apply a pre-emphasis filter.
	if audio.ndim > 1:
		audio = numpy.mean(audio, axis = 1)
	peak_amplitude = numpy.max(numpy.abs(audio), axis = 0)
	audio = audio / peak_amplitude
	return scipy.signal.lfilter([ 1.0, -0.97 ], [ 1.0 ], audio)
82
+
83
+
84
def prepare_voice(audio : numpy.ndarray[Any, Any]) -> Audio:
	# Resample from 48 kHz down to 16 kHz before the shared audio preparation.
	sample_rate = 48000
	resample_rate = 16000
	target_length = int(len(audio) * resample_rate / sample_rate)

	audio = scipy.signal.resample(audio, target_length)
	return prepare_audio(audio)
91
+
92
+
93
def convert_hertz_to_mel(hertz : float) -> float:
	# Map a frequency in hertz onto the mel scale.
	return 2595 * numpy.log10(1 + hertz / 700)
95
+
96
+
97
def convert_mel_to_hertz(mel : numpy.ndarray[Any, Any]) -> numpy.ndarray[Any, Any]:
	# Inverse of convert_hertz_to_mel() applied element-wise to an array.
	return 700 * (10 ** (mel / 2595) - 1)
99
+
100
+
101
def create_mel_filter_bank() -> MelFilterBank:
	# Build a bank of 80 triangular mel filters over an 800-point FFT at
	# 16 kHz, covering 55 Hz to 7600 Hz.
	mel_filter_total = 80
	mel_bin_total = 800
	sample_rate = 16000
	min_frequency = 55.0
	max_frequency = 7600.0
	mel_filter_bank = numpy.zeros((mel_filter_total, mel_bin_total // 2 + 1))
	# Filter edges evenly spaced on the mel scale, then mapped back to FFT bins.
	mel_frequency_range = numpy.linspace(convert_hertz_to_mel(min_frequency), convert_hertz_to_mel(max_frequency), mel_filter_total + 2)
	indices = numpy.floor((mel_bin_total + 1) * convert_mel_to_hertz(mel_frequency_range) / sample_rate).astype(numpy.int16)

	for index in range(mel_filter_total):
		start = indices[index]
		end = indices[index + 1]
		# Triangular window spanning two consecutive edge bins.
		mel_filter_bank[index, start:end] = scipy.signal.windows.triang(end - start)
	return mel_filter_bank
116
+
117
+
118
def create_spectrogram(audio : Audio) -> Spectrogram:
	# Project the STFT magnitude of the audio onto the mel filter bank.
	window_size = 800
	window_overlap = 600
	stft_result = scipy.signal.stft(audio, nperseg = window_size, nfft = window_size, noverlap = window_overlap)[2]
	return numpy.dot(create_mel_filter_bank(), numpy.abs(stft_result))
125
+
126
+
127
def extract_audio_frames(spectrogram : Spectrogram, fps : Fps) -> List[AudioFrame]:
	# Slice the spectrogram into fixed-width windows whose spacing tracks
	# the video frame rate.
	mel_filter_total = 80
	step_size = 16
	indices = numpy.arange(0, spectrogram.shape[1], mel_filter_total / fps).astype(numpy.int16)
	indices = indices[indices >= step_size]
	return [ spectrogram[:, max(0, index - step_size):index] for index in indices ]
facefusion/choices.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import List, Dict

from facefusion.typing import VideoMemoryStrategy, FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceDetectorModel, FaceMaskType, FaceMaskRegion, TempFrameFormat, OutputVideoEncoder, OutputVideoPreset
from facefusion.common_helper import create_int_range, create_float_range

# Closed sets of accepted values for the CLI / config options.
video_memory_strategies : List[VideoMemoryStrategy] = [ 'strict', 'moderate', 'tolerant' ]
face_analyser_orders : List[FaceAnalyserOrder] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best' ]
face_analyser_ages : List[FaceAnalyserAge] = [ 'child', 'teen', 'adult', 'senior' ]
face_analyser_genders : List[FaceAnalyserGender] = [ 'female', 'male' ]
# Supported input resolutions per face detector model.
face_detector_set : Dict[FaceDetectorModel, List[str]] =\
{
	'many': [ '640x640' ],
	'retinaface': [ '160x160', '320x320', '480x480', '512x512', '640x640' ],
	'scrfd': [ '160x160', '320x320', '480x480', '512x512', '640x640' ],
	'yoloface': [ '640x640' ],
	'yunet': [ '160x160', '320x320', '480x480', '512x512', '640x640', '768x768', '960x960', '1024x1024' ]
}
face_selector_modes : List[FaceSelectorMode] = [ 'many', 'one', 'reference' ]
face_mask_types : List[FaceMaskType] = [ 'box', 'occlusion', 'region' ]
face_mask_regions : List[FaceMaskRegion] = [ 'skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 'glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip' ]
temp_frame_formats : List[TempFrameFormat] = [ 'bmp', 'jpg', 'png' ]
output_video_encoders : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc', 'h264_amf', 'hevc_amf' ]
output_video_presets : List[OutputVideoPreset] = [ 'ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow' ]

# Scale factors / heights offered when choosing output resolutions.
image_template_sizes : List[float] = [ 0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 3.5, 4 ]
video_template_sizes : List[int] = [ 240, 360, 480, 540, 720, 1080, 1440, 2160, 4320 ]

# Numeric option ranges used for validation and argparse metavars.
execution_thread_count_range : List[int] = create_int_range(1, 128, 1)
execution_queue_count_range : List[int] = create_int_range(1, 32, 1)
system_memory_limit_range : List[int] = create_int_range(0, 128, 1)
face_detector_score_range : List[float] = create_float_range(0.0, 1.0, 0.05)
face_landmarker_score_range : List[float] = create_float_range(0.0, 1.0, 0.05)
face_mask_blur_range : List[float] = create_float_range(0.0, 1.0, 0.05)
face_mask_padding_range : List[int] = create_int_range(0, 100, 1)
reference_face_distance_range : List[float] = create_float_range(0.0, 1.5, 0.05)
output_image_quality_range : List[int] = create_int_range(0, 100, 1)
output_video_quality_range : List[int] = create_int_range(0, 100, 1)
facefusion/common_helper.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Any
2
+ import numpy
3
+
4
+
5
def create_metavar(ranges : List[Any]) -> str:
	# Render a range list as an argparse metavar such as '[1-128]'.
	return '[{}-{}]'.format(ranges[0], ranges[-1])
7
+
8
+
9
def create_int_range(start : int, stop : int, step : int) -> List[int]:
	# Inclusive integer range [start, stop] in increments of step.
	# Plain range() replaces the numpy.arange round-trip: same result for
	# integer arguments, without allocating a throwaway ndarray.
	return list(range(start, stop + step, step))
11
+
12
+
13
def create_float_range(start : float, stop : float, step : float) -> List[float]:
	# Inclusive float range [start, stop] in increments of step, rounded
	# to two decimals to suppress floating point noise from arange.
	return (numpy.around(numpy.arange(start, stop + step, step), decimals = 2)).tolist()
15
+
16
+
17
def get_first(__list__ : Any) -> Any:
	# First element of any iterable, or None when it is empty.
	return next(iter(__list__), None)
facefusion/config.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from configparser import ConfigParser
2
+ from typing import Any, Optional, List
3
+
4
+ from facefusion.filesystem import resolve_relative_path
5
+
6
+ CONFIG = None
7
+
8
+
9
def get_config() -> ConfigParser:
	# Return the process-wide parser for facefusion.ini, reading the file
	# only on first access and caching it in the module-level CONFIG.
	global CONFIG

	if CONFIG is None:
		config_path = resolve_relative_path('../facefusion.ini')
		CONFIG = ConfigParser()
		CONFIG.read(config_path, encoding = 'utf-8')
	return CONFIG
17
+
18
+
19
def clear_config() -> None:
	# Drop the cached parser so the next get_config() re-reads the file.
	global CONFIG

	CONFIG = None
23
+
24
+
25
def get_str_value(key : str, fallback : Optional[str] = None) -> Optional[str]:
	# Resolve a config entry as str, using the fallback when the entry is
	# empty; None when both are empty.
	resolved = get_value_by_notation(key) or fallback
	if resolved:
		return str(resolved)
	return None
31
+
32
+
33
def get_int_value(key : str, fallback : Optional[str] = None) -> Optional[int]:
	# Resolve a config entry as int, using the fallback when the entry is
	# empty; None when both are empty.
	resolved = get_value_by_notation(key) or fallback
	if resolved:
		return int(resolved)
	return None
39
+
40
+
41
def get_float_value(key : str, fallback : Optional[str] = None) -> Optional[float]:
	# Resolve a config entry as float, using the fallback when the entry
	# is empty; None when both are empty.
	resolved = get_value_by_notation(key) or fallback
	if resolved:
		return float(resolved)
	return None
47
+
48
+
49
def get_bool_value(key : str, fallback : Optional[str] = None) -> Optional[bool]:
	# Interpret the literal strings 'True' / 'False' from the config entry
	# or the fallback; None for any other content.
	value = get_value_by_notation(key)

	if 'True' in (value, fallback):
		return True
	if 'False' in (value, fallback):
		return False
	return None
57
+
58
+
59
def get_str_list(key : str, fallback : Optional[str] = None) -> Optional[List[str]]:
	# Resolve a space-separated config entry as a list of strings.
	resolved = get_value_by_notation(key) or fallback
	if resolved:
		return [ str(item) for item in resolved.split(' ') ]
	return None
65
+
66
+
67
def get_int_list(key : str, fallback : Optional[str] = None) -> Optional[List[int]]:
	# Resolve a space-separated config entry as a list of ints.
	resolved = get_value_by_notation(key) or fallback
	if resolved:
		return [ int(item) for item in resolved.split(' ') ]
	return None
73
+
74
+
75
def get_float_list(key : str, fallback : Optional[str] = None) -> Optional[List[float]]:
	# Resolve a space-separated config entry as a list of floats.
	resolved = get_value_by_notation(key) or fallback
	if resolved:
		return [ float(item) for item in resolved.split(' ') ]
	return None
81
+
82
+
83
def get_value_by_notation(key : str) -> Optional[Any]:
	# Resolve a config value by 'section.name' dot notation; a bare key
	# returns the section proxy itself. None when nothing matches.
	config = get_config()

	if '.' in key:
		# Split on the first dot only: the original unbounded split raised
		# ValueError on unpacking for keys containing more than one dot.
		section, name = key.split('.', 1)
		if section in config and name in config[section]:
			return config[section][name]
	if key in config:
		return config[key]
	return None
facefusion/content_analyser.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any
2
+ from functools import lru_cache
3
+ from time import sleep
4
+ import cv2
5
+ import numpy
6
+ import onnxruntime
7
+ from tqdm import tqdm
8
+
9
+ import facefusion.globals
10
+ from facefusion import process_manager, wording
11
+ from facefusion.thread_helper import thread_lock, conditional_thread_semaphore
12
+ from facefusion.typing import VisionFrame, ModelSet, Fps
13
+ from facefusion.execution import apply_execution_provider_options
14
+ from facefusion.vision import get_video_frame, count_video_frame_total, read_image, detect_video_fps
15
+ from facefusion.filesystem import resolve_relative_path, is_file
16
+ from facefusion.download import conditional_download
17
+
18
# Lazily created ONNX inference session, built by get_content_analyser().
CONTENT_ANALYSER = None
# Download URL and local path for the open_nsfw classification model.
MODELS : ModelSet =\
{
	'open_nsfw':
	{
		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/open_nsfw.onnx',
		'path': resolve_relative_path('../.assets/models/open_nsfw.onnx')
	}
}
# Frames scoring above this probability are treated as NSFW.
PROBABILITY_LIMIT = 0.80
# Percentage of flagged sampled frames above which a video is rejected.
RATE_LIMIT = 10
# Running frame counter used by analyse_stream() for periodic sampling.
STREAM_COUNTER = 0
30
+
31
+
32
def get_content_analyser() -> Any:
	# Return the shared ONNX inference session for the open_nsfw model,
	# creating it on first use. The lock prevents concurrent callers from
	# building duplicate sessions.
	global CONTENT_ANALYSER

	with thread_lock():
		# Wait until any in-flight model download check has finished.
		while process_manager.is_checking():
			sleep(0.5)
		if CONTENT_ANALYSER is None:
			model_path = MODELS.get('open_nsfw').get('path')
			CONTENT_ANALYSER = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
	return CONTENT_ANALYSER
42
+
43
+
44
def clear_content_analyser() -> None:
	# Drop the cached inference session so the next access rebuilds it.
	global CONTENT_ANALYSER

	CONTENT_ANALYSER = None
48
+
49
+
50
def pre_check() -> bool:
	# Ensure the open_nsfw model exists locally, downloading it unless
	# downloads are skipped; report whether the model file is present.
	model_entry = MODELS.get('open_nsfw')
	download_directory_path = resolve_relative_path('../.assets/models')

	if not facefusion.globals.skip_download:
		process_manager.check()
		conditional_download(download_directory_path, [ model_entry.get('url') ])
		process_manager.end()
	return is_file(model_entry.get('path'))
60
+
61
+
62
def analyse_stream(vision_frame : VisionFrame, video_fps : Fps) -> bool:
	# Sample roughly one frame per second of the stream for NSFW analysis;
	# unsampled frames are reported as safe.
	global STREAM_COUNTER

	STREAM_COUNTER += 1
	if STREAM_COUNTER % int(video_fps) == 0:
		return analyse_frame(vision_frame)
	return False
69
+
70
+
71
def analyse_frame(vision_frame : VisionFrame) -> bool:
	# Run the open_nsfw model on a single frame and report whether its
	# NSFW probability exceeds PROBABILITY_LIMIT.
	content_analyser = get_content_analyser()
	vision_frame = prepare_frame(vision_frame)
	with conditional_thread_semaphore(facefusion.globals.execution_providers):
		# Output index [0][0][1] selects the NSFW class probability.
		probability = content_analyser.run(None,
		{
			content_analyser.get_inputs()[0].name: vision_frame
		})[0][0][1]
	return probability > PROBABILITY_LIMIT
80
+
81
+
82
def prepare_frame(vision_frame : VisionFrame) -> VisionFrame:
	# Resize to the 224x224 model input, subtract the per-channel mean and
	# add a leading batch dimension.
	channel_mean = numpy.array([ 104, 117, 123 ]).astype(numpy.float32)
	resized_frame = cv2.resize(vision_frame, (224, 224)).astype(numpy.float32) - channel_mean
	return numpy.expand_dims(resized_frame, axis = 0)
87
+
88
+
89
@lru_cache(maxsize = None)
def analyse_image(image_path : str) -> bool:
	# Cache the NSFW verdict per image path to avoid repeated inference.
	return analyse_frame(read_image(image_path))
93
+
94
+
95
@lru_cache(maxsize = None)
def analyse_video(video_path : str, start_frame : int, end_frame : int) -> bool:
	# Sample roughly one frame per second across the given frame range and
	# flag the video when the extrapolated share of NSFW frames exceeds
	# RATE_LIMIT percent. Results are cached per (path, start, end).
	video_frame_total = count_video_frame_total(video_path)
	video_fps = detect_video_fps(video_path)
	frame_range = range(start_frame or 0, end_frame or video_frame_total)
	rate = 0.0
	counter = 0

	with tqdm(total = len(frame_range), desc = wording.get('analysing'), unit = 'frame', ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
		for frame_number in frame_range:
			# Only analyse one frame per second of video.
			if frame_number % int(video_fps) == 0:
				frame = get_video_frame(video_path, frame_number)
				if analyse_frame(frame):
					counter += 1
			# Extrapolate the NSFW rate from the sampled frames.
			rate = counter * int(video_fps) / len(frame_range) * 100
			progress.update()
			progress.set_postfix(rate = rate)
	return rate > RATE_LIMIT
facefusion/core.py ADDED
@@ -0,0 +1,416 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ os.environ['OMP_NUM_THREADS'] = '1'
4
+
5
+ import signal
6
+ import sys
7
+ import warnings
8
+ import shutil
9
+ import numpy
10
+ import onnxruntime
11
+ from time import sleep, time
12
+ from argparse import ArgumentParser, HelpFormatter
13
+
14
+ import facefusion.choices
15
+ import facefusion.globals
16
+ from facefusion.face_analyser import get_one_face, get_average_face
17
+ from facefusion.face_store import get_reference_faces, append_reference_face
18
+ from facefusion import face_analyser, face_masker, content_analyser, config, process_manager, metadata, logger, wording, voice_extractor
19
+ from facefusion.content_analyser import analyse_image, analyse_video
20
+ from facefusion.processors.frame.core import get_frame_processors_modules, load_frame_processor_module
21
+ from facefusion.common_helper import create_metavar, get_first
22
+ from facefusion.execution import encode_execution_providers, decode_execution_providers
23
+ from facefusion.normalizer import normalize_output_path, normalize_padding, normalize_fps
24
+ from facefusion.memory import limit_system_memory
25
+ from facefusion.statistics import conditional_log_statistics
26
+ from facefusion.download import conditional_download
27
+ from facefusion.filesystem import list_directory, get_temp_frame_paths, create_temp, move_temp, clear_temp, is_image, is_video, filter_audio_paths, resolve_relative_path
28
+ from facefusion.ffmpeg import extract_frames, merge_video, copy_image, finalize_image, restore_audio, replace_audio
29
+ from facefusion.vision import read_image, read_static_images, detect_image_resolution, restrict_video_fps, create_image_resolutions, get_video_frame, detect_video_resolution, detect_video_fps, restrict_video_resolution, restrict_image_resolution, create_video_resolutions, pack_resolution, unpack_resolution
30
+
31
+ onnxruntime.set_default_logger_severity(3)
32
+ warnings.filterwarnings('ignore', category = UserWarning, module = 'gradio')
33
+
34
+
35
def cli() -> None:
	"""
	Build the command line interface and hand control to run().

	Defaults come from the ini config via the config helpers, so CLI flags
	override config values. Frame processor and UI layout modules register
	their own arguments dynamically.
	"""
	# exit cleanly on Ctrl+C
	signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
	# help is suppressed here so the parser can be rebuilt below with the processor args included
	program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 160), add_help = False)
	# general
	program.add_argument('-s', '--source', help = wording.get('help.source'), action = 'append', dest = 'source_paths', default = config.get_str_list('general.source_paths'))
	program.add_argument('-t', '--target', help = wording.get('help.target'), dest = 'target_path', default = config.get_str_value('general.target_path'))
	program.add_argument('-o', '--output', help = wording.get('help.output'), dest = 'output_path', default = config.get_str_value('general.output_path'))
	program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
	# misc
	group_misc = program.add_argument_group('misc')
	group_misc.add_argument('--force-download', help = wording.get('help.force_download'), action = 'store_true', default = config.get_bool_value('misc.force_download'))
	group_misc.add_argument('--skip-download', help = wording.get('help.skip_download'), action = 'store_true', default = config.get_bool_value('misc.skip_download'))
	group_misc.add_argument('--headless', help = wording.get('help.headless'), action = 'store_true', default = config.get_bool_value('misc.headless'))
	group_misc.add_argument('--log-level', help = wording.get('help.log_level'), default = config.get_str_value('misc.log_level', 'info'), choices = logger.get_log_levels())
	# execution
	execution_providers = encode_execution_providers(onnxruntime.get_available_providers())
	group_execution = program.add_argument_group('execution')
	group_execution.add_argument('--execution-providers', help = wording.get('help.execution_providers').format(choices = ', '.join(execution_providers)), default = config.get_str_list('execution.execution_providers', 'cpu'), choices = execution_providers, nargs = '+', metavar = 'EXECUTION_PROVIDERS')
	group_execution.add_argument('--execution-thread-count', help = wording.get('help.execution_thread_count'), type = int, default = config.get_int_value('execution.execution_thread_count', '4'), choices = facefusion.choices.execution_thread_count_range, metavar = create_metavar(facefusion.choices.execution_thread_count_range))
	group_execution.add_argument('--execution-queue-count', help = wording.get('help.execution_queue_count'), type = int, default = config.get_int_value('execution.execution_queue_count', '1'), choices = facefusion.choices.execution_queue_count_range, metavar = create_metavar(facefusion.choices.execution_queue_count_range))
	# memory
	group_memory = program.add_argument_group('memory')
	group_memory.add_argument('--video-memory-strategy', help = wording.get('help.video_memory_strategy'), default = config.get_str_value('memory.video_memory_strategy', 'strict'), choices = facefusion.choices.video_memory_strategies)
	group_memory.add_argument('--system-memory-limit', help = wording.get('help.system_memory_limit'), type = int, default = config.get_int_value('memory.system_memory_limit', '0'), choices = facefusion.choices.system_memory_limit_range, metavar = create_metavar(facefusion.choices.system_memory_limit_range))
	# face analyser
	group_face_analyser = program.add_argument_group('face analyser')
	group_face_analyser.add_argument('--face-analyser-order', help = wording.get('help.face_analyser_order'), default = config.get_str_value('face_analyser.face_analyser_order', 'left-right'), choices = facefusion.choices.face_analyser_orders)
	group_face_analyser.add_argument('--face-analyser-age', help = wording.get('help.face_analyser_age'), default = config.get_str_value('face_analyser.face_analyser_age'), choices = facefusion.choices.face_analyser_ages)
	group_face_analyser.add_argument('--face-analyser-gender', help = wording.get('help.face_analyser_gender'), default = config.get_str_value('face_analyser.face_analyser_gender'), choices = facefusion.choices.face_analyser_genders)
	group_face_analyser.add_argument('--face-detector-model', help = wording.get('help.face_detector_model'), default = config.get_str_value('face_analyser.face_detector_model', 'yoloface'), choices = facefusion.choices.face_detector_set.keys())
	group_face_analyser.add_argument('--face-detector-size', help = wording.get('help.face_detector_size'), default = config.get_str_value('face_analyser.face_detector_size', '640x640'))
	group_face_analyser.add_argument('--face-detector-score', help = wording.get('help.face_detector_score'), type = float, default = config.get_float_value('face_analyser.face_detector_score', '0.5'), choices = facefusion.choices.face_detector_score_range, metavar = create_metavar(facefusion.choices.face_detector_score_range))
	group_face_analyser.add_argument('--face-landmarker-score', help = wording.get('help.face_landmarker_score'), type = float, default = config.get_float_value('face_analyser.face_landmarker_score', '0.5'), choices = facefusion.choices.face_landmarker_score_range, metavar = create_metavar(facefusion.choices.face_landmarker_score_range))
	# face selector
	group_face_selector = program.add_argument_group('face selector')
	group_face_selector.add_argument('--face-selector-mode', help = wording.get('help.face_selector_mode'), default = config.get_str_value('face_selector.face_selector_mode', 'reference'), choices = facefusion.choices.face_selector_modes)
	group_face_selector.add_argument('--reference-face-position', help = wording.get('help.reference_face_position'), type = int, default = config.get_int_value('face_selector.reference_face_position', '0'))
	group_face_selector.add_argument('--reference-face-distance', help = wording.get('help.reference_face_distance'), type = float, default = config.get_float_value('face_selector.reference_face_distance', '0.6'), choices = facefusion.choices.reference_face_distance_range, metavar = create_metavar(facefusion.choices.reference_face_distance_range))
	group_face_selector.add_argument('--reference-frame-number', help = wording.get('help.reference_frame_number'), type = int, default = config.get_int_value('face_selector.reference_frame_number', '0'))
	# face mask
	group_face_mask = program.add_argument_group('face mask')
	group_face_mask.add_argument('--face-mask-types', help = wording.get('help.face_mask_types').format(choices = ', '.join(facefusion.choices.face_mask_types)), default = config.get_str_list('face_mask.face_mask_types', 'box'), choices = facefusion.choices.face_mask_types, nargs = '+', metavar = 'FACE_MASK_TYPES')
	group_face_mask.add_argument('--face-mask-blur', help = wording.get('help.face_mask_blur'), type = float, default = config.get_float_value('face_mask.face_mask_blur', '0.3'), choices = facefusion.choices.face_mask_blur_range, metavar = create_metavar(facefusion.choices.face_mask_blur_range))
	group_face_mask.add_argument('--face-mask-padding', help = wording.get('help.face_mask_padding'), type = int, default = config.get_int_list('face_mask.face_mask_padding', '0 0 0 0'), nargs = '+')
	group_face_mask.add_argument('--face-mask-regions', help = wording.get('help.face_mask_regions').format(choices = ', '.join(facefusion.choices.face_mask_regions)), default = config.get_str_list('face_mask.face_mask_regions', ' '.join(facefusion.choices.face_mask_regions)), choices = facefusion.choices.face_mask_regions, nargs = '+', metavar = 'FACE_MASK_REGIONS')
	# frame extraction
	group_frame_extraction = program.add_argument_group('frame extraction')
	group_frame_extraction.add_argument('--trim-frame-start', help = wording.get('help.trim_frame_start'), type = int, default = facefusion.config.get_int_value('frame_extraction.trim_frame_start'))
	group_frame_extraction.add_argument('--trim-frame-end', help = wording.get('help.trim_frame_end'), type = int, default = facefusion.config.get_int_value('frame_extraction.trim_frame_end'))
	group_frame_extraction.add_argument('--temp-frame-format', help = wording.get('help.temp_frame_format'), default = config.get_str_value('frame_extraction.temp_frame_format', 'png'), choices = facefusion.choices.temp_frame_formats)
	group_frame_extraction.add_argument('--keep-temp', help = wording.get('help.keep_temp'), action = 'store_true', default = config.get_bool_value('frame_extraction.keep_temp'))
	# output creation
	group_output_creation = program.add_argument_group('output creation')
	group_output_creation.add_argument('--output-image-quality', help = wording.get('help.output_image_quality'), type = int, default = config.get_int_value('output_creation.output_image_quality', '80'), choices = facefusion.choices.output_image_quality_range, metavar = create_metavar(facefusion.choices.output_image_quality_range))
	group_output_creation.add_argument('--output-image-resolution', help = wording.get('help.output_image_resolution'), default = config.get_str_value('output_creation.output_image_resolution'))
	group_output_creation.add_argument('--output-video-encoder', help = wording.get('help.output_video_encoder'), default = config.get_str_value('output_creation.output_video_encoder', 'libx264'), choices = facefusion.choices.output_video_encoders)
	group_output_creation.add_argument('--output-video-preset', help = wording.get('help.output_video_preset'), default = config.get_str_value('output_creation.output_video_preset', 'veryfast'), choices = facefusion.choices.output_video_presets)
	group_output_creation.add_argument('--output-video-quality', help = wording.get('help.output_video_quality'), type = int, default = config.get_int_value('output_creation.output_video_quality', '80'), choices = facefusion.choices.output_video_quality_range, metavar = create_metavar(facefusion.choices.output_video_quality_range))
	group_output_creation.add_argument('--output-video-resolution', help = wording.get('help.output_video_resolution'), default = config.get_str_value('output_creation.output_video_resolution'))
	# NOTE(review): type = float but the default comes from get_str_value — argparse does not
	# coerce defaults, so the raw string is later normalized in apply_args; confirm intended
	group_output_creation.add_argument('--output-video-fps', help = wording.get('help.output_video_fps'), type = float, default = config.get_str_value('output_creation.output_video_fps'))
	group_output_creation.add_argument('--skip-audio', help = wording.get('help.skip_audio'), action = 'store_true', default = config.get_bool_value('output_creation.skip_audio'))
	# frame processors
	available_frame_processors = list_directory('facefusion/processors/frame/modules')
	# rebuild the parser with add_help enabled now that the base arguments exist
	program = ArgumentParser(parents = [ program ], formatter_class = program.formatter_class, add_help = True)
	group_frame_processors = program.add_argument_group('frame processors')
	group_frame_processors.add_argument('--frame-processors', help = wording.get('help.frame_processors').format(choices = ', '.join(available_frame_processors)), default = config.get_str_list('frame_processors.frame_processors', 'face_swapper'), nargs = '+')
	# each processor module contributes its own arguments
	for frame_processor in available_frame_processors:
		frame_processor_module = load_frame_processor_module(frame_processor)
		frame_processor_module.register_args(group_frame_processors)
	# uis
	available_ui_layouts = list_directory('facefusion/uis/layouts')
	group_uis = program.add_argument_group('uis')
	group_uis.add_argument('--ui-layouts', help = wording.get('help.ui_layouts').format(choices = ', '.join(available_ui_layouts)), default = config.get_str_list('uis.ui_layouts', 'default'), nargs = '+')
	run(program)
109
+
110
+
111
def validate_args(program : ArgumentParser) -> None:
	"""
	Check that every preset (config-provided) default passes the parser's own
	choice validation, aborting with a parser error when one does not.
	"""
	try:
		for action in program._actions:
			if not action.default:
				continue
			defaults = action.default if isinstance(action.default, list) else [ action.default ]
			for default in defaults:
				program._check_value(action, default)
	except Exception as exception:
		program.error(str(exception))
122
+
123
+
124
def apply_args(program : ArgumentParser) -> None:
	"""
	Parse the command line and copy every value into facefusion.globals,
	normalizing sizes, resolutions and fps against the target media on the way.
	"""
	args = program.parse_args()
	# general
	facefusion.globals.source_paths = args.source_paths
	facefusion.globals.target_path = args.target_path
	facefusion.globals.output_path = args.output_path
	# misc
	facefusion.globals.force_download = args.force_download
	facefusion.globals.skip_download = args.skip_download
	facefusion.globals.headless = args.headless
	facefusion.globals.log_level = args.log_level
	# execution
	facefusion.globals.execution_providers = decode_execution_providers(args.execution_providers)
	facefusion.globals.execution_thread_count = args.execution_thread_count
	facefusion.globals.execution_queue_count = args.execution_queue_count
	# memory
	facefusion.globals.video_memory_strategy = args.video_memory_strategy
	facefusion.globals.system_memory_limit = args.system_memory_limit
	# face analyser
	facefusion.globals.face_analyser_order = args.face_analyser_order
	facefusion.globals.face_analyser_age = args.face_analyser_age
	facefusion.globals.face_analyser_gender = args.face_analyser_gender
	facefusion.globals.face_detector_model = args.face_detector_model
	# fall back to 640x640 when the requested size is not supported by the chosen detector
	if args.face_detector_size in facefusion.choices.face_detector_set[args.face_detector_model]:
		facefusion.globals.face_detector_size = args.face_detector_size
	else:
		facefusion.globals.face_detector_size = '640x640'
	facefusion.globals.face_detector_score = args.face_detector_score
	facefusion.globals.face_landmarker_score = args.face_landmarker_score
	# face selector
	facefusion.globals.face_selector_mode = args.face_selector_mode
	facefusion.globals.reference_face_position = args.reference_face_position
	facefusion.globals.reference_face_distance = args.reference_face_distance
	facefusion.globals.reference_frame_number = args.reference_frame_number
	# face mask
	facefusion.globals.face_mask_types = args.face_mask_types
	facefusion.globals.face_mask_blur = args.face_mask_blur
	facefusion.globals.face_mask_padding = normalize_padding(args.face_mask_padding)
	facefusion.globals.face_mask_regions = args.face_mask_regions
	# frame extraction
	facefusion.globals.trim_frame_start = args.trim_frame_start
	facefusion.globals.trim_frame_end = args.trim_frame_end
	facefusion.globals.temp_frame_format = args.temp_frame_format
	facefusion.globals.keep_temp = args.keep_temp
	# output creation
	facefusion.globals.output_image_quality = args.output_image_quality
	# clamp the requested image resolution to what the target image supports
	if is_image(args.target_path):
		output_image_resolution = detect_image_resolution(args.target_path)
		output_image_resolutions = create_image_resolutions(output_image_resolution)
		if args.output_image_resolution in output_image_resolutions:
			facefusion.globals.output_image_resolution = args.output_image_resolution
		else:
			facefusion.globals.output_image_resolution = pack_resolution(output_image_resolution)
	facefusion.globals.output_video_encoder = args.output_video_encoder
	facefusion.globals.output_video_preset = args.output_video_preset
	facefusion.globals.output_video_quality = args.output_video_quality
	# clamp the requested video resolution to what the target video supports
	if is_video(args.target_path):
		output_video_resolution = detect_video_resolution(args.target_path)
		output_video_resolutions = create_video_resolutions(output_video_resolution)
		if args.output_video_resolution in output_video_resolutions:
			facefusion.globals.output_video_resolution = args.output_video_resolution
		else:
			facefusion.globals.output_video_resolution = pack_resolution(output_video_resolution)
	# fps may arrive as a string default from the config; normalize or detect from the target
	if args.output_video_fps or is_video(args.target_path):
		facefusion.globals.output_video_fps = normalize_fps(args.output_video_fps) or detect_video_fps(args.target_path)
	facefusion.globals.skip_audio = args.skip_audio
	# frame processors
	available_frame_processors = list_directory('facefusion/processors/frame/modules')
	facefusion.globals.frame_processors = args.frame_processors
	for frame_processor in available_frame_processors:
		frame_processor_module = load_frame_processor_module(frame_processor)
		frame_processor_module.apply_args(program)
	# uis
	facefusion.globals.ui_layouts = args.ui_layouts
198
+
199
+
200
def run(program : ArgumentParser) -> None:
	"""
	Validate and apply the CLI arguments, run the pre-flight checks, then either
	process headlessly or launch the configured UI layouts.
	"""
	validate_args(program)
	apply_args(program)
	logger.init(facefusion.globals.log_level)

	if facefusion.globals.system_memory_limit > 0:
		limit_system_memory(facefusion.globals.system_memory_limit)
	# --force-download only fetches models and exits
	if facefusion.globals.force_download:
		force_download()
		return
	# every subsystem must pass its own pre-check before processing starts
	if not pre_check() or not content_analyser.pre_check() or not face_analyser.pre_check() or not face_masker.pre_check() or not voice_extractor.pre_check():
		return
	for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
		if not frame_processor_module.pre_check():
			return
	if facefusion.globals.headless:
		conditional_process()
	else:
		# imported lazily so headless runs never pull in the UI stack
		import facefusion.uis.core as ui

		for ui_layout in ui.get_ui_layouts_modules(facefusion.globals.ui_layouts):
			if not ui_layout.pre_check():
				return
		ui.launch()
224
+
225
+
226
def destroy() -> None:
	"""Terminate the process immediately with exit code 0 (SIGINT handler)."""
	# NOTE(review): graceful shutdown (stopping the process manager and clearing
	# temp files) is commented out in this fork — confirm that is intentional.
	# process_manager.stop()
	# while process_manager.is_processing():
	# 	sleep(0.5)
	# if facefusion.globals.target_path:
	# 	clear_temp(facefusion.globals.target_path)
	sys.exit(0)
233
+
234
+
235
def pre_check() -> bool:
	"""Verify the runtime requirements: Python 3.9+ and an ffmpeg binary on PATH."""
	python_supported = sys.version_info >= (3, 9)
	if not python_supported:
		logger.error(wording.get('python_not_supported').format(version = '3.9'), __name__.upper())
		return False
	ffmpeg_available = shutil.which('ffmpeg') is not None
	if not ffmpeg_available:
		logger.error(wording.get('ffmpeg_not_installed'), __name__.upper())
		return False
	return True
243
+
244
+
245
def conditional_process() -> None:
	"""
	Wait until every frame processor is ready, run its output pre-processing,
	then dispatch to image or video processing depending on the target type.
	"""
	start_time = time()
	for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
		# poll until the module's models are downloaded/ready, silencing the log meanwhile
		while not frame_processor_module.post_check():
			logger.disable()
			sleep(0.5)
		logger.enable()
		if not frame_processor_module.pre_process('output'):
			return
	conditional_append_reference_faces()
	if is_image(facefusion.globals.target_path):
		process_image(start_time)
	if is_video(facefusion.globals.target_path):
		process_video(start_time)
259
+
260
+
261
def conditional_append_reference_faces() -> None:
	"""
	Populate the reference face store when running in a reference-based
	face selector mode and no reference faces have been stored yet.
	"""
	if 'reference' in facefusion.globals.face_selector_mode and not get_reference_faces():
		source_frames = read_static_images(facefusion.globals.source_paths)
		source_face = get_average_face(source_frames)
		# the reference frame comes from the configured frame of a video target,
		# or the target image itself
		if is_video(facefusion.globals.target_path):
			reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
		else:
			reference_frame = read_image(facefusion.globals.target_path)
		reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
		append_reference_face('origin', reference_face)
		# let each processor contribute its own processed reference frame/face
		if source_face and reference_face:
			for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
				abstract_reference_frame = frame_processor_module.get_reference_frame(source_face, reference_face, reference_frame)
				if numpy.any(abstract_reference_frame):
					reference_frame = abstract_reference_frame
					reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
					append_reference_face(frame_processor_module.__name__, reference_face)
278
+
279
+
280
def force_download() -> None:
	"""
	Download every known model (core analysers plus all frame processors)
	into the assets directory.
	"""
	download_directory_path = resolve_relative_path('../.assets/models')
	available_frame_processors = list_directory('facefusion/processors/frame/modules')
	model_list =\
	[
		content_analyser.MODELS,
		face_analyser.MODELS,
		face_masker.MODELS,
		voice_extractor.MODELS
	]

	# frame processor modules expose their models via an optional MODELS dict
	for frame_processor_module in get_frame_processors_modules(available_frame_processors):
		if hasattr(frame_processor_module, 'MODELS'):
			model_list.append(frame_processor_module.MODELS)
	model_urls = [ models[model].get('url') for models in model_list for model in models ]
	conditional_download(download_directory_path, model_urls)
296
+
297
+
298
def process_image(start_time : float) -> None:
	"""
	Process a target image: copy it at a working resolution, run every frame
	processor over it in place, then finalize and validate the output.

	:param start_time: timestamp used to report the elapsed processing time
	"""
	normed_output_path = normalize_output_path(facefusion.globals.target_path, facefusion.globals.output_path)
	# NOTE(review): the content analyser gate is disabled in this fork — confirm intentional
	# if analyse_image(facefusion.globals.target_path):
	# 	return
	# copy image
	process_manager.start()
	temp_image_resolution = pack_resolution(restrict_image_resolution(facefusion.globals.target_path, unpack_resolution(facefusion.globals.output_image_resolution)))
	logger.info(wording.get('copying_image').format(resolution = temp_image_resolution), __name__.upper())
	if copy_image(facefusion.globals.target_path, normed_output_path, temp_image_resolution):
		logger.debug(wording.get('copying_image_succeed'), __name__.upper())
	else:
		logger.error(wording.get('copying_image_failed'), __name__.upper())
		return
	# process image
	for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
		logger.info(wording.get('processing'), frame_processor_module.NAME)
		# processors read and write the same output path, chaining their effects
		frame_processor_module.process_image(facefusion.globals.source_paths, normed_output_path, normed_output_path)
		frame_processor_module.post_process()
	if is_process_stopping():
		return
	# finalize image
	logger.info(wording.get('finalizing_image').format(resolution = facefusion.globals.output_image_resolution), __name__.upper())
	if finalize_image(normed_output_path, facefusion.globals.output_image_resolution):
		logger.debug(wording.get('finalizing_image_succeed'), __name__.upper())
	else:
		logger.warn(wording.get('finalizing_image_skipped'), __name__.upper())
	# validate image
	if is_image(normed_output_path):
		# NOTE(review): "% 60" drops whole minutes from the reported seconds — confirm intended
		seconds = '{:.2f}'.format((time() - start_time) % 60)
		logger.info(wording.get('processing_image_succeed').format(seconds = seconds), __name__.upper())
		conditional_log_statistics()
	else:
		logger.error(wording.get('processing_image_failed'), __name__.upper())
	process_manager.end()
332
+
333
+
334
def process_video(start_time : float) -> None:
	"""
	Process a target video end to end: extract frames at a working resolution
	and fps, run every frame processor over the frame files, merge the frames
	back into a video, restore or replace the audio, and validate the output.

	:param start_time: timestamp used to report the elapsed processing time
	"""
	normed_output_path = normalize_output_path(facefusion.globals.target_path, facefusion.globals.output_path)
	# NOTE(review): the content analyser gate is disabled in this fork — confirm intentional
	# if analyse_video(facefusion.globals.target_path, facefusion.globals.trim_frame_start, facefusion.globals.trim_frame_end):
	# 	return
	# clear temp
	logger.debug(wording.get('clearing_temp'), __name__.upper())
	clear_temp(facefusion.globals.target_path)
	# create temp
	logger.debug(wording.get('creating_temp'), __name__.upper())
	create_temp(facefusion.globals.target_path)
	# extract frames
	process_manager.start()
	temp_video_resolution = pack_resolution(restrict_video_resolution(facefusion.globals.target_path, unpack_resolution(facefusion.globals.output_video_resolution)))
	temp_video_fps = restrict_video_fps(facefusion.globals.target_path, facefusion.globals.output_video_fps)
	logger.info(wording.get('extracting_frames').format(resolution = temp_video_resolution, fps = temp_video_fps), __name__.upper())
	if extract_frames(facefusion.globals.target_path, temp_video_resolution, temp_video_fps):
		logger.debug(wording.get('extracting_frames_succeed'), __name__.upper())
	else:
		# a failed extraction may simply mean the user requested a stop
		if is_process_stopping():
			return
		logger.error(wording.get('extracting_frames_failed'), __name__.upper())
		return
	# process frames
	temp_frame_paths = get_temp_frame_paths(facefusion.globals.target_path)
	if temp_frame_paths:
		for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
			logger.info(wording.get('processing'), frame_processor_module.NAME)
			frame_processor_module.process_video(facefusion.globals.source_paths, temp_frame_paths)
			frame_processor_module.post_process()
		if is_process_stopping():
			return
	else:
		logger.error(wording.get('temp_frames_not_found'), __name__.upper())
		return
	# merge video
	logger.info(wording.get('merging_video').format(resolution = facefusion.globals.output_video_resolution, fps = facefusion.globals.output_video_fps), __name__.upper())
	if merge_video(facefusion.globals.target_path, facefusion.globals.output_video_resolution, facefusion.globals.output_video_fps):
		logger.debug(wording.get('merging_video_succeed'), __name__.upper())
	else:
		if is_process_stopping():
			return
		logger.error(wording.get('merging_video_failed'), __name__.upper())
		return
	# handle audio
	if facefusion.globals.skip_audio:
		logger.info(wording.get('skipping_audio'), __name__.upper())
		move_temp(facefusion.globals.target_path, normed_output_path)
	else:
		# lip syncing replaces the audio track with the source audio instead of restoring the original
		if 'lip_syncer' in facefusion.globals.frame_processors:
			source_audio_path = get_first(filter_audio_paths(facefusion.globals.source_paths))
			if source_audio_path and replace_audio(facefusion.globals.target_path, source_audio_path, normed_output_path):
				logger.debug(wording.get('restoring_audio_succeed'), __name__.upper())
			else:
				if is_process_stopping():
					return
				logger.warn(wording.get('restoring_audio_skipped'), __name__.upper())
				move_temp(facefusion.globals.target_path, normed_output_path)
		else:
			if restore_audio(facefusion.globals.target_path, normed_output_path, facefusion.globals.output_video_fps):
				logger.debug(wording.get('restoring_audio_succeed'), __name__.upper())
			else:
				if is_process_stopping():
					return
				logger.warn(wording.get('restoring_audio_skipped'), __name__.upper())
				move_temp(facefusion.globals.target_path, normed_output_path)
	# clear temp
	logger.debug(wording.get('clearing_temp'), __name__.upper())
	clear_temp(facefusion.globals.target_path)
	# validate video
	if is_video(normed_output_path):
		seconds = '{:.2f}'.format((time() - start_time))
		logger.info(wording.get('processing_video_succeed').format(seconds = seconds), __name__.upper())
		conditional_log_statistics()
	else:
		logger.error(wording.get('processing_video_failed'), __name__.upper())
	process_manager.end()
410
+
411
+
412
def is_process_stopping() -> bool:
	"""Finalize a pending stop request and report whether processing is still winding down."""
	stopping = process_manager.is_stopping()
	if stopping:
		process_manager.end()
		logger.info(wording.get('processing_stopped'), __name__.upper())
	return process_manager.is_pending()
facefusion/download.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import platform
import ssl
import subprocess
import urllib.request
from functools import lru_cache
from time import sleep
from typing import List

from tqdm import tqdm

import facefusion.globals
from facefusion import wording
from facefusion.filesystem import is_file
14
# NOTE(review): on macOS, HTTPS certificate verification is disabled globally —
# presumably to work around python.org builds that ship without an initialised
# certificate store, at the cost of all TLS verification; confirm this is intended.
if platform.system().lower() == 'darwin':
	ssl._create_default_https_context = ssl._create_unverified_context
16
+
17
+
18
def conditional_download(download_directory_path : str, urls : List[str]) -> None:
	"""
	Download each url into download_directory_path unless a complete copy exists.

	Partially downloaded files are resumed via curl --continue-at while a tqdm
	bar tracks the growing file size. A size-mismatched (corrupt) download is
	removed and retried once per call via recursion.

	:param download_directory_path: directory the files are written into
	:param urls: urls to fetch; the basename of each url names the local file
	"""
	for url in urls:
		download_file_path = os.path.join(download_directory_path, os.path.basename(url))
		initial_size = os.path.getsize(download_file_path) if is_file(download_file_path) else 0
		download_size = get_download_size(url)
		if initial_size < download_size:
			with tqdm(total = download_size, initial = initial_size, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
				process = subprocess.Popen([ 'curl', '--create-dirs', '--silent', '--insecure', '--location', '--continue-at', '-', '--output', download_file_path, url ])
				current_size = initial_size
				while current_size < download_size:
					if is_file(download_file_path):
						current_size = os.path.getsize(download_file_path)
						progress.update(current_size - progress.n)
					# stop polling once curl has exited, otherwise a failed
					# download (curl missing, network error) spins here forever
					if process.poll() is not None:
						break
					# brief sleep to avoid busy-waiting a full cpu core
					sleep(0.05)
		# an incomplete or corrupt file is removed and fetched again
		if download_size and not is_download_done(url, download_file_path):
			os.remove(download_file_path)
			conditional_download(download_directory_path, [ url ])
34
+
35
+
36
@lru_cache(maxsize = None)
def get_download_size(url : str) -> int:
	"""
	Return the Content-Length of the given url in bytes, or 0 when it cannot
	be determined. Results are cached per url for the lifetime of the process.

	:param url: url to probe with a HEAD-less GET (the body is not read)
	:return: size in bytes, 0 on any failure or when the header is absent
	"""
	try:
		# context manager closes the connection instead of leaking it
		with urllib.request.urlopen(url, timeout = 10) as response:
			# headers.get() works for every handler (http, file, ftp) and
			# returns None when the header is missing, unlike getheader()
			# which is HTTP-only and would raise on int(None)
			content_length = response.headers.get('Content-Length')
			return int(content_length) if content_length else 0
	except (OSError, ValueError):
		# OSError covers URLError / socket failures, ValueError covers
		# malformed urls and non-numeric header values
		return 0
43
+
44
+
45
def is_download_done(url : str, file_path : str) -> bool:
	"""
	A download counts as done when the local file exists and its size matches
	the remote Content-Length reported for the url.
	"""
	return is_file(file_path) and get_download_size(url) == os.path.getsize(file_path)