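"""Download optional model checkpoints into ComfyUI's models directory.

Each entry in ``models_to_download`` declares one or more download URLs and a
destination sub-folder under ``folder_paths.models_dir``. Checkpoints that are
already present on disk are skipped; the remaining ones are offered
interactively unless the ``-y``/``--yes`` flag is passed.
"""
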
import os
import subprocess
import sys

import requests
from rich.console import Console
from tqdm import tqdm

try:
    import folder_paths
except ModuleNotFoundError:
    # Allow running this script standalone: make the ComfyUI root importable.
    sys.path.append(os.path.join(os.path.dirname(__file__), "../../.."))
    import folder_paths

models_to_download = {
    "DeepBump": {
        "size": 25.5,
        "download_url": "https://github.com/HugoTini/DeepBump/raw/master/deepbump256.onnx",
        "destination": "deepbump",
    },
    "Face Swap": {
        "size": 660,
        "download_url": [
            "https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_mobilenet0.25_Final.pth",
            "https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth",
            "https://huggingface.co/deepinsight/inswapper/resolve/main/inswapper_128.onnx",
        ],
        "destination": "insightface",
    },
    "GFPGAN (face enhancement)": {
        "size": 332,
        "download_url": [
            "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth",
            "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth",
            # TODO: provide a way to selectively download models from "packs"
            # https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/GFPGANv1.pth
            # https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth
            # https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth
        ],
        "destination": "face_restore",
    },
    "FILM: Frame Interpolation for Large Motion": {
        "size": 402,
        "download_url": [
            "https://drive.google.com/drive/folders/131_--QrieM4aQbbLWrUtbO2cGbX8-war"
        ],
        "destination": "FILM",
    },
}

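# Registering another downloadable model only requires a new entry in the dict
# above. The entry below is a purely hypothetical illustration (URL and folder
# are made up), not something the script ships with:
#
# models_to_download["My Upscaler"] = {
#     "size": 64,  # approximate size in MB; informational only, never read by the script
#     "download_url": "https://example.com/my_upscaler.pth",
#     "destination": "upscale_models",  # sub-folder created under folder_paths.models_dir
# }
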
console = Console()

from urllib.parse import urlparse

def download_model(download_url, destination):
    """Download a single file (or a list of files) into ``destination``."""
    if isinstance(download_url, list):
        for url in download_url:
            download_model(url, destination)
        return

    filename = os.path.basename(urlparse(download_url).path)

    if "drive.google.com" in download_url:
        # Google Drive links are handled by gdown, installed on demand from a pinned fork.
        try:
            import gdown
        except ImportError:
            print("Installing gdown")
            subprocess.check_call(
                [
                    sys.executable,
                    "-m",
                    "pip",
                    "install",
                    "git+https://github.com/melMass/gdown@main",
                ]
            )
            import gdown

        if "/folders/" in download_url:
            # Download a whole Drive folder; fall back gracefully if the
            # installed gdown does not know the ``resume`` keyword.
            try:
                gdown.download_folder(download_url, output=destination, resume=True)
            except TypeError:
                gdown.download_folder(download_url, output=destination)
            return

        # Single file hosted on Google Drive.
        gdown.download(download_url, destination, quiet=False, resume=True)
        return

    # Plain HTTP(S) download with a tqdm progress bar.
    response = requests.get(download_url, stream=True)
    total_size = int(response.headers.get("content-length", 0))
    destination_path = os.path.join(destination, filename)
    with open(destination_path, "wb") as file:
        with tqdm(
            total=total_size, unit="B", unit_scale=True, desc=destination_path, ncols=80
        ) as progress_bar:
            for data in response.iter_content(chunk_size=4096):
                file.write(data)
                progress_bar.update(len(data))

    console.print(
        f"Downloaded model from {download_url} to {destination_path}",
        style="bold green",
    )

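# Illustrative direct use (the target path here is hypothetical):
# download_model(
#     "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth",
#     "/path/to/ComfyUI/models/face_restore",
# )
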
def ask_user_for_downloads(models_to_download):
    """Print a numbered menu and return the subset of models the user picked."""
    console.print("Choose models to download:")
    choices = {}
    for i, model_name in enumerate(models_to_download.keys(), start=1):
        choices[str(i)] = model_name
        console.print(f"{i}. {model_name}")

    console.print(
        "Enter the numbers of the models you want to download (comma-separated):"
    )
    user_input = console.input(">> ")
    selected_models = user_input.split(",")

    models_to_download_selected = {}
    for choice in selected_models:
        choice = choice.strip()
        if choice in choices:
            model_name = choices[choice]
            models_to_download_selected[model_name] = models_to_download[model_name]
        elif choice == "":
            # An empty answer selects everything.
            models_to_download_selected = models_to_download
        else:
            console.print(f"Invalid choice: {choice}. Skipping.")

    return models_to_download_selected

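# Illustrative session (assuming all four registry entries are missing):
#   Choose models to download:
#   1. DeepBump
#   2. Face Swap
#   3. GFPGAN (face enhancement)
#   4. FILM: Frame Interpolation for Large Motion
#   Enter the numbers of the models you want to download (comma-separated):
#   >> 1,3           downloads DeepBump and GFPGAN
#   >> <just Enter>  downloads everything listed
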
def handle_interrupt():
    console.print("Interrupted by user.", style="bold red")


def main(models_to_download, skip_input=False):
    try:
        models_to_download_selected = {}

        # check_destination closes over ``model_name`` and ``model_details``
        # from the loop below; it records a model as pending only if at least
        # one of its files is missing on disk.
        def check_destination(urls, destination):
            if isinstance(urls, list):
                for url in urls:
                    check_destination(url, destination)
                return

            filename = os.path.basename(urlparse(urls).path)
            destination = os.path.join(folder_paths.models_dir, destination)
            if not os.path.exists(destination):
                os.makedirs(destination)

            destination_path = os.path.join(destination, filename)
            if os.path.exists(destination_path):
                console.print(
                    f"Checkpoint '{filename}' for {model_name} already exists in '{destination}'"
                )
            else:
                model_details["destination"] = destination
                models_to_download_selected[model_name] = model_details

        for model_name, model_details in models_to_download.items():
            destination = model_details["destination"]
            download_url = model_details["download_url"]
            check_destination(download_url, destination)

        if not models_to_download_selected:
            console.print("No new models to download.")
            return

        models_to_download_selected = (
            ask_user_for_downloads(models_to_download_selected)
            if not skip_input
            else models_to_download_selected
        )

        for model_name, model_details in models_to_download_selected.items():
            download_url = model_details["download_url"]
            destination = model_details["destination"]
            console.print(f"Downloading {model_name}...")
            download_model(download_url, destination)

    except KeyboardInterrupt:
        handle_interrupt()

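# main() can also be driven from other code, e.g. to fetch a single model
# non-interactively (illustrative call):
#   main({"DeepBump": models_to_download["DeepBump"]}, skip_input=True)
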
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-y", "--yes", action="store_true", help="skip user input")
    args = parser.parse_args()

    main(models_to_download, args.yes)
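# Typical invocations (the script name is whatever this file is saved as):
#   python download_models.py       # pick models interactively
#   python download_models.py -y    # download every missing model without asking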