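"""Bootstrap script for a stable-diffusion-webui-forge instance.

Clones the WebUI Forge repository, downloads the model files listed in
main() with per-file progress bars, and then launches the WebUI with its
API enabled.
"""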
import os
import requests as rq
from git import Repo as rp
import concurrent.futures as cf
from tqdm import tqdm

def download_file(url, save_directory, filename):
    """Stream `url` into `save_directory/filename`, showing a progress bar."""
    os.makedirs(save_directory, exist_ok=True)
    filepath = os.path.join(save_directory, filename)
    response = rq.get(url, stream=True)
    response.raise_for_status()  # fail fast on HTTP errors instead of writing an error page to disk
    total_size = int(response.headers.get('content-length', 0))
    block_size = 1024
    progress_bar = tqdm(total=total_size, unit='B', unit_scale=True, desc=filename)
    with open(filepath, "wb") as f:
        for data in response.iter_content(block_size):
            progress_bar.update(len(data))
            f.write(data)
    progress_bar.close()

def main():
    small_files = [
        #loras here:
    ]
    big_files = [
        # big files here:
        ("https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0_0.9vae.safetensors?download=true", "/home/user/app/stable-diffusion-webui/models/Stable-diffusion/", "sdxl.safetensors")
    ]

    # Submit both groups to their own thread pool so the small and big
    # downloads actually run concurrently, then block until every download
    # has finished; leaving the `with` block also shuts both pools down.
    with cf.ThreadPoolExecutor(max_workers=2) as small_executor, \
         cf.ThreadPoolExecutor(max_workers=16) as big_executor:
        small_futures = [small_executor.submit(download_file, *url_info) for url_info in small_files]
        big_futures = [big_executor.submit(download_file, *url_info) for url_info in big_files]
        cf.wait(small_futures + big_futures)

if __name__ == "__main__":
    # Fetch the WebUI Forge repo, make sure the Lora folder exists, pull the
    # model files, then start the WebUI with its API enabled.
    rp.clone_from("https://github.com/lllyasviel/stable-diffusion-webui-forge.git", "/home/user/app/stable-diffusion-webui")
    os.makedirs("/home/user/app/stable-diffusion-webui/models/Lora", exist_ok=True)
    main()
    os.system("python /home/user/app/stable-diffusion-webui/launch.py --ui-config-file /home/user/app/ui-config.json --cors-allow-origins huggingface.co,hf.space --api --xformers --precision full --no-half --skip-torch-cuda-test")