test preview image model download
files_cells/notebooks/en/downloading_en.ipynb
CHANGED
@@ -344,12 +344,51 @@
 "    \"adetailer\": adetailer_dir\n",
 "}\n",
 "\n",
-"!mkdir -p {models_dir} {vaes_dir} {loras_dir} {embeddings_dir} {extensions_dir} {control_dir} {adetailer_dir}\n",
+"# !mkdir -p {models_dir} {vaes_dir} {loras_dir} {embeddings_dir} {extensions_dir} {control_dir} {adetailer_dir}\n",
+"directories = [value for key, value in prefixes.items()] # for unpucking zip files\n",
+"!mkdir -p {\" \".join(directories)}\n",
 "\n",
 "url = \"\"\n",
 "hf_token = optional_huggingface_token if optional_huggingface_token else \"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\"\n",
 "user_header = f\"\\\"Authorization: Bearer {hf_token}\\\"\"\n",
 "\n",
+"''' Get Image Preview | CivitAi '''\n",
+"\n",
+"def get_data_from_api(model_id): # get model data\n",
+"    base_url = f\"https://civitai.com/api/v1/model-versions/{model_id}\"\n",
+"    headers = {\"Content-Type\": \"application/json\"}\n",
+"    try:\n",
+"        response = requests.get(base_url, headers=headers)\n",
+"        if response.status_code == 200:\n",
+"            return response.json()\n",
+"        else:\n",
+"            print(f\"Failed to retrieve data. Status code: {response.status_code}\")\n",
+"            return None\n",
+"    except requests.exceptions.RequestException as e:\n",
+"        print(f\"An error occurred: {e}\")\n",
+"        return None\n",
+"\n",
+"def extract_file_and_image_info(data):\n",
+"    files = data.get('files', [{}])\n",
+"    model_name = files[0].get('name', None) # get original file name\n",
+"    images = data.get('images', [{}])\n",
+"    image_url = images[0].get('url', None) # get preview: first image\n",
+"    return model_name, image_url\n",
+"\n",
+"def modify_image_url(image_url):\n",
+"    parts = image_url.split('/')\n",
+"    for i, part in enumerate(parts):\n",
+"        if part.startswith('width='):\n",
+"            width_value = int(part.split('=')[1])\n",
+"            parts[i] = f'width={width_value * 2}' # resize for quality image\n",
+"            break\n",
+"    return '/'.join(parts)\n",
+"\n",
+"def generate_preview_filename(model_name, image_url):\n",
+"    file_parts = model_name.split('.')\n",
+"    image_format = image_url.split('.')[-1].split('?')[0]\n",
+"    return f\"{file_parts[0]}.preview.{image_format}\" # assigning the original image format\n",
+"\n",
 "''' main download code '''\n",
 "\n",
 "def handle_manual(url):\n",
@@ -374,11 +413,26 @@
 "    print(\"\\033[32m---\"*45 + f\"\\n\\033[33mURL: \\033[34m{url}\\n\\033[33mSAVE DIR: \\033[34m{dst_dir}\\n\\033[33mFILE NAME: \\033[34m{file_name}\\033[32m\\n~~~\\033[0m\")\n",
 "    # print(url, dst_dir, file_name)\n",
 "\n",
-" #
+"    # === CivitAi API ===\n",
 "    civitai_token = \"62c0c5956b2f9defbd844d754000180b\"\n",
 "    if 'civitai' in url and civitai_token:\n",
+"        model_id = url.split('/')[-1]\n",
+"        data = get_data_from_api(model_id)\n",
 "        url = f\"{url}?token={civitai_token}\"\n",
 "\n",
+"        if data:\n",
+"            model_name, image_url = extract_file_and_image_info(data)\n",
+"            if model_name and image_url:\n",
+"                new_image_url = modify_image_url(image_url)\n",
+"                image_file_name = generate_preview_filename(model_name if not file_name else file_name, new_image_url)\n",
+"                save_img_path = f\"{dst_dir}/{image_file_name}\"\n",
+"                !wget -O {save_img_path} {new_image_url} # download image\n",
+"                # print(f\"\\n\\n\\n{save_img_path, new_image_url}\\n\\n\\n\")\n",
+"            else:\n",
+"                print(\"File name or image URL missing.\")\n",
+"        else:\n",
+"            print(\"Failed to retrieve data from the API.\")\n",
+"\n",
 "    # -- GDrive --\n",
 "    if 'drive.google' in url:\n",
 "        if 'folders' in url:\n",
@@ -423,8 +477,7 @@
 "\n",
 "## unpucking zip files\n",
 "def unpucking_zip_files():\n",
-"    directories = [models_dir, vaes_dir, embeddings_dir, loras_dir , extensions_dir, control_dir , adetailer_dir]\n",
-"\n",
+"    # directories = [models_dir, vaes_dir, embeddings_dir, loras_dir , extensions_dir, control_dir , adetailer_dir]\n",
 "    for directory in directories:\n",
 "        for root, dirs, files in os.walk(directory):\n",
 "            for file in files:\n",
@@ -561,21 +614,21 @@
 "else:\n",
 "    if any(not file.endswith('.txt') for file in os.listdir(models_dir)):\n",
 "        print(\"\\n\\033[33m➤ Models\\033[0m\")\n",
-"
+"        !find {models_dir}/ -mindepth 1 -name '*.safetensors' -printf '%f\\n'\n",
 "    if any(not file.endswith('.txt') for file in os.listdir(vaes_dir)):\n",
 "        print(\"\\n\\033[33m➤ VAEs\\033[0m\")\n",
-"
+"        !find {vaes_dir}/ -mindepth 1 -name '*.safetensors' -printf '%f\\n'\n",
 "    if any(not file.endswith('.txt') and not os.path.isdir(os.path.join(embeddings_dir, file)) for file in os.listdir(embeddings_dir)):\n",
 "        print(\"\\n\\033[33m➤ Embeddings\\033[0m\")\n",
-"
+"        !find {embeddings_dir}/ -mindepth 1 -maxdepth 1 \\( -name '*.pt' -or -name '*.safetensors' \\) -printf '%f\\n'\n",
 "    if any(not file.endswith('.txt') for file in os.listdir(loras_dir)):\n",
 "        print(\"\\n\\033[33m➤ LoRAs\\033[0m\")\n",
-"
+"        !find {loras_dir}/ -mindepth 1 -name '*.safetensors' -printf '%f\\n'\n",
 "    print(f\"\\n\\033[33m➤ Extensions\\033[0m\")\n",
-"
+"    !find {extensions_dir}/ -mindepth 1 -maxdepth 1 ! -name '*.txt' -printf '%f\\n'\n",
 "    if any(not file.endswith(('.txt', '.yaml')) for file in os.listdir(control_dir)):\n",
 "        print(\"\\n\\033[33m➤ ControlNet\\033[0m\")\n",
-"
+"        !find {control_dir}/ -mindepth 1 ! -name '*.yaml' -printf '%f\\n' | sed 's/^[^_]*_[^_]*_[^_]*_\\([^_]*\\)_fp16\\.safetensors$/\\1/'\n",
 "\n",
 "\n",
 "# === OTHER ===\n",
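Note: a minimal usage sketch of the preview pipeline added above, assuming the four helpers from this diff are defined and that `requests` is imported earlier in the cell; the model-version id below is a hypothetical example, not one taken from the commit.

# Sketch: resolve a CivitAI preview image for one model-version id.
model_id = "128713"                                  # hypothetical id parsed from a download URL
data = get_data_from_api(model_id)                   # GET https://civitai.com/api/v1/model-versions/{id}
if data:
    model_name, image_url = extract_file_and_image_info(data)   # original file name + first preview image
    if model_name and image_url:
        preview_url = modify_image_url(image_url)                           # double the width= segment for a larger image
        preview_name = generate_preview_filename(model_name, preview_url)   # e.g. "model.preview.jpeg"
        print(preview_name, preview_url)             # manual_download() passes these to wget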
files_cells/notebooks/ru/downloading_ru.ipynb
CHANGED
@@ -344,12 +344,51 @@
 "    \"adetailer\": adetailer_dir\n",
 "}\n",
 "\n",
-"!mkdir -p {models_dir} {vaes_dir} {loras_dir} {embeddings_dir} {extensions_dir} {control_dir} {adetailer_dir}\n",
+"# !mkdir -p {models_dir} {vaes_dir} {loras_dir} {embeddings_dir} {extensions_dir} {control_dir} {adetailer_dir}\n",
+"directories = [value for key, value in prefixes.items()] # for unpucking zip files\n",
+"!mkdir -p {\" \".join(directories)}\n",
 "\n",
 "url = \"\"\n",
 "hf_token = optional_huggingface_token if optional_huggingface_token else \"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\"\n",
 "user_header = f\"\\\"Authorization: Bearer {hf_token}\\\"\"\n",
 "\n",
+"''' Get Image Preview | CivitAi '''\n",
+"\n",
+"def get_data_from_api(model_id): # get model data\n",
+"    base_url = f\"https://civitai.com/api/v1/model-versions/{model_id}\"\n",
+"    headers = {\"Content-Type\": \"application/json\"}\n",
+"    try:\n",
+"        response = requests.get(base_url, headers=headers)\n",
+"        if response.status_code == 200:\n",
+"            return response.json()\n",
+"        else:\n",
+"            print(f\"Failed to retrieve data. Status code: {response.status_code}\")\n",
+"            return None\n",
+"    except requests.exceptions.RequestException as e:\n",
+"        print(f\"An error occurred: {e}\")\n",
+"        return None\n",
+"\n",
+"def extract_file_and_image_info(data):\n",
+"    files = data.get('files', [{}])\n",
+"    model_name = files[0].get('name', None) # get original file name\n",
+"    images = data.get('images', [{}])\n",
+"    image_url = images[0].get('url', None) # get preview: first image\n",
+"    return model_name, image_url\n",
+"\n",
+"def modify_image_url(image_url):\n",
+"    parts = image_url.split('/')\n",
+"    for i, part in enumerate(parts):\n",
+"        if part.startswith('width='):\n",
+"            width_value = int(part.split('=')[1])\n",
+"            parts[i] = f'width={width_value * 2}' # resize for quality image\n",
+"            break\n",
+"    return '/'.join(parts)\n",
+"\n",
+"def generate_preview_filename(model_name, image_url):\n",
+"    file_parts = model_name.split('.')\n",
+"    image_format = image_url.split('.')[-1].split('?')[0]\n",
+"    return f\"{file_parts[0]}.preview.{image_format}\" # assigning the original image format\n",
+"\n",
 "''' main download code '''\n",
 "\n",
 "def handle_manual(url):\n",
@@ -374,11 +413,26 @@
 "    print(\"\\033[32m---\"*45 + f\"\\n\\033[33mURL: \\033[34m{url}\\n\\033[33mSAVE DIR: \\033[34m{dst_dir}\\n\\033[33mFILE NAME: \\033[34m{file_name}\\033[32m\\n~~~\\033[0m\")\n",
 "    # print(url, dst_dir, file_name)\n",
 "\n",
-" #
+"    # === CivitAi API ===\n",
 "    civitai_token = \"62c0c5956b2f9defbd844d754000180b\"\n",
 "    if 'civitai' in url and civitai_token:\n",
+"        model_id = url.split('/')[-1]\n",
+"        data = get_data_from_api(model_id)\n",
 "        url = f\"{url}?token={civitai_token}\"\n",
 "\n",
+"        if data:\n",
+"            model_name, image_url = extract_file_and_image_info(data)\n",
+"            if model_name and image_url:\n",
+"                new_image_url = modify_image_url(image_url)\n",
+"                image_file_name = generate_preview_filename(model_name if not file_name else file_name, new_image_url)\n",
+"                save_img_path = f\"{dst_dir}/{image_file_name}\"\n",
+"                !wget -O {save_img_path} {new_image_url} # download image\n",
+"                # print(f\"\\n\\n\\n{save_img_path, new_image_url}\\n\\n\\n\")\n",
+"            else:\n",
+"                print(\"File name or image URL missing.\")\n",
+"        else:\n",
+"            print(\"Failed to retrieve data from the API.\")\n",
+"\n",
 "    # -- GDrive --\n",
 "    if 'drive.google' in url:\n",
 "        if 'folders' in url:\n",
@@ -388,7 +442,7 @@
 "            !gdown \"{url}\" -O {dst_dir}/{file_name} --fuzzy -c\n",
 "        else:\n",
 "            !gdown \"{url}\" -O {dst_dir} --fuzzy -c\n",
-" # --
+"    # -- Hugging Face --\n",
 "    elif 'huggingface' in url:\n",
 "        if '/blob/' in url:\n",
 "            url = url.replace('/blob/', '/resolve/')\n",
@@ -423,8 +477,7 @@
 "\n",
 "## unpucking zip files\n",
 "def unpucking_zip_files():\n",
-"    directories = [models_dir, vaes_dir, embeddings_dir, loras_dir , extensions_dir, control_dir , adetailer_dir]\n",
-"\n",
+"    # directories = [models_dir, vaes_dir, embeddings_dir, loras_dir , extensions_dir, control_dir , adetailer_dir]\n",
 "    for directory in directories:\n",
 "        for root, dirs, files in os.walk(directory):\n",
 "            for file in files:\n",
@@ -561,21 +614,21 @@
 "else:\n",
 "    if any(not file.endswith('.txt') for file in os.listdir(models_dir)):\n",
 "        print(\"\\n\\033[33m➤ Models\\033[0m\")\n",
-"
+"        !find {models_dir}/ -mindepth 1 -name '*.safetensors' -printf '%f\\n'\n",
 "    if any(not file.endswith('.txt') for file in os.listdir(vaes_dir)):\n",
 "        print(\"\\n\\033[33m➤ VAEs\\033[0m\")\n",
-"
+"        !find {vaes_dir}/ -mindepth 1 -name '*.safetensors' -printf '%f\\n'\n",
 "    if any(not file.endswith('.txt') and not os.path.isdir(os.path.join(embeddings_dir, file)) for file in os.listdir(embeddings_dir)):\n",
 "        print(\"\\n\\033[33m➤ Embeddings\\033[0m\")\n",
-"
+"        !find {embeddings_dir}/ -mindepth 1 -maxdepth 1 \\( -name '*.pt' -or -name '*.safetensors' \\) -printf '%f\\n'\n",
 "    if any(not file.endswith('.txt') for file in os.listdir(loras_dir)):\n",
 "        print(\"\\n\\033[33m➤ LoRAs\\033[0m\")\n",
-"
+"        !find {loras_dir}/ -mindepth 1 -name '*.safetensors' -printf '%f\\n'\n",
 "    print(f\"\\n\\033[33m➤ Extensions\\033[0m\")\n",
-"
+"    !find {extensions_dir}/ -mindepth 1 -maxdepth 1 ! -name '*.txt' -printf '%f\\n'\n",
 "    if any(not file.endswith(('.txt', '.yaml')) for file in os.listdir(control_dir)):\n",
 "        print(\"\\n\\033[33m➤ ControlNet\\033[0m\")\n",
-"
+"        !find {control_dir}/ -mindepth 1 ! -name '*.yaml' -printf '%f\\n' | sed 's/^[^_]*_[^_]*_[^_]*_\\([^_]*\\)_fp16\\.safetensors$/\\1/'\n",
 "\n",
 "\n",
 "# === OTHER ===\n",
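Note: the new directory handling builds one list from the `prefixes` mapping and reuses it both for `!mkdir -p` and for `unpucking_zip_files()`. A rough pure-Python equivalent of that shell call (a sketch, assuming `prefixes` maps prefix names to absolute directory paths as in the cell above) is:

import os

# Same list the notebook joins with spaces for `!mkdir -p`.
directories = list(prefixes.values())
for directory in directories:
    os.makedirs(directory, exist_ok=True)  # mkdir -p semantics: create parents, ignore existing dirs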
files_cells/python/en/downloading_en.py
CHANGED
@@ -325,12 +325,51 @@ prefixes = {
     "adetailer": adetailer_dir
 }
 
-
+# !mkdir -p {models_dir} {vaes_dir} {loras_dir} {embeddings_dir} {extensions_dir} {control_dir} {adetailer_dir}
+directories = [value for key, value in prefixes.items()] # for unpucking zip files
+get_ipython().system('mkdir -p {" ".join(directories)}')
 
 url = ""
 hf_token = optional_huggingface_token if optional_huggingface_token else "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"
 user_header = f"\"Authorization: Bearer {hf_token}\""
 
+''' Get Image Preview | CivitAi '''
+
+def get_data_from_api(model_id): # get model data
+    base_url = f"https://civitai.com/api/v1/model-versions/{model_id}"
+    headers = {"Content-Type": "application/json"}
+    try:
+        response = requests.get(base_url, headers=headers)
+        if response.status_code == 200:
+            return response.json()
+        else:
+            print(f"Failed to retrieve data. Status code: {response.status_code}")
+            return None
+    except requests.exceptions.RequestException as e:
+        print(f"An error occurred: {e}")
+        return None
+
+def extract_file_and_image_info(data):
+    files = data.get('files', [{}])
+    model_name = files[0].get('name', None) # get original file name
+    images = data.get('images', [{}])
+    image_url = images[0].get('url', None) # get preview: first image
+    return model_name, image_url
+
+def modify_image_url(image_url):
+    parts = image_url.split('/')
+    for i, part in enumerate(parts):
+        if part.startswith('width='):
+            width_value = int(part.split('=')[1])
+            parts[i] = f'width={width_value * 2}' # resize for quality image
+            break
+    return '/'.join(parts)
+
+def generate_preview_filename(model_name, image_url):
+    file_parts = model_name.split('.')
+    image_format = image_url.split('.')[-1].split('?')[0]
+    return f"{file_parts[0]}.preview.{image_format}" # assigning the original image format
+
 ''' main download code '''
 
 def handle_manual(url):
@@ -355,11 +394,26 @@ def manual_download(url, dst_dir, file_name):
     print("\033[32m---"*45 + f"\n\033[33mURL: \033[34m{url}\n\033[33mSAVE DIR: \033[34m{dst_dir}\n\033[33mFILE NAME: \033[34m{file_name}\033[32m\n~~~\033[0m")
     # print(url, dst_dir, file_name)
 
-#
+    # === CivitAi API ===
     civitai_token = "62c0c5956b2f9defbd844d754000180b"
     if 'civitai' in url and civitai_token:
+        model_id = url.split('/')[-1]
+        data = get_data_from_api(model_id)
        url = f"{url}?token={civitai_token}"
 
+        if data:
+            model_name, image_url = extract_file_and_image_info(data)
+            if model_name and image_url:
+                new_image_url = modify_image_url(image_url)
+                image_file_name = generate_preview_filename(model_name if not file_name else file_name, new_image_url)
+                save_img_path = f"{dst_dir}/{image_file_name}"
+                get_ipython().system('wget -O {save_img_path} {new_image_url} # download image')
+                # print(f"\n\n\n{save_img_path, new_image_url}\n\n\n")
+            else:
+                print("File name or image URL missing.")
+        else:
+            print("Failed to retrieve data from the API.")
+
     # -- GDrive --
     if 'drive.google' in url:
         if 'folders' in url:
@@ -404,8 +458,7 @@ def download(url):
 
 ## unpucking zip files
 def unpucking_zip_files():
-    directories = [models_dir, vaes_dir, embeddings_dir, loras_dir , extensions_dir, control_dir , adetailer_dir]
-
+    # directories = [models_dir, vaes_dir, embeddings_dir, loras_dir , extensions_dir, control_dir , adetailer_dir]
     for directory in directories:
         for root, dirs, files in os.walk(directory):
             for file in files:
@@ -539,21 +592,21 @@ if detailed_download == "off":
 else:
     if any(not file.endswith('.txt') for file in os.listdir(models_dir)):
         print("\n\033[33m➤ Models\033[0m")
-        get_ipython().system("find {models_dir}/ -mindepth 1
+        get_ipython().system("find {models_dir}/ -mindepth 1 -name '*.safetensors' -printf '%f\\n'")
     if any(not file.endswith('.txt') for file in os.listdir(vaes_dir)):
         print("\n\033[33m➤ VAEs\033[0m")
-        get_ipython().system("find {vaes_dir}/ -mindepth 1
+        get_ipython().system("find {vaes_dir}/ -mindepth 1 -name '*.safetensors' -printf '%f\\n'")
     if any(not file.endswith('.txt') and not os.path.isdir(os.path.join(embeddings_dir, file)) for file in os.listdir(embeddings_dir)):
         print("\n\033[33m➤ Embeddings\033[0m")
         get_ipython().system("find {embeddings_dir}/ -mindepth 1 -maxdepth 1 \\( -name '*.pt' -or -name '*.safetensors' \\) -printf '%f\\n'")
     if any(not file.endswith('.txt') for file in os.listdir(loras_dir)):
         print("\n\033[33m➤ LoRAs\033[0m")
-        get_ipython().system("find {loras_dir}/ -mindepth 1
+        get_ipython().system("find {loras_dir}/ -mindepth 1 -name '*.safetensors' -printf '%f\\n'")
     print(f"\n\033[33m➤ Extensions\033[0m")
     get_ipython().system("find {extensions_dir}/ -mindepth 1 -maxdepth 1 ! -name '*.txt' -printf '%f\\n'")
     if any(not file.endswith(('.txt', '.yaml')) for file in os.listdir(control_dir)):
         print("\n\033[33m➤ ControlNet\033[0m")
-        get_ipython().system("find {control_dir}/ -mindepth 1 ! -name '*.yaml' -printf '%f\\n' | sed 's/^[^_]*_[^_]*_[^_]*_\\(
+        get_ipython().system("find {control_dir}/ -mindepth 1 ! -name '*.yaml' -printf '%f\\n' | sed 's/^[^_]*_[^_]*_[^_]*_\\([^_]*\\)_fp16\\.safetensors$/\\1/'")
 
 
 # === OTHER ===
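Note: in the script exports the preview is fetched via `get_ipython().system('wget -O ...')`, which relies on IPython's brace expansion at runtime. A library-only alternative (a sketch, not what this commit ships) could stream the image with `requests`:

import requests

def download_preview(url, save_path):
    # Stream the preview image to disk instead of shelling out to wget.
    response = requests.get(url, stream=True, timeout=30)
    response.raise_for_status()
    with open(save_path, "wb") as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)

# download_preview(new_image_url, save_img_path)  # same variables as in manual_download()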
files_cells/python/ru/downloading_ru.py
CHANGED
@@ -325,12 +325,51 @@ prefixes = {
     "adetailer": adetailer_dir
 }
 
-
+# !mkdir -p {models_dir} {vaes_dir} {loras_dir} {embeddings_dir} {extensions_dir} {control_dir} {adetailer_dir}
+directories = [value for key, value in prefixes.items()] # for unpucking zip files
+get_ipython().system('mkdir -p {" ".join(directories)}')
 
 url = ""
 hf_token = optional_huggingface_token if optional_huggingface_token else "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"
 user_header = f"\"Authorization: Bearer {hf_token}\""
 
+''' Get Image Preview | CivitAi '''
+
+def get_data_from_api(model_id): # get model data
+    base_url = f"https://civitai.com/api/v1/model-versions/{model_id}"
+    headers = {"Content-Type": "application/json"}
+    try:
+        response = requests.get(base_url, headers=headers)
+        if response.status_code == 200:
+            return response.json()
+        else:
+            print(f"Failed to retrieve data. Status code: {response.status_code}")
+            return None
+    except requests.exceptions.RequestException as e:
+        print(f"An error occurred: {e}")
+        return None
+
+def extract_file_and_image_info(data):
+    files = data.get('files', [{}])
+    model_name = files[0].get('name', None) # get original file name
+    images = data.get('images', [{}])
+    image_url = images[0].get('url', None) # get preview: first image
+    return model_name, image_url
+
+def modify_image_url(image_url):
+    parts = image_url.split('/')
+    for i, part in enumerate(parts):
+        if part.startswith('width='):
+            width_value = int(part.split('=')[1])
+            parts[i] = f'width={width_value * 2}' # resize for quality image
+            break
+    return '/'.join(parts)
+
+def generate_preview_filename(model_name, image_url):
+    file_parts = model_name.split('.')
+    image_format = image_url.split('.')[-1].split('?')[0]
+    return f"{file_parts[0]}.preview.{image_format}" # assigning the original image format
+
 ''' main download code '''
 
 def handle_manual(url):
@@ -355,11 +394,26 @@ def manual_download(url, dst_dir, file_name):
     print("\033[32m---"*45 + f"\n\033[33mURL: \033[34m{url}\n\033[33mSAVE DIR: \033[34m{dst_dir}\n\033[33mFILE NAME: \033[34m{file_name}\033[32m\n~~~\033[0m")
     # print(url, dst_dir, file_name)
 
-#
+    # === CivitAi API ===
     civitai_token = "62c0c5956b2f9defbd844d754000180b"
     if 'civitai' in url and civitai_token:
+        model_id = url.split('/')[-1]
+        data = get_data_from_api(model_id)
        url = f"{url}?token={civitai_token}"
 
+        if data:
+            model_name, image_url = extract_file_and_image_info(data)
+            if model_name and image_url:
+                new_image_url = modify_image_url(image_url)
+                image_file_name = generate_preview_filename(model_name if not file_name else file_name, new_image_url)
+                save_img_path = f"{dst_dir}/{image_file_name}"
+                get_ipython().system('wget -O {save_img_path} {new_image_url} # download image')
+                # print(f"\n\n\n{save_img_path, new_image_url}\n\n\n")
+            else:
+                print("File name or image URL missing.")
+        else:
+            print("Failed to retrieve data from the API.")
+
     # -- GDrive --
     if 'drive.google' in url:
         if 'folders' in url:
@@ -369,7 +423,7 @@ def manual_download(url, dst_dir, file_name):
             get_ipython().system('gdown "{url}" -O {dst_dir}/{file_name} --fuzzy -c')
         else:
             get_ipython().system('gdown "{url}" -O {dst_dir} --fuzzy -c')
-# --
+    # -- Hugging Face --
     elif 'huggingface' in url:
         if '/blob/' in url:
             url = url.replace('/blob/', '/resolve/')
@@ -404,8 +458,7 @@ def download(url):
 
 ## unpucking zip files
 def unpucking_zip_files():
-    directories = [models_dir, vaes_dir, embeddings_dir, loras_dir , extensions_dir, control_dir , adetailer_dir]
-
+    # directories = [models_dir, vaes_dir, embeddings_dir, loras_dir , extensions_dir, control_dir , adetailer_dir]
     for directory in directories:
         for root, dirs, files in os.walk(directory):
             for file in files:
@@ -539,21 +592,21 @@ if detailed_download == "off":
 else:
     if any(not file.endswith('.txt') for file in os.listdir(models_dir)):
         print("\n\033[33m➤ Models\033[0m")
-        get_ipython().system("find {models_dir}/ -mindepth 1
+        get_ipython().system("find {models_dir}/ -mindepth 1 -name '*.safetensors' -printf '%f\\n'")
     if any(not file.endswith('.txt') for file in os.listdir(vaes_dir)):
         print("\n\033[33m➤ VAEs\033[0m")
-        get_ipython().system("find {vaes_dir}/ -mindepth 1
+        get_ipython().system("find {vaes_dir}/ -mindepth 1 -name '*.safetensors' -printf '%f\\n'")
     if any(not file.endswith('.txt') and not os.path.isdir(os.path.join(embeddings_dir, file)) for file in os.listdir(embeddings_dir)):
         print("\n\033[33m➤ Embeddings\033[0m")
         get_ipython().system("find {embeddings_dir}/ -mindepth 1 -maxdepth 1 \\( -name '*.pt' -or -name '*.safetensors' \\) -printf '%f\\n'")
     if any(not file.endswith('.txt') for file in os.listdir(loras_dir)):
         print("\n\033[33m➤ LoRAs\033[0m")
-        get_ipython().system("find {loras_dir}/ -mindepth 1
+        get_ipython().system("find {loras_dir}/ -mindepth 1 -name '*.safetensors' -printf '%f\\n'")
     print(f"\n\033[33m➤ Extensions\033[0m")
     get_ipython().system("find {extensions_dir}/ -mindepth 1 -maxdepth 1 ! -name '*.txt' -printf '%f\\n'")
     if any(not file.endswith(('.txt', '.yaml')) for file in os.listdir(control_dir)):
         print("\n\033[33m➤ ControlNet\033[0m")
-        get_ipython().system("find {control_dir}/ -mindepth 1 ! -name '*.yaml' -printf '%f\\n' | sed 's/^[^_]*_[^_]*_[^_]*_\\(
+        get_ipython().system("find {control_dir}/ -mindepth 1 ! -name '*.yaml' -printf '%f\\n' | sed 's/^[^_]*_[^_]*_[^_]*_\\([^_]*\\)_fp16\\.safetensors$/\\1/'")
 
 
 # === OTHER ===
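Note: the detailed listing depends on GNU `find -printf`, so it is not portable to every shell environment. A portable sketch of the same per-directory listing in Python (hypothetical helper; the directory variables are the ones defined earlier in the script) would be:

import os

def list_files(directory, extensions=(".safetensors",)):
    # Print bare file names with matching extensions, like `find ... -printf '%f\n'`.
    for name in sorted(os.listdir(directory)):
        if name.endswith(extensions) and os.path.isfile(os.path.join(directory, name)):
            print(name)

# list_files(models_dir)
# list_files(embeddings_dir, (".pt", ".safetensors"))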