# scrapertool/scraper_run.py
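"""Scrape metadata for .safetensors model files, optionally enrich it from the
CivitAI API (by embedded model ID or file hash) and download preview media,
then save the combined result as a .metadata.json file next to each model."""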
import os
import json
import argparse
import hashlib
import requests
import time
from safetensors import safe_open
from tqdm import tqdm
MAX_RETRIES = 3
BACKOFF_BASE = 2
def retry_request(url, timeout=10, desc=""):
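    """GET `url` with up to MAX_RETRIES attempts and exponential backoff.

    Returns the Response on HTTP 200; returns None immediately on HTTP 404
    or after all retries are exhausted. `desc` labels progress messages.
    """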
attempt = 0
while attempt < MAX_RETRIES:
try:
response = requests.get(url, timeout=timeout)
            if response.status_code == 200:
                return response
            elif response.status_code == 404:
                # 404 is a definitive "not found" (e.g. hash not on CivitAI); retrying won't help.
                tqdm.write(f"{desc} - HTTP 404 (not found). Not retrying.")
                return None
            elif response.status_code == 429:
                tqdm.write(f"{desc} - Rate limited (HTTP 429). Retrying ({attempt + 1}/{MAX_RETRIES})...")
            else:
                tqdm.write(f"{desc} - HTTP {response.status_code}. Retrying ({attempt + 1}/{MAX_RETRIES})...")
except requests.exceptions.RequestException as e:
tqdm.write(f"{desc} - Error: {e}. Retrying ({attempt + 1}/{MAX_RETRIES})...")
        attempt += 1
        if attempt < MAX_RETRIES:
            # Exponential backoff: 2s, 4s, 8s, ... (no pointless sleep after the final attempt)
            time.sleep(BACKOFF_BASE * (2 ** (attempt - 1)))
tqdm.write(f"{desc} - Failed after {MAX_RETRIES} attempts.")
return None
def compute_sha256(file_path):
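    """Return the SHA-256 hex digest of file_path, read in 4 KiB chunks."""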
sha256_hash = hashlib.sha256()
with open(file_path, "rb") as f:
for byte_block in iter(lambda: f.read(4096), b""):
sha256_hash.update(byte_block)
return sha256_hash.hexdigest()
def extract_local_metadata(safetensors_file):
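    """Return the metadata dict embedded in a .safetensors header, or None if it can't be read."""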
try:
with safe_open(safetensors_file, framework="pt") as f:
metadata = f.metadata()
return metadata
except Exception as e:
tqdm.write(f"Error reading {safetensors_file}: {e}")
return None
def fetch_civitai_metadata_by_model_id(model_id):
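    """Fetch model metadata from CivitAI's /api/v1/models/{model_id} endpoint; None on failure."""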
url = f"https://civitai.com/api/v1/models/{model_id}"
response = retry_request(url, desc=f"Fetching metadata for model ID {model_id}")
    if response:  # retry_request only returns successful (HTTP 200) responses
        return response.json()
return None
def fetch_civitai_metadata_by_hash(file_hash):
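    """Fetch version metadata from CivitAI's /api/v1/model-versions/by-hash/{sha256} endpoint; None on failure."""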
url = f"https://civitai.com/api/v1/model-versions/by-hash/{file_hash}"
response = retry_request(url, desc=f"Fetching metadata by hash {file_hash[:10]}...")
    if response:
        return response.json()
return None
def download_preview_images(images_list, save_dir, base_filename, delay=0.5, image_pbar=None, use_subdir=True):
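    """Download preview media for one model, skipping files that already exist.

    Files go into `<save_dir>/<base_filename>_previews/` when use_subdir is True,
    otherwise directly into save_dir. `delay` throttles consecutive downloads.
    """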
if use_subdir:
subdir = os.path.join(save_dir, f"{base_filename}_previews")
os.makedirs(subdir, exist_ok=True)
else:
subdir = save_dir # save in same folder
for idx, img_data in enumerate(images_list):
url = img_data.get('url')
if not url:
continue
        # Strip any query string from the URL's extension; fall back to .jpg if none (assumption).
        ext = os.path.splitext(url)[1].split('?')[0] or ".jpg"
        img_name = f"{base_filename}_preview_{idx+1}{ext}"
# Paths to check (both flat + subdir)
flat_img_path = os.path.join(save_dir, img_name)
subdir_img_path = os.path.join(subdir, img_name)
if os.path.exists(flat_img_path) or os.path.exists(subdir_img_path):
tqdm.write(f"Preview already exists: {img_name} (skipping)")
if image_pbar:
image_pbar.update(1)
continue
desc = f"Downloading media {idx + 1}/{len(images_list)}"
response = retry_request(url, desc=desc)
        if response:
img_path = subdir_img_path if use_subdir else flat_img_path
with open(img_path, 'wb') as img_file:
img_file.write(response.content)
tqdm.write(f"Saved preview: {img_path}")
if image_pbar:
image_pbar.update(1)
time.sleep(delay)
def process_directory(root_dir, force=False, scrape_civitai=False, delay=0.5, previews_subdir=True, max_media=None):
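    """Walk root_dir for .safetensors files and write a .metadata.json next to each.

    Local safetensors metadata is always collected; when scrape_civitai is set,
    CivitAI is queried by embedded model ID or, failing that, by file hash, and
    up to max_media preview images/videos are downloaded per model.
    """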
safetensors_files = []
for dirpath, dirnames, filenames in os.walk(root_dir):
for filename in filenames:
if filename.endswith(".safetensors"):
safetensors_files.append(os.path.join(dirpath, filename))
print(f"\nFound {len(safetensors_files)} .safetensors files.\n")
with tqdm(total=len(safetensors_files), desc="Total Progress", unit="file") as total_pbar:
for safetensors_path in safetensors_files:
dirpath = os.path.dirname(safetensors_path)
filename = os.path.basename(safetensors_path)
base_filename = os.path.splitext(filename)[0]
json_filename = f"{base_filename}.metadata.json"
json_path = os.path.join(dirpath, json_filename)
if os.path.exists(json_path) and not force:
tqdm.write(f"Skipping (metadata exists): {safetensors_path}")
total_pbar.update(1)
continue
tqdm.write(f"\nProcessing: {safetensors_path}")
metadata = extract_local_metadata(safetensors_path)
combined_metadata = {'local_metadata': metadata if metadata else {}}
civitai_data = None
if scrape_civitai:
civitai_model_id = None
if metadata:
if 'ss_civitai_model_id' in metadata:
civitai_model_id = metadata['ss_civitai_model_id']
elif 'ss_civitai_url' in metadata:
parts = metadata['ss_civitai_url'].split('/')
civitai_model_id = next((part for part in parts if part.isdigit()), None)
if civitai_model_id:
tqdm.write(f"Found model ID in metadata: {civitai_model_id}")
civitai_data = fetch_civitai_metadata_by_model_id(civitai_model_id)
time.sleep(delay)
else:
tqdm.write("No CivitAI model ID found in metadata. Trying hash lookup...")
file_hash = compute_sha256(safetensors_path)
civitai_data = fetch_civitai_metadata_by_hash(file_hash)
time.sleep(delay)
                if civitai_data:
                    # The /models/{id} endpoint nests version data under 'modelVersions',
                    # while /model-versions/by-hash returns a single version object directly;
                    # normalize so 'images' and 'trainedWords' are found in both cases.
                    version = (civitai_data.get('modelVersions') or [civitai_data])[0]
                    civitai_meta = {
                        'civitai_model_id': civitai_data.get('modelId') or civitai_data.get('id'),
                        'civitai_model_version_id': version.get('id'),
                        'civitai_name': civitai_data.get('name'),
                        'description': civitai_data.get('description'),
                        'tags': civitai_data.get('tags'),
                        'trainedWords': version.get('trainedWords'),
                        'images': version.get('images')
                    }
                    combined_metadata['civitai_metadata'] = civitai_meta
                    images_list = civitai_meta.get('images') or []
if images_list:
# Apply max_media logic
if max_media == 0:
tqdm.write("Skipping download of preview images/videos (user selected 0).")
else:
if max_media is not None:
images_list = images_list[:max_media]
with tqdm(total=len(images_list), desc="Image/Video Progress", leave=False) as image_pbar:
download_preview_images(
images_list,
dirpath,
base_filename,
delay=delay,
image_pbar=image_pbar,
use_subdir=previews_subdir
)
else:
tqdm.write("No preview images/videos found.")
else:
tqdm.write("No CivitAI data found (model ID or hash lookup failed).")
with open(json_path, "w", encoding="utf-8") as f:
json.dump(combined_metadata, f, indent=4, ensure_ascii=False)
tqdm.write(f"Saved metadata to: {json_path}")
total_pbar.update(1)
def interactive_menu():
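    """Prompt for run options; returns (force, scrape_civitai, delay, loras_only, previews_subdir, max_media)."""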
print("\n=== LoRA Metadata Scraper Config ===\n")
scrape_civitai = input("A) Scrape CivitAI? (Y/N) [Default: N]: ").strip().lower() == 'y'
delay_choice = input("B) Use default delay (0.5s), no delay (0), or custom? (D/N/C) [Default: D]: ").strip().lower()
if delay_choice == 'n':
delay = 0.0
elif delay_choice == 'c':
        try:
            delay = float(input("Enter delay in seconds (e.g., 0.5): ").strip())
        except ValueError:
            delay = 0.5  # fall back to the default on invalid input
else:
delay = 0.5
force = input("C) Force re-scrape if metadata exists? (Y/N) [Default: N]: ").strip().lower() == 'y'
loras_only = input("D) Scan only the LoRAs folder? (Y/N) [Default: Y]: ").strip().lower() != 'n'
previews_subdir = input("E) Save preview images in a subdirectory? (Y/N) [Default: Y]: ").strip().lower() != 'n'
media_choice = input("F) How many preview images/videos to download? (A=All [default], N=None, X=Number): ").strip().lower()
if media_choice == 'n':
max_media = 0
elif media_choice == 'a' or media_choice == '':
max_media = None
else:
try:
max_media = int(media_choice)
except ValueError:
max_media = None # fallback to all
print("\n=== Starting with your selected options ===\n")
return force, scrape_civitai, delay, loras_only, previews_subdir, max_media
if __name__ == "__main__":
print(">>> Script started")
parser = argparse.ArgumentParser(description="Scrape and save metadata for .safetensors files.")
parser.add_argument("--force", action="store_true", help="Force re-scrape even if metadata file exists.")
parser.add_argument("--scrape-civitai", action="store_true", help="Enable scraping CivitAI metadata + images.")
parser.add_argument("--delay", type=float, default=0.5, help="Delay time (seconds) between API/image steps (default: 0.5s).")
parser.add_argument("--interactive", action="store_true", help="Run in interactive mode.")
parser.add_argument("--loras-only", action="store_true", help="Scan only the LoRAs folder (models/loras).")
parser.add_argument("--previews-subdir", dest="previews_subdir", action="store_true", help="Save preview images in a subdirectory.")
parser.add_argument("--no-previews-subdir", dest="previews_subdir", action="store_false", help="Save preview images in the same folder.")
parser.add_argument("--max-media", type=int, default=None, help="Max number of preview images/videos to download (0 = none).")
parser.set_defaults(previews_subdir=True)
args = parser.parse_args()
if args.interactive:
force, scrape_civitai, delay, loras_only, previews_subdir, max_media = interactive_menu()
else:
force, scrape_civitai, delay, loras_only, previews_subdir, max_media = (
args.force,
args.scrape_civitai,
args.delay,
args.loras_only,
args.previews_subdir,
args.max_media
)
script_dir = os.path.dirname(os.path.abspath(__file__))
if loras_only:
comfyui_dir = os.path.abspath(os.path.join(script_dir, "..", "..", "models", "loras"))
else:
comfyui_dir = os.path.abspath(os.path.join(script_dir, "..", ".."))
tqdm.write(f"Scanning directory: {comfyui_dir}")
process_directory(
comfyui_dir,
force=force,
scrape_civitai=scrape_civitai,
delay=delay,
previews_subdir=previews_subdir,
max_media=max_media
)
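
# Example invocations (assumed layout: this script lives under
# ComfyUI/custom_nodes/scrapertool/, so "../.." resolves to the ComfyUI root
# and "../../models/loras" to the LoRA folder):
#
#   python scraper_run.py --interactive
#   python scraper_run.py --scrape-civitai --loras-only --max-media 5
#   python scraper_run.py --force --no-previews-subdir --delay 1.0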