v2ray committed
Commit 5c28faf
1 Parent(s): 124b733

Gelbooru scraper now saves JSON instead of TXT.

Files changed (4)
  1. compress.py +7 -13
  2. scrape_gel.py +49 -56
  3. utils/scrape_args.py +0 -1
  4. utils/utils.py +38 -29
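
The substance of the change is the sidecar file written next to each downloaded image: instead of a comma-separated tag list in <image_id>.txt, scrape_gel.py now writes a small JSON object to <image_id>.json. A minimal sketch of such a file with made-up values (only the field names come from the json.dumps call in the diff below):

import json

# Field names mirror the json.dumps(...) call added in scrape_gel.py;
# all values here are illustrative, not taken from a real post.
example_metadata = {
    "image_id": "9876543",        # Gelbooru post id parsed from the page URL
    "score": 42,                  # integer score read from the "psc<id>" span
    "rating": "general",          # "safe" is rewritten to "general"
    "tags": {                     # tags grouped by tag type
        "artist": ["example_artist"],
        "copyright": ["example_copyright"],
        "general": ["1girl", "solo"],
    },
}
print(json.dumps(example_metadata, ensure_ascii=False, separators=(",", ":")))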
compress.py CHANGED
@@ -1,5 +1,6 @@
 import os
 import sys
+import utils
 import argparse
 import tarfile
 from constants import *
@@ -7,18 +8,15 @@ import concurrent.futures
 
 def compress_chunk(chunk, chunk_index, output_dir):
     with tarfile.open(os.path.join(output_dir, f"chunk_{chunk_index}.tar"), "w") as tar:
-        for image_path in chunk:
-            tags_path = os.path.splitext(image_path)[0] + ".txt"
-            if not os.path.isfile(tags_path):
-                continue
+        for image_path, metadata_path in chunk:
             tar.add(image_path, arcname=os.path.basename(image_path))
-            tar.add(tags_path, arcname=os.path.basename(tags_path))
+            tar.add(metadata_path, arcname=os.path.basename(metadata_path))
 
 def parse_args():
     parser = argparse.ArgumentParser(description="Group images into uncompressed tar files.")
     parser.add_argument("-i", "--input-dir", default=IMAGE_DIR, help="Input directory for the images to chunk into tars")
     parser.add_argument("-o", "--output-dir", default=COMPRESSED_DIR, help="Output directory for chunked tars")
-    parser.add_argument("-n", "--num-images-per-chunk", type=int, default=1024, help="Number of images per chunk, default to 1024")
+    parser.add_argument("-n", "--num-images-per-chunk", type=int, default=sys.maxsize, help="Number of images per chunk, default to infinite")
     args = parser.parse_args()
     if args.num_images_per_chunk < 1:
         print("Number of images per chunk needs to be a positive integer!")
@@ -27,16 +25,12 @@ def parse_args():
 
 def main():
     args = parse_args()
-    if not os.path.isdir(args.input_dir):
-        print(f"Your input dir \"{args.input_dir}\" doesn't exist or isn't a directory!")
-        sys.exit(1)
-    image_files = [os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir) if not f.endswith(".txt")]
-    image_files.sort()
+    image_metadata_path_tuple_list = [e[1] for e in sorted(utils.get_image_id_image_metadata_path_tuple_dict(args.input_dir).items(), key=lambda x: x[0])]
     os.makedirs(args.output_dir, exist_ok=True)
     with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
         futures = []
-        for i in range(0, len(image_files), args.num_images_per_chunk):
-            chunk = image_files[i:i + args.num_images_per_chunk]
+        for i in range(0, len(image_metadata_path_tuple_list), args.num_images_per_chunk):
+            chunk = image_metadata_path_tuple_list[i:i + args.num_images_per_chunk]
             chunk_index = i // args.num_images_per_chunk
             future = executor.submit(compress_chunk, chunk, chunk_index, args.output_dir)
             futures.append(future)
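
With the tag files gone, compress.py no longer lists non-.txt files itself; it asks utils for (image_path, metadata_path) pairs and tars both members of every pair. A small usage sketch, assuming a directory images/ holding pairs such as 123.png + 123.json (the directory name and ids are hypothetical):

import utils

# Dict keyed by image id, e.g. {"123": ("images/123.png", "images/123.json")}.
pairs = utils.get_image_id_image_metadata_path_tuple_dict("images")
# Same ordering main() now uses: sort by image id, keep only the path tuples.
image_metadata_path_tuple_list = [e[1] for e in sorted(pairs.items(), key=lambda x: x[0])]
for image_path, metadata_path in image_metadata_path_tuple_list:
    print(image_path, metadata_path)  # both files land in the same chunk tar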
scrape_gel.py CHANGED
@@ -2,8 +2,8 @@ import os
 import re
 import sys
 import time
+import json
 import utils
-import random
 import urllib
 import asyncio
 import aiohttp
@@ -13,13 +13,13 @@ import concurrent
 import html as libhtml
 from constants import *
 from bs4 import BeautifulSoup
-from collections import defaultdict
 
-def get_tags(soup, preserve_meta_tags):
+def get_type_tags_dict(soup):
     tag_ul = soup.find("ul", id="tag-list")
     if not tag_ul:
         raise RuntimeError("No tag list found in this web page!")
-    type_tag_list_dict = defaultdict(list)
+    type_tags_dict = {}
+    tags_in_dict = set()
     for element in tag_ul.find_all("li"):
         class_name = element.get("class")
         if not class_name or len(class_name) != 1:
@@ -27,21 +27,17 @@ def get_tags(soup, preserve_meta_tags):
         class_name = class_name[0]
         if not class_name.startswith("tag-type-"):
             continue
-        type_tag_list_dict[class_name[9:]].append(element.find("a", recursive=False).contents[0].replace("_", " "))
-    tags = []
-    for type, tag_list in type_tag_list_dict.items():
-        if not preserve_meta_tags:
-            match type:
-                case "copyright":
-                    if "minecraft" in tag_list:
-                        tags.append("minecraft")
-                    continue
-                case "artist":
-                    continue
-                case "metadata":
-                    continue
-        tags += tag_list
-    return tags
+        tag = element.find("a", recursive=False).contents[0].replace(" ", "_").strip(",_")
+        if tag in tags_in_dict:
+            continue
+        tag_type = class_name[9:]
+        tag_list = type_tags_dict.get(tag_type)
+        if tag_list is None:
+            type_tags_dict[tag_type] = [tag]
+        else:
+            tag_list.append(tag)
+        tags_in_dict.add(tag)
+    return type_tags_dict, len(tags_in_dict)
 
 async def process_link(scrape_args, scrape_state):
     image_id = re.search(r"id=(\d+)", scrape_args.target).group(1)
@@ -64,8 +60,11 @@ async def process_link(scrape_args, scrape_state):
         query_used_time = time.time() - query_start_time
         soup = BeautifulSoup(html, "html.parser")
         score_span = soup.find("span", id="psc" + image_id)
-        if score_span:
-            scrape_state.last_reached_image_score = int(score_span.contents[0])
+        try:
+            image_score = int(score_span.contents[0])
+        except (AttributeError, IndexError, ValueError) as e:
+            raise RuntimeError("Error while getting the image score: " + str(e))
+        scrape_state.last_reached_image_score = image_score
         if image_id_already_exists:
             # print(f"Image {image_id} already exists, skipped.")
             return
@@ -84,45 +83,44 @@ async def process_link(scrape_args, scrape_state):
         if image_ext not in IMAGE_EXT:
             print(f"Image {image_id} is not an image, skipped.")
             return
-        tags = get_tags(soup, scrape_args.preserve_meta_tags)
-        tag_count = len(tags)
+        type_tags_dict, tag_count = get_type_tags_dict(soup)
         if tag_count < scrape_args.min_tags:
             # print(f"Image {image_id} doesn't have enough tags({tag_count} < {scrape_args.min_tags}), skipped.")
             return
-        rating = image_container["data-rating"]
-        if rating == "explicit": tags.append("nsfw")
-        elif rating == "questionable": tags.append("qfw")
-        else: tags.append("sfw")
-        random.shuffle(tags)
+        rating = image_container.get("data-rating")
+        if not rating:
+            raise RuntimeError("No rating found.")
+        if rating == "safe":
+            rating = "general"
+        metadata = json.dumps({"image_id": image_id, "score": image_score, "rating": rating, "tags": type_tags_dict}, ensure_ascii=False, separators=(",", ":"))
         image_path = os.path.join(IMAGE_DIR, image_id + image_ext)
-        tags_path = os.path.join(IMAGE_DIR, image_id + ".txt")
-        tags_text = ", ".join(tags)
+        metadata_path = os.path.join(IMAGE_DIR, image_id + ".json")
         download_start_time = time.time()
         async with scrape_state.session.get(image_download_url) as img_response:
             img_data = await img_response.read()
         download_used_time = time.time() - download_start_time
         async with aiofiles.open(image_path, "wb") as f:
             await f.write(img_data)
-        async with aiofiles.open(tags_path, "w", encoding="utf8") as f:
-            await f.write(tags_text)
-        if not await utils.submit_validation(scrape_state.thread_pool, image_path, tags_path, scrape_args.width, scrape_args.height, scrape_args.convert_to_avif):
-            scrape_state.existing_image_ids.remove(image_id)
-        else:
-            scrape_state.scraped_image_count += 1
-            total_query_time = scrape_state.avg_query_time[0] * scrape_state.avg_query_time[1] + query_used_time
-            total_download_time = scrape_state.avg_download_time[0] * scrape_state.avg_download_time[1] + download_used_time
-            scrape_state.avg_query_time[1] += 1
-            scrape_state.avg_download_time[1] += 1
-            scrape_state.avg_query_time[0] = total_query_time / scrape_state.avg_query_time[1]
-            scrape_state.avg_download_time[0] = total_download_time / scrape_state.avg_download_time[1]
-            interval = 1000
-            if scrape_state.scraped_image_count % interval == 0:
-                print(
-                    f"Scraped {scrape_state.scraped_image_count}/{scrape_args.max_scrape_count} images, "
-                    f"stats for the last {interval} images: [Average query time: {scrape_state.avg_query_time[0]:.3f}s | Average download time: {scrape_state.avg_download_time[0]:.3f}s]"
-                )
-                scrape_state.avg_query_time = [0.0, 0]
-                scrape_state.avg_download_time = [0.0, 0]
+        async with aiofiles.open(metadata_path, "w", encoding="utf8") as f:
+            await f.write(metadata)
+        if not await utils.submit_validation(scrape_state.thread_pool, image_path, metadata_path, scrape_args.width, scrape_args.height, scrape_args.convert_to_avif):
+            return
+        scrape_state.scraped_image_count += 1
+        total_query_time = scrape_state.avg_query_time[0] * scrape_state.avg_query_time[1] + query_used_time
+        total_download_time = scrape_state.avg_download_time[0] * scrape_state.avg_download_time[1] + download_used_time
+        scrape_state.avg_query_time[1] += 1
+        scrape_state.avg_download_time[1] += 1
+        scrape_state.avg_query_time[0] = total_query_time / scrape_state.avg_query_time[1]
+        scrape_state.avg_download_time[0] = total_download_time / scrape_state.avg_download_time[1]
+        interval = 1000
+        if scrape_state.scraped_image_count % interval != 0:
+            return
+        print(
+            f"Scraped {scrape_state.scraped_image_count}/{scrape_args.max_scrape_count} images, "
+            f"stats for the last {interval} images: [Average query time: {scrape_state.avg_query_time[0]:.3f}s | Average download time: {scrape_state.avg_download_time[0]:.3f}s]"
+        )
+        scrape_state.avg_query_time = [0.0, 0]
+        scrape_state.avg_download_time = [0.0, 0]
         return
     except Exception as e:
         error = e
@@ -142,7 +140,6 @@ def parse_args():
     parser.add_argument("-a", "--avif", action="store_true", help="If set, will convert the image into avif, need to have pillow-avif-plugin installed")
     parser.add_argument("-l", "--low-quality", action="store_true", help="If set, will download the sample instead of the original image")
    parser.add_argument("-t", "--min-tags", type=int, default=0, help="Filter out images with less than the specified amount of tags, default to 0")
-    parser.add_argument("-p", "--preserve-meta-tags", action="store_true", help="Preserve artist, copyright, and metadata tags")
     parser.add_argument("-m", "--max-scrape-count", type=int, help="Stop after scraping the set amount of images, may not be exact because of the asynchronous nature of this script, default to infinite")
     parser.add_argument("-c", "--continuous-scraping", action="store_true", help="If set, will scraping continuously even when reaching the 20000 images Gelbooru search depth cap by adjusting search tags")
     parser.add_argument("tags_to_search", nargs=argparse.REMAINDER, help="List of tags to search for, default to all")
@@ -170,8 +167,6 @@ def parse_args():
     if isinstance(args.max_scrape_count, int) and args.max_scrape_count <= 0:
         print("Maximum scrape count must be greater than 0!")
         sys.exit(1)
-    if not args.tags_to_search:
-        args.tags_to_search = ["all"]
     return args
 
 async def main():
@@ -225,9 +220,7 @@ async def main():
                 if task.done():
                     await task
                     del tasks[i]
-            tasks.append(asyncio.create_task(process_link(
-                utils.ScrapeArgs(image_url, args.width, args.height, args.avif, args.low_quality, args.min_tags, args.preserve_meta_tags, args.max_scrape_count), scrape_state
-            )))
+            tasks.append(asyncio.create_task(process_link(utils.ScrapeArgs(image_url, args.width, args.height, args.avif, args.low_quality, args.min_tags, args.max_scrape_count), scrape_state)))
             if utils.get_sigint_count() >= 1 or isinstance(args.max_scrape_count, int) and scrape_state.scraped_image_count >= args.max_scrape_count:
                 break
             session_refresh_counter += 1
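
get_type_tags_dict() replaces the old get_tags(): it keeps Gelbooru's tag-type grouping, deduplicates tags, and no longer shuffles or filters them (the --preserve-meta-tags switch is gone, since group filtering can now happen at read time via utils.get_tags). A toy illustration with handcrafted markup that only mimics the ul#tag-list structure the parser expects, not real page content:

from bs4 import BeautifulSoup

html = """
<ul id="tag-list">
  <li class="tag-type-artist"><a>example artist</a></li>
  <li class="tag-type-copyright"><a>example copyright</a></li>
  <li class="tag-type-general"><a>1girl</a></li>
  <li class="tag-type-general"><a>1girl</a></li>
</ul>
"""
soup = BeautifulSoup(html, "html.parser")
print([li["class"] for li in soup.find_all("li")])
# Feeding this soup to get_type_tags_dict() should yield roughly:
#   ({"artist": ["example_artist"], "copyright": ["example_copyright"], "general": ["1girl"]}, 3)
# i.e. spaces become underscores, the duplicate "1girl" is dropped, and the second
# return value is the tag count used for the --min-tags filter.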
utils/scrape_args.py CHANGED
@@ -9,5 +9,4 @@ class ScrapeArgs:
     convert_to_avif: bool = False
     use_low_quality: bool = False
     min_tags: int = 0
-    preserve_meta_tags: bool = False
     max_scrape_count: Optional[int] = None
utils/utils.py CHANGED
@@ -3,7 +3,7 @@ import asyncio
 import aiohttp
 from PIL import Image
 
-def validate_image(image_path, tags_path, width=None, height=None, convert_to_avif=False):
+def validate_image(image_path, metadata_path, width=None, height=None, convert_to_avif=False):
     new_path = None
     try:
         with Image.open(image_path) as img:
@@ -30,10 +30,10 @@ def validate_image(image_path, tags_path, width=None, height=None, convert_to_av
         except Exception as e:
             print("Error deleting image file:", e)
         try:
-            os.remove(tags_path)
-            print(f"Deleted invalid image and tags files: {image_path}, {tags_path}")
+            os.remove(metadata_path)
+            print(f"Deleted invalid image and metadata files: {image_path}, {metadata_path}")
         except Exception as e:
-            print("Error deleting tags file:", e)
+            print("Error deleting metadata file:", e)
         try:
             if new_path is not None and os.path.isfile(new_path):
                 os.remove(new_path)
@@ -42,28 +42,28 @@ def validate_image(image_path, tags_path, width=None, height=None, convert_to_av
             print("Error deleting new image file:", e)
         return False
 
-async def submit_validation(thread_pool, image_path, tags_path, width=None, height=None, convert_to_avif=False):
-    return await asyncio.wrap_future(thread_pool.submit(validate_image, image_path, tags_path, width, height, convert_to_avif))
+async def submit_validation(thread_pool, image_path, metadata_path, width=None, height=None, convert_to_avif=False):
+    return await asyncio.wrap_future(thread_pool.submit(validate_image, image_path, metadata_path, width, height, convert_to_avif))
 
-def get_image_id_image_tags_path_tuple_dict(image_dir):
+def get_image_id_image_metadata_path_tuple_dict(image_dir):
     if not os.path.isdir(image_dir):
         raise FileNotFoundError(f"\"{image_dir}\" is not a directory!")
-    image_id_image_tags_path_tuple_dict = {}
+    image_id_image_metadata_path_tuple_dict = {}
     for path in os.listdir(image_dir):
         image_id, ext = os.path.splitext(path)
-        if ext == ".txt":
+        if ext == ".json":
             continue
         path = os.path.join(image_dir, path)
         if not os.path.isfile(path):
             continue
-        tags_path = os.path.splitext(path)[0] + ".txt"
-        if not os.path.isfile(tags_path):
+        metadata_path = os.path.splitext(path)[0] + ".json"
+        if not os.path.isfile(metadata_path):
             continue
-        image_id_image_tags_path_tuple_dict[image_id] = (path, tags_path)
-    return image_id_image_tags_path_tuple_dict
+        image_id_image_metadata_path_tuple_dict[image_id] = (path, metadata_path)
+    return image_id_image_metadata_path_tuple_dict
 
 def get_existing_image_id_set(image_dir):
-    return set(get_image_id_image_tags_path_tuple_dict(image_dir))
+    return set(get_image_id_image_metadata_path_tuple_dict(image_dir))
 
 def get_session(timeout=None, cookies=None):
     kwargs = {"connector": aiohttp.TCPConnector(limit=0, ttl_dns_cache=600), "cookies": cookies}
@@ -88,18 +88,27 @@ def get_model_tags(model_tags_path):
         raise ValueError(f"The index specified in \"{model_tags_path}\" is not continuous!")
     return [tag for _, tag in sorted_index_tag_tuple_list]
 
-def get_tags_set(tags_path):
-    if not os.path.isfile(tags_path):
-        raise FileNotFoundError(f"\"{tags_path}\" is not a file!")
-    with open(tags_path, "r", encoding="utf8") as tags_file:
-        tags_text = tags_file.read()
-    tags_set = set()
-    for tag in tags_text.split(","):
-        tag = tag.strip()
-        if tag:
-            tag = tag.replace(" ", "_")
-            if tag == "nsfw": tag = "rating:explicit"
-            elif tag == "qfw": tag = "rating:questionable"
-            elif tag == "sfw": tag = "rating:safe"
-            tags_set.add(tag)
-    return tags_set
+def get_metadata(metadata_path):
+    if not os.path.isfile(metadata_path):
+        raise FileNotFoundError(f"\"{metadata_path}\" is not a file!")
+    with open(metadata_path, "r", encoding="utf8") as metadata_file:
+        return json.load(metadata_file)
+
+def get_tags(metadata_path, exclude=None, include=None):
+    if exclude is not None and include is not None:
+        raise ValueError("You can't set both exclude and include, please only set one.")
+    metadata = get_metadata(metadata_path)
+    tags = ["rating:" + metadata["rating"]]
+    type_tags_dict = metadata.get("tags")
+    if not type_tags_dict:
+        return tags
+    if exclude is not None:
+        for e in exclude:
+            type_tags_dict.pop(e, None)
+    elif include is not None:
+        for k in list(type_tags_dict):
+            if k not in include:
+                type_tags_dict.pop(k)
+    for l in type_tags_dict.values():
+        tags += l
+    return tags
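
Downstream scripts that used to read the .txt tag files can now go through the new helpers: get_tags() re-attaches the rating as a rating:* pseudo-tag and lets the caller drop or keep whole tag groups. A usage sketch with a hypothetical path:

import utils

tags = utils.get_tags("images/123.json")                 # every tag group plus "rating:..."
no_meta = utils.get_tags("images/123.json", exclude=["artist", "metadata"])  # similar in spirit to the removed meta-tag filtering
general_only = utils.get_tags("images/123.json", include=["general"])
print(tags, no_meta, general_only)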