Jannchie committed on
Commit
f66a537
1 Parent(s): 73bfe58

feat(scripts): add fetch scripts

Files changed (7)
  1. .gitignore +2 -0
  2. README.md +9 -1
  3. danbooru_metadata.db +2 -2
  4. db.py +95 -0
  5. download_images.py +107 -0
  6. requirements.txt +3 -0
  7. update_danbooru.py +176 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ venv
+ __pycache__
README.md CHANGED
@@ -2,4 +2,12 @@
  license: mit
  ---

- combined <https://huggingface.co/datasets/nyanko7/danbooru2023> and <https://huggingface.co/datasets/deepghs/danbooru_newest>.
+ Metadata of all posts from Danbooru.
+
+ I did not include the media_asset field because it is largely redundant with other fields.
+
+ Compared to the metadata available at <https://huggingface.co/datasets/nyanko7/danbooru2023>, the data here was collected at a later time. This means that many fields may differ, such as score. Some tags and author aliases are also more complete.
+
+ This repository also includes the script for updating the metadata and the script for downloading images based on the metadata database.
+
+ On my PC, all of the metadata can be downloaded in about 1 hour.
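
For a quick look at the data, a minimal sketch along the following lines should work (it assumes the database file sits in the working directory; the column names come from the `posts` table defined in `db.py` below):

```python
import sqlite3

# Open the metadata database (assumed to be in the working directory)
conn = sqlite3.connect("danbooru_metadata.db")
cur = conn.cursor()

# Count the posts and peek at a few rows; these columns exist in the posts table
cur.execute("SELECT COUNT(*) FROM posts")
print("total posts:", cur.fetchone()[0])

cur.execute("SELECT id, rating, score, tag_string_artist FROM posts LIMIT 5")
for row in cur.fetchall():
    print(row)

conn.close()
```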
danbooru_metadata.db CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f32d0ffc3fafb17e43d9efaefb44c280610d2993dcd75881607ad919283c9d1b
- size 19277389824
+ oid sha256:57f0d40383d45b546f8546c2fad51c6e398c472e92089d2137f4519e76533295
+ size 11417427968
db.py ADDED
@@ -0,0 +1,95 @@
+ import sqlite3
+
+ from sqlalchemy import Column, Float, Index, Integer, MetaData, Table, Text, create_engine
+ from sqlalchemy.orm import declarative_base, sessionmaker
+
+ Base = declarative_base()
+ metadata = MetaData()
+
+ # Schema of the posts table stored in danbooru_metadata.db, with indexes on
+ # the columns most commonly used for filtering (tags, rating, score).
+ t_posts = Table(
+     "posts",
+     metadata,
+     Column("id", Integer, primary_key=True),
+     Column("filename", Text),
+     Column("mimetype", Text),
+     Column("extension", Text),
+     Column("width", Integer),
+     Column("height", Integer),
+     Column("rating", Text),
+     Column("file_url", Text),
+     Column("created_at", Text),
+     Column("uploader_id", Integer),
+     Column("score", Integer),
+     Column("source", Text),
+     Column("md5", Text),
+     Column("last_comment_bumped_at", Text),
+     Column("image_width", Integer),
+     Column("image_height", Integer),
+     Column("tag_string", Text),
+     Column("fav_count", Integer),
+     Column("file_ext", Text),
+     Column("last_noted_at", Text),
+     Column("parent_id", Float),
+     Column("has_children", Integer),
+     Column("approver_id", Float),
+     Column("tag_count_general", Integer),
+     Column("tag_count_artist", Integer),
+     Column("tag_count_character", Integer),
+     Column("tag_count_copyright", Integer),
+     Column("file_size", Integer),
+     Column("up_score", Integer),
+     Column("down_score", Integer),
+     Column("is_pending", Integer),
+     Column("is_flagged", Integer),
+     Column("is_deleted", Integer),
+     Column("tag_count", Integer),
+     Column("updated_at", Text),
+     Column("is_banned", Integer),
+     Column("pixiv_id", Float),
+     Column("last_commented_at", Text),
+     Column("has_active_children", Integer),
+     Column("bit_flags", Integer),
+     Column("tag_count_meta", Integer),
+     Column("has_large", Integer),
+     Column("has_visible_children", Integer),
+     Column("tag_string_general", Text),
+     Column("tag_string_character", Text),
+     Column("tag_string_copyright", Text),
+     Column("tag_string_artist", Text),
+     Column("tag_string_meta", Text),
+     Column("large_file_url", Text),
+     Column("preview_file_url", Text),
+     Index("posts_index_file_url", "file_url"),
+     Index("posts_index_tag_count_artist", "tag_count_artist"),
+     Index("posts_index_tag_count_character", "tag_count_character"),
+     Index("posts_index_tag_count_copyright", "tag_count_copyright"),
+     Index("posts_index_tag_count_general", "tag_count_general"),
+     Index("posts_index_tag_count_meta", "tag_count_meta"),
+     Index("posts_index_tag_string", "tag_string"),
+     Index("posts_index_tag_string_artist", "tag_string_artist"),
+     Index("posts_index_tag_string_character", "tag_string_character"),
+     Index("posts_index_tag_string_copyright", "tag_string_copyright"),
+     Index("posts_index_tag_string_general", "tag_string_general"),
+     Index("posts_index_tag_string_meta", "tag_string_meta"),
+     Index("posts_index_rating", "rating"),
+     Index("posts_index_score", "score"),
+ )
+
+
+ def connect_db(db_name):
+     """Open a plain sqlite3 connection to the given database file."""
+     return sqlite3.connect(db_name)
+
+
+ def get_engine(db_name):
+     """Create a SQLAlchemy engine for the given SQLite database file."""
+     return create_engine(f"sqlite:///{db_name}")
+
+
+ def get_session(engine):
+     """Create a new SQLAlchemy session bound to the given engine."""
+     Session = sessionmaker(bind=engine)
+     return Session()
+
+
+ if __name__ == "__main__":
+     # When run directly, create the tables and indexes in a local database file
+     engine = get_engine("danbooru_metadata.db")
+     metadata.create_all(engine)
+     print("Database has been created.")
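
For reference, a minimal sketch of how the helpers above fit together; the rating/score filter is purely illustrative:

```python
from sqlalchemy import select

from db import get_engine, get_session, t_posts

# Engine/session against the metadata database defined above
engine = get_engine("danbooru_metadata.db")
session = get_session(engine)

# Illustrative query: a handful of general-rated, high-score posts
stmt = (
    select(t_posts.c.id, t_posts.c.score, t_posts.c.file_url)
    .where(t_posts.c.rating == "g", t_posts.c.score > 100)
    .limit(10)
)
for row in session.execute(stmt):
    print(row.id, row.score, row.file_url)

session.close()
```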
download_images.py ADDED
@@ -0,0 +1,107 @@
+ import os
+ import queue
+ import sqlite3
+ import threading
+
+ import requests
+
+ # Database and output directory setup
+ DB_PATH = "danbooru_metadata.db"
+ OUT_DIR = "./image"
+
+ # Number of download threads
+ NUM_THREADS = 16
+
+ # Task queue holding (post_id, file_url) pairs
+ task_queue = queue.Queue()
+
+ IMAGE_EXTENSIONS = [
+     "png",
+     "jpg",
+     "jpeg",
+     "webp",
+     "bmp",
+     "PNG",
+     "JPG",
+     "JPEG",
+     "WEBP",
+     "BMP",
+ ]
+
+
+ def fetch_posts_to_queue():
+     """Fetch posts from the SQLite database and put them in the task queue."""
+     conn = sqlite3.connect(DB_PATH)
+     cursor = conn.cursor()
+     print("Fetching posts")
+     cursor.execute("SELECT id, file_url, file_ext FROM posts")
+     while True:
+         post = cursor.fetchone()
+         if post is None:
+             break
+         (post_id, file_url, file_ext) = post
+
+         # Skip posts whose file already exists in its sharded directory
+         if os.path.exists(
+             os.path.join(OUT_DIR, f"{post_id % 1000:04d}", f"{post_id}.{file_ext}")
+         ):
+             continue
+
+         # Only queue posts whose file_ext is an image
+         if file_url is not None and file_ext in IMAGE_EXTENSIONS:
+             task_queue.put((post_id, file_url))
+     print("All posts fetched")
+     conn.close()
+
+
+ def download_image(post_id, url):
+     """Download the image from the given URL and save it under OUT_DIR."""
+     try:
+         response = requests.get(url, stream=True)
+         response.raise_for_status()
+
+         # Bucket output directories by post_id % 1000 and create them as needed
+         directory = os.path.join(OUT_DIR, f"{post_id % 1000:04d}")
+         os.makedirs(directory, exist_ok=True)
+         ext = url.split(".")[-1]
+         # Determine the file path and save the image
+         file_path = os.path.join(directory, f"{post_id}.{ext}")
+         with open(file_path, "wb") as f:
+             for chunk in response.iter_content(1024):
+                 f.write(chunk)
+         # print(f"Downloaded {file_path}")
+     except requests.RequestException as e:
+         print(f"Failed to download {url}: {e}")
+
+
+ def worker():
+     """Worker thread function to download images until a sentinel is received."""
+     while True:
+         item = task_queue.get()
+         if item is None:
+             # Sentinel: no more tasks are coming, so exit this thread
+             task_queue.task_done()
+             break
+         post_id, url = item
+         download_image(post_id, url)
+         task_queue.task_done()
+
+
+ if __name__ == "__main__":
+     # Create and start worker threads
+     threads = []
+     for _ in range(NUM_THREADS):
+         thread = threading.Thread(target=worker)
+         thread.start()
+         threads.append(thread)
+
+     try:
+         # Populate the task queue, then signal the workers that no more work is coming
+         fetch_posts_to_queue()
+         for _ in range(NUM_THREADS):
+             task_queue.put(None)
+
+         # Ensure all tasks are done
+         task_queue.join()
+
+         for thread in threads:
+             thread.join()
+
+         print("All downloads completed.")
+     except KeyboardInterrupt:
+         print("Download interrupted.")
+         os._exit(1)
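
Note that downloads are sharded into sub-folders keyed by `post_id % 1000`, so the on-disk path of a post can be reconstructed without querying the database. A small sketch (the helper name and the post ID are made up):

```python
import os

OUT_DIR = "./image"


def expected_path(post_id: int, ext: str) -> str:
    # Mirrors the layout used by download_image(): <OUT_DIR>/<id % 1000, zero-padded>/<id>.<ext>
    return os.path.join(OUT_DIR, f"{post_id % 1000:04d}", f"{post_id}.{ext}")


print(expected_path(6543210, "jpg"))  # ./image/0210/6543210.jpg on POSIX systems
```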
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ requests
+ rich
+ sqlalchemy
update_danbooru.py ADDED
@@ -0,0 +1,176 @@
+ import argparse
+ import queue
+ import threading
+ import time
+ from time import sleep
+
+ import requests
+ from rich.progress import (
+     BarColumn,
+     Progress,
+     TextColumn,
+     TimeElapsedColumn,
+     TimeRemainingColumn,
+ )
+ from sqlalchemy import text
+
+ from db import get_engine, t_posts
+
+ url_queue = queue.Queue(maxsize=64)
+ data_queue = queue.Queue()
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--start_id", type=int, default=1)
+ parser.add_argument("--api_key", type=str)
+ parser.add_argument("--login", type=str)
+ parser.add_argument("--workers", type=int, default=16)
+ args = parser.parse_args()
+
+ API_KEY = args.api_key
+ LOGIN = args.login
+
+ start_id = args.start_id
+
+ base_api_url = (
+     "https://danbooru.donmai.us/posts.json?page=a{}&limit=200&api_key={}&login={}"
+ )
+ latest_id_url = "https://danbooru.donmai.us/posts.json?api_key={}&login={}"
+
+ latest_id = 1  # latest post ID seen so far
+
+
+ # Get the latest post ID from the API, with exponential backoff on failure
+ def fetch_latest_id(max_retries=5, backoff_factor=1):
+     global latest_id
+     attempt = 0
+
+     while attempt < max_retries:
+         try:
+             response = requests.get(latest_id_url.format(API_KEY, LOGIN))
+             if response.status_code == 200:
+                 data = response.json()
+                 if data and "id" in data[0]:
+                     latest_id = data[0]["id"]
+                     return  # Exit once the latest ID has been obtained
+             else:
+                 print(f"Failed to retrieve latest ID: {response.status_code}")
+         except Exception as e:
+             print(f"An error occurred while fetching the latest ID: {e}")
+
+         attempt += 1
+         sleep_time = backoff_factor * (2**attempt)
+         print(f"Retrying in {sleep_time} seconds...")
+         time.sleep(sleep_time)
+
+     print(f"Failed to retrieve latest ID after {max_retries} attempts")
+
+
+ def update_latest_id():
+     while True:
+         fetch_latest_id()
+         time.sleep(60)
+
+
+ def fetch_data(max_retries=3, retry_delay=2):
+     while True:
+         url = url_queue.get()  # get a URL from url_queue
+         if url is None:
+             break
+
+         retry_count = 0
+         while retry_count < max_retries:
+             try:
+                 response = requests.get(url)
+                 if response.status_code == 200:
+                     data = response.json()
+                     data_queue.put(data)
+                     break  # Request succeeded; no need to retry
+                 else:
+                     print(f"Failed to fetch data from {url}: {response.status_code}")
+                     retry_count += 1
+                     time.sleep(retry_delay)
+
+             except Exception as e:
+                 retry_count += 1
+                 print(
+                     f"An error occurred while fetching data from {url}: {e}. Retrying in {retry_delay} seconds..."
+                 )
+                 time.sleep(retry_delay)
+
+         url_queue.task_done()
+
+
+ # Create worker threads to fetch data (count controlled by --workers)
+ threads = []
+ for _ in range(args.workers):
+     thread = threading.Thread(target=fetch_data)
+     thread.daemon = True
+     thread.start()
+     threads.append(thread)
+
+ # Start a thread that periodically refreshes the latest ID
+ id_update_thread = threading.Thread(target=update_latest_id)
+ id_update_thread.daemon = True
+ id_update_thread.start()
+
+ required_columns = [col.name for col in t_posts.columns if not col.primary_key]
+
+ danbooru_engine = get_engine("danbooru_metadata.db")
+ t_posts.metadata.create_all(danbooru_engine)
+
+ # Use the largest ID already stored in the database as the initial total
+ with danbooru_engine.connect() as conn:
+     res = conn.execute(text("SELECT MAX(id) FROM posts"))
+     latest_id = res.fetchone()[0] or 1
+
+ print("start_id:", start_id)
+
+
+ def save_data():
+     while True:
+         data = data_queue.get()
+         if not data:
+             continue
+         # Keep only the columns defined in the posts table (this drops fields
+         # such as media_asset) and fill any missing columns with None
+         rows = [
+             {"id": d.get("id"), **{col: d.get(col) for col in required_columns}}
+             for d in data
+         ]
+         with danbooru_engine.connect() as conn:
+             conn.execute(t_posts.insert().prefix_with("OR REPLACE"), rows)
+             conn.commit()
+
+
+ save_thread = threading.Thread(target=save_data)
+ save_thread.daemon = True
+ save_thread.start()
+
+
+ try:
+     with Progress(
+         TextColumn("[progress.description]{task.description}"),
+         BarColumn(),
+         TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
+         TimeElapsedColumn(),
+         TimeRemainingColumn(),
+         TextColumn("[{task.completed} / {task.total}]"),
+     ) as progress:
+         # Add a progress task; the total is the number of IDs left to crawl
+         task = progress.add_task(
+             "Processing", start=True, total=latest_id - start_id + 1
+         )
+         current_id = start_id
+
+         while True:
+             if current_id >= latest_id:
+                 progress.update(task, completed=latest_id)  # keep the bar full
+                 sleep(2)
+                 continue
+
+             url = base_api_url.format(current_id, API_KEY, LOGIN)
+             url_queue.put(url)  # put the generated URL into the URL queue
+             current_id += 200
+             progress.update(task, completed=current_id, total=latest_id)
+             sleep(0.1)
+ except KeyboardInterrupt:
+     print("Stopped by user")
+     exit(0)
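
The script is run with the credentials defined by its argparse options, e.g. `python update_danbooru.py --login <user> --api_key <key> --workers 16`. It pages through the posts endpoint using Danbooru's `page=a<id>` syntax, which returns up to `limit` posts with IDs above the given ID. A minimal sketch of a single such request (placeholder credentials, no error handling):

```python
import requests

LOGIN = "your_username"    # placeholder
API_KEY = "your_api_key"   # placeholder

# One page of up to 200 posts with IDs above 7000000 (same shape as base_api_url)
url = (
    "https://danbooru.donmai.us/posts.json"
    f"?page=a7000000&limit=200&api_key={API_KEY}&login={LOGIN}"
)
posts = requests.get(url).json()
print(len(posts), "posts fetched")
if posts:
    print("highest id on this page:", max(p["id"] for p in posts))
```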