danbooru2022_tags / README.md
qwopqwop's picture
Update README.md
dbf7711
metadata
license: mit

These are the tags of the Danbooru 2021 and Danbooru 2022 datasets.

The dataset is deduplicated by post id; posts without an id are assigned sequential negative ids (-1, -2, …) instead.

load dataset

from datasets import load_dataset
dataset = load_dataset('qwopqwop/danbooru2022_tags')

preprocess code

import os
import glob
import json
import pandas as pd
import concurrent.futures

dataset = {}
ids = {}

# danbooru 2021
# download: 'rsync --recursive --verbose rsync://176.9.41.242:873/danbooru2021/metadata/ ./metadata'
# Each metadata file is JSON-lines: one post object per line.
json_data = []
for part in range(12):
    print(part)
    with open('./metadata/posts0000000000%02d.json' % part, 'r') as f:
        json_data.extend(json.loads(line) for line in f)

# Key each post by its id; posts with no id get sequential negative keys
# (-1, -2, ...) so they never collide with real post ids.
idx = 1
for post in json_data:
    if 'id' in post:
        key = int(post['id'])
    else:
        key = -idx
        idx += 1
    # 'tag_string' is space-separated with underscores inside tags; convert
    # to a comma-separated, human-readable form.
    ids[key] = post['tag_string'].replace(' ', ', ').replace('_', ' ')
del json_data

# danbooru 2022
# download https://huggingface.co/datasets/animelover/danbooru2022
# Collect every per-post tag file (one .txt per post) across all subfolders.
path = []
for idx, folder in enumerate(os.listdir('./danbooru2022/')):
    print(idx)  # progress: one line per subfolder
    path.extend(glob.glob(f'./danbooru2022/{folder}/*.txt'))

def load_data(path):
    """Read one danbooru2022 tag file.

    Returns ``(post_id, tags)`` where ``post_id`` is the integer parsed from
    the file's stem (e.g. ``'.../12345.txt'`` -> ``12345``) and ``tags`` is
    the file's first line (the whole tag list, trailing newline included).
    """
    # os.path handles both '/' and the OS-specific separator; the original
    # path.split('/') broke on Windows paths. File stems are numeric post
    # ids, so splitext is equivalent to the original split('.')[0] here.
    name = int(os.path.splitext(os.path.basename(path))[0])
    # Tag files are UTF-8; be explicit instead of relying on the platform
    # default encoding (which can raise UnicodeDecodeError on some locales).
    with open(path, "r", encoding="utf-8") as f:
        data = f.readline()
    return name, data

# Fan out file reads across threads (I/O-bound, so the GIL is not a problem).
with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
    future_to_path = {executor.submit(load_data, p): p for p in path}
    for done, future in enumerate(concurrent.futures.as_completed(future_to_path)):
        if done % 1000 == 0:
            print(done)  # progress indicator
        # BUG FIX: the original did `path = future_to_path[future]` here,
        # rebinding the global `path` list to a single string; the looked-up
        # value was never used, so the statement is dropped entirely.
        name, data = future.result()
        # danbooru2022 tags override danbooru2021 tags for the same post id.
        ids[name] = data

# preprocess: build a single-column DataFrame indexed by post id.
dataset['tags'] = ids
dataset = pd.DataFrame(dataset)

# Tags that carry no visual information (scan/upload/moderation metadata).
# frozenset gives O(1) membership tests instead of O(len(bad_tags)) per tag.
bad_tags = frozenset(["absurdres", "highres", "translation request", "translated", "commentary", "commentary request", "commentary typo", "character request", "bad id", "bad link", "bad pixiv id", "bad twitter id", "bad tumblr id", "bad deviantart id", "bad nicoseiga id", "md5 mismatch", "cosplay request", "artist request", "wide image", "author request", "artist name", "banned artist", "duplicate", "pixel-perfect duplicate"])

# BUG FIX + performance: the original looped `dataset.iloc[i] = ...`, which
# assigns the string to the WHOLE row (it only worked because there is
# exactly one column) and pays pandas indexing overhead per row. A single
# vectorized map filters every row in one pass.
dataset['tags'] = dataset['tags'].map(
    lambda s: ', '.join(tag for tag in s.split(', ') if tag not in bad_tags)
)

# save (the index — post ids — is preserved in the parquet file)
dataset.to_parquet('tags.parquet')