# graffiti/cli.py
import argparse
import json
import os
import urllib.parse
from pathlib import Path

import requests
from bs4 import BeautifulSoup
from colorthief import ColorThief
from controlnet_aux.processor import Processor
from PIL import Image
from transformers import pipeline

from colors import name_that_color, rgb_to_hex, NTC_NAMES, CSS3_NAMES
class Scraper(object):
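    """Base scraper: fetches pages, parses HTML and downloads images."""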
def __init__(self, urls):
self.urls = urls
def fetch(self, url):
return requests.get(url, headers={"User-Agent": "Mozilla/5.0"})
def parse(self, text):
return BeautifulSoup(text, 'html.parser')
    def download_image(self, url, path):
        image = requests.get(url)
        # Write via a context manager so the file handle is closed promptly.
        with open(path, 'wb') as f:
            f.write(image.content)
class GraffitiDatabaseScraper(Scraper):
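    """Scrapes graffiti-database.com, one listing page at a time."""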
def __init__(self):
Scraper.__init__(self, ["https://graffiti-database.com"])
    def get_image_urls(self, parsed):
        # Each result tile links to the detail page for one image.
        return [page.get('href') for page in parsed.select('.image-info a')]
def scrape_image(self, url):
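        """Scrape a single image detail page and return its metadata."""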
print("Scraping image page {0}".format(url))
fetched = self.fetch(url)
        if fetched.status_code != 200:
            raise RuntimeError("Scraping image page failed")
text = fetched.text
parsed = self.parse(text)
image = parsed.select('.img-fluid')[0].get('src')
tag_links = parsed.select('a.tag')
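        # The path is percent-encoded twice; decode both layers and take the
        # second-to-last segment as the artist name.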
url_parts = urllib.parse.unquote(
urllib.parse.unquote(url)).rsplit("/", 2)
artist = url_parts[1]
        return {
            "image_url": image,
            "city": tag_links[-1].get_text().strip(),
            # The site uses placeholder writer names when the artist is unknown.
            "artist": artist if artist not in ("Writer Unknown", "Writers Unknown") else None
        }
def scrape_page(self, url):
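        """Scrape one listing page; returns False when it contains no images."""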
print("Scraping page {0}".format(url))
text = self.fetch(url).text
parsed = self.parse(text)
image_urls = self.get_image_urls(parsed)
if len(image_urls) == 0:
return False
        for image_url in image_urls:
            try:
                result = self.scrape_image(
                    "https://graffiti-database.com" + image_url)
            except Exception:
                # Skip detail pages that fail to load or parse.
                continue
file_name = result["image_url"].split('/')[-1]
self.download_image(result["image_url"],
'./images/' + file_name)
result["file"] = file_name
with open('./images/' + file_name + '.json', 'w', encoding='utf-8') as f:
f.write(json.dumps(result, indent=2, ensure_ascii=False))
return True
def scrape(self):
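        """Fetch listing pages (?page=1, ?page=2, ...) until one comes back empty."""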
url = self.urls[0]
print("Scraping {0}".format(
url))
count = 1
has_images = True
while has_images:
has_images = self.scrape_page(url + "?page=" + str(count))
count += 1
class GraffitiScraper(Scraper):
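    """Scrapes the graffiti.org regional indexes city by city."""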
def __init__(self):
Scraper.__init__(self, ["https://www.graffiti.org/index/world.html",
"https://www.graffiti.org/index/europe.html", "https://www.graffiti.org/index/usa.html"])
def scrape_page(self, url, city_name):
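        """Download every image on a city page along with its tag metadata."""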
print("Scraping page {0}".format(url))
text = self.fetch(url).text
parsed = self.parse(text)
image_elements = parsed.select('a[href*=".jpg"]')
        for image_element in image_elements:
            # Only anchors without link text are image thumbnails; the artist
            # tags follow as the next text node in the document.
            if image_element.get_text().strip() != "":
                continue
            sibling = image_element.find_next_sibling(string=True)
            tags = sibling.get_text().strip() if sibling else ""
image_url = image_element.get("href").replace("/", "_")
url_parts = url.split('/')
url_parts[-1] = image_element.get("href")
self.download_image("/".join(url_parts),
'./images/' + image_url)
with open('./images/' + image_url + '.json', 'w', encoding='utf-8') as f:
f.write(json.dumps({
"file": image_url,
"image_url": "/".join(url_parts),
"artist": tags,
"city": city_name if city_name != "Various cities" else None
}, indent=2, ensure_ascii=False))
def scrape_url(self, url):
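        """Walk an index page: each <h4> heading is a city followed by page links."""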
print("Scraping url {0}".format(url))
text = self.fetch(url).text
parsed = self.parse(text)
cities = parsed.find_all("h4")
for city in cities:
city_name = city.get_text().split("\n")[0].strip()
pages = city.find_all("a")
for page in pages:
if page.get_text().strip() == "§":
continue
self.scrape_page(
"https://www.graffiti.org/index/" + page.get("href"), city_name)
def scrape(self):
for url in self.urls:
self.scrape_url(url)
class CLI:
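    """Parses command line arguments and runs the selected tool."""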
def __init__(self):
parser = argparse.ArgumentParser(
prog='graffiti-cli',
description='Tools for setting up the dataset')
subparsers = parser.add_subparsers(dest="command", required=True)
scrape = subparsers.add_parser(
'scrape', help='Scrapes data sources and downloads images')
scrape.add_argument('--source',
default='graffiti.org',
choices=['graffiti.org', 'graffiti-database.com'],
help='Choose data source to scrape')
subparsers.add_parser('cleanup', help='Cleans up downloaded images')
subparsers.add_parser('caption', help='Captions downloaded images')
subparsers.add_parser('palette', help='Creates color palettes for downloaded images')
        subparsers.add_parser('conditioning', help='Creates conditioning images for downloaded images')
metadata = subparsers.add_parser('metadata', help='Creates single meta files from metadata.jsonl')
metadata.add_argument('--source',
default='graffiti.org',
choices=['graffiti.org', 'graffiti-database.com'],
help='Choose data source to use')
args = parser.parse_args()
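        # Dispatch on the selected subcommand.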
if args.command == 'scrape':
if args.source == 'graffiti.org':
GraffitiScraper().scrape()
elif args.source == 'graffiti-database.com':
GraffitiDatabaseScraper().scrape()
        elif args.command == 'cleanup':
            path = Path("./images").rglob("*.jpg")
            for img_p in path:
                try:
                    # Fully load each image to surface truncated or corrupt files.
                    Image.open(img_p).load()
                except Exception:
                    path_name = str(img_p)
                    print(path_name + " is broken. Deleting!")
                    os.remove(path_name)
                    os.remove(path_name + ".json")
elif args.command == 'caption':
captioner = pipeline(
"image-to-text", model="Salesforce/blip-image-captioning-base")
path = Path("./images").rglob("*.jpg")
count = len(list(Path("./images").rglob("*.jpg")))
for i, img_p in enumerate(path):
path_name = str(img_p)
with open(path_name + ".json", 'r+', encoding='utf-8') as f:
data = json.load(f)
f.seek(0)
                caption = captioner(path_name)[0]["generated_text"]
                # BLIP sometimes mislabels graffiti photos as "album" shots;
                # fall back to a generic caption in that case.
                if "album" in caption:
                    caption = "a wall with graffiti on it"
json.dump({
"file": data["file"],
"image_url": data["image_url"],
"artist": data["artist"],
"city": data["city"],
"caption": caption
}, f, indent=2)
f.truncate()
print("{0} / {1}".format(i + 1, count), path_name, caption)
elif args.command == 'palette':
path = Path("./images").rglob("*.jpg")
count = len(list(Path("./images").rglob("*.jpg")))
for i, img_p in enumerate(path):
path_name = str(img_p)
with open(path_name + ".json", 'r+', encoding='utf-8') as f:
data = json.load(f)
f.seek(0)
                color_thief = ColorThief(path_name)
                palette = []
                # Record each dominant color as [hex, NTC name, CSS3 name].
                for color in color_thief.get_palette(color_count=6):
                    color_hex = rgb_to_hex(color)
                    palette.append([color_hex,
                                    name_that_color(color_hex, NTC_NAMES),
                                    name_that_color(color_hex, CSS3_NAMES)])
json.dump({
"file": data["file"],
"image_url": data["image_url"],
"artist": data["artist"],
"city": data["city"],
"caption": data["caption"],
"palette": palette
}, f, indent=2)
f.truncate()
print("{0} / {1}".format(i + 1, count), path_name, palette)
elif args.command == 'metadata':
with open("data/" + args.source + "/metadata.jsonl", encoding="utf-8") as f:
for row in f:
data = json.loads(row)
                    with open('./images/' + data["file"] + '.json', 'w', encoding='utf-8') as j:
                        j.write(json.dumps(data, indent=2, ensure_ascii=False))
elif args.command == 'conditioning':
            # HED soft-edge detection from controlnet_aux, a common ControlNet
            # conditioning input.
            processor_id = 'softedge_hed'
            processor = Processor(processor_id)
path = Path("./images").rglob("*.jpg")
count = len(list(Path("./images").rglob("*.jpg")))
for i, img_p in enumerate(path):
path_name = str(img_p)
output_path = path_name.replace("images/", "conditioning/")
img = Image.open(img_p).convert("RGB")
processed_image = processor(img, to_pil=True)
processed_image.save(output_path)
print("{0} / {1}".format(i + 1, count), path_name, output_path)
def main():
CLI()
if __name__ == "__main__":
main()
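# Example usage (a sketch; assumes the ./images, ./conditioning and data/
# directories referenced above exist):
#   python cli.py scrape --source graffiti.org
#   python cli.py cleanup
#   python cli.py caption
#   python cli.py palette
#   python cli.py conditioning
#   python cli.py metadata --source graffiti.org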