# SuperWikiImage-7M / scripts/fetch_wikimedia.py
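"""Fetch images and image metadata from Wikimedia Commons.

Reads a JSONL file of records containing a file URL. The `download` command
saves each file into a local folder; the `meta` command queries the Commons
imageinfo/extmetadata API in batches and writes the results back out as JSONL.
"""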
import pathlib
from urllib.parse import unquote
import anyio
import anyio.streams.memory
import anyio.to_thread
import httpx
import orjson
import tqdm
import typer
from loguru import logger
async def download_file(url: str, session: httpx.AsyncClient, path: pathlib.Path):
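    """Stream `url` into `path`, writing to a temp file first and retrying transient failures."""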
tries = 10
success = False
if path.exists():
return
tmp = path.with_stem(path.stem + "_tmp")
    # A temp file with the same name already exists, so another worker is
    # (or was) downloading this file; skip it rather than clobber it.
if tmp.exists():
return
while tries > 0:
try:
async with session.stream("GET", url) as stream, await anyio.open_file(
tmp, "wb"
) as f:
                if stream.status_code not in [302, 200, 404]:
                    tries -= 1
                    logger.warning(
                        f"Failed to download {path.name}: {stream.status_code}"
                    )
                    await anyio.sleep(5)
elif stream.status_code == 404:
logger.warning(f"404 {path.name}, breaking.")
break
else:
async for bytes_data in stream.aiter_bytes():
await f.write(bytes_data)
success = True
break
except Exception as e:
logger.exception(e)
tries -= 1
await anyio.sleep(5)
if not success and tmp.exists():
tmp.unlink()
elif not tmp.exists():
logger.warning("Temp file magically disappeared?")
else:
try:
tmp.rename(path)
        except Exception:
            # Another worker may have renamed or removed the temp file already.
            logger.warning(
                f"Rename failed (path exists: {path.exists()}, tmp exists: {tmp.exists()})"
            )
else:
logger.info(f"{path.name} Downloaded")
async def batched_meta_worker(
queue: anyio.streams.memory.MemoryObjectReceiveStream,
write: anyio.streams.memory.MemoryObjectSendStream,
):
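    """Receive batches of Commons file titles, query the imageinfo/extmetadata API, and forward results to the writer stream."""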
root_url = "https://commons.wikimedia.org/w/api.php"
session = httpx.AsyncClient()
while True:
        chunk_titles: list[str] | None = await queue.receive()
        if chunk_titles is None:
            break
query = {
"action": "query",
"format": "json",
"formatversion": 2,
"prop": "imageinfo",
"titles": "|".join([f"File:{file}" for file in chunk_titles]),
"iiprop": "extmetadata",
"iiextmetadatalanguage": "en",
"uselang": "content",
}
        tries = 10
        r = None
        while tries > 0:
            try:
                r = await session.get(root_url, params=query)
                break
            except Exception as e:
                tries -= 1
                logger.warning(e)
                await anyio.sleep(2)
        if r is None:
            # Every retry raised; skip this batch instead of crashing on an unbound response.
            logger.warning("Exhausted retries for metadata request, skipping batch.")
            continue
        if r.status_code == 414:
            logger.warning(
                f"Request too long: {len('|'.join(f'File:{file}' for file in chunk_titles))}"
            )
        if r.status_code not in range(200, 300):
            logger.warning(f"Caught: {r.status_code}")
            continue
response = r.json()
# logger.debug(response["query"]["pages"])
for response_page in response["query"]["pages"]:
if ("missing" in response_page and response_page["missing"]) or "imageinfo" not in response_page:
await write.send(
{
"url": f"https://commons.wikimedia.org/wiki/{response_page['title']}",
"miss": True,
"meta": None,
"wkdata": None,
}
)
continue
# logger.debug(response_page)
ext = response_page["imageinfo"][0]["extmetadata"]
root = f"https://commons.wikimedia.org/wiki/{response_page['title']}"
await write.send(
{
"url": root,
"miss": False,
"meta": ext,
"wkdata": f'M{response_page["pageid"]}',
}
)
async def file_worker(
queue: anyio.streams.memory.MemoryObjectReceiveStream, output_folder: pathlib.Path
):
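    """Pull file URLs from the stream and download each one into `output_folder`."""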
session = httpx.AsyncClient(follow_redirects=True)
while True:
        url: str | None = await queue.receive()
        if url is None:
            break
        filename = unquote(url.split("/")[-1]).replace("_", " ")
        # Truncate overly long stems so the filename stays filesystem-safe.
        if len(filename) > 128:
            trunc_stem = pathlib.Path(filename).stem[:128].rstrip()
            filename = pathlib.Path(filename).with_stem(trunc_stem).name
await download_file(url, session, output_folder / filename)
async def download_async(file: pathlib.Path, output_folder: pathlib.Path):
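    """Feed URLs from the input JSONL file to a pool of concurrent download workers."""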
concurrent = 50
async with anyio.create_task_group() as task_group, await anyio.open_file(
file, "rb"
) as fp:
        url_send, url_recv = anyio.create_memory_object_stream[str | None](
            max_buffer_size=100
        )
        # Spawn the worker pool; each worker exits when it receives a None sentinel.
        for _ in range(concurrent):
            task_group.start_soon(file_worker, url_recv, output_folder)
async for line in fp:
load = await anyio.to_thread.run_sync(orjson.loads, line)
await url_send.send(load["url"])
for _ in range(concurrent):
await url_send.send(None)
async def meta_writer(
output_file: pathlib.Path,
inputstream: anyio.streams.memory.MemoryObjectReceiveStream,
):
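    """Serialize metadata records from the response stream to a JSONL output file."""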
pbar = tqdm.tqdm()
async with await anyio.open_file(output_file, "wb") as fp:
while True:
            data: dict | None = await inputstream.receive()
            if data is None:
                break
dump_bytes: bytes = await anyio.to_thread.run_sync(
orjson.dumps, data, None, orjson.OPT_APPEND_NEWLINE
)
await fp.write(dump_bytes)
pbar.update(1)
async def fetch_meta(file: pathlib.Path, output_file: pathlib.Path):
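    """Batch file titles from the input JSONL, fan them out to metadata workers, and collect the results via `meta_writer`."""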
    workers = 5
    async with anyio.create_task_group() as task_group, await anyio.open_file(
        file, "rb"
    ) as fp:
        title_send, title_recv = anyio.create_memory_object_stream[list[str] | None](
            max_buffer_size=100
        )
        response_send, response_recv = anyio.create_memory_object_stream[dict | None](
            max_buffer_size=100
        )
        task_group.start_soon(meta_writer, output_file, response_recv)
        # Run the metadata workers in a nested task group so we know they have
        # all finished before telling the writer to stop; otherwise the writer
        # could receive its shutdown sentinel while results are still pending.
        async with anyio.create_task_group() as worker_group:
            for _ in range(workers):
                worker_group.start_soon(batched_meta_worker, title_recv, response_send)
            chunk = []
            async for line in fp:
                load = await anyio.to_thread.run_sync(orjson.loads, line)
                chunk.append(unquote(load["url"].split("/")[-1].replace("_", " ")))
                if len(chunk) >= 50:
                    await title_send.send(chunk)
                    chunk = []
            if chunk:
                await title_send.send(chunk)
            for _ in range(workers):
                await title_send.send(None)
        # All workers have exited; it is now safe to stop the writer.
        await response_send.send(None)
app = typer.Typer(pretty_exceptions_enable=False)
@app.command()
def download(input_file: pathlib.Path, output_folder: pathlib.Path):
logger.add("logs/download-wiki.log", rotation="10 MB")
output_folder = output_folder.resolve()
if not output_folder.is_dir():
output_folder.mkdir(exist_ok=True, parents=True)
anyio.run(download_async, input_file, output_folder, backend="trio")
@app.command()
@logger.catch()
def meta(input_file: pathlib.Path, output_file: pathlib.Path):
logger.add("logs/download-wiki.log", rotation="10 MB")
anyio.run(fetch_meta, input_file, output_file, backend="trio")
if __name__ == "__main__":
app()
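# Example usage (file names are illustrative):
#   python fetch_wikimedia.py meta urls.jsonl metadata.jsonl
#   python fetch_wikimedia.py download urls.jsonl images/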