Datasets:
Size: 10B < n < 100B
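The figures were extracted with the script below, which walks folders of NDJSON article dumps (each line carrying an article_body with html and wikitext fields, matching the Wikimedia Enterprise dump layout) and writes one JSON line of [image_url, caption] pairs per article that has usable Commons figures.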
import concurrent.futures as conc
import faulthandler
import pathlib
import traceback
import typing

import orjson
import tqdm
import typer
from bs4 import BeautifulSoup, Tag
from markdownify import MarkdownConverter
from html2markdown import WikiConverter

app = typer.Typer()
def wikipedia_figures(soup: BeautifulSoup, converter: MarkdownConverter) -> list:
    """Extract figures from a parsed Wikipedia article.

    Args:
        soup (BeautifulSoup): The parsed article HTML.
        converter (MarkdownConverter): Converter used to render captions as Markdown.

    Returns:
        list: [original_image_url, caption] pairs, one per unique figure.
    """
    figure_url = set()
    figures = []
    title = soup.find("title")
    title = None if not title else title.get_text()
    for figure_element in soup.select('figure[typeof*="mw:File"]'):
        figcaption = figure_element.find("figcaption")
        img = figure_element.select_one("a > img")
        if not figcaption or not img:
            continue
        if not figcaption.get_text(strip=True):
            # Empty <figcaption>: fall back to an adjacent infobox caption
            # div when present, otherwise treat the figure as uncaptioned.
            sibling = figure_element.next_sibling
            if (
                sibling
                and isinstance(sibling, Tag)
                and sibling.name == "div"
                and "infobox-caption" in sibling.attrs.get("class", [])
            ):
                figcaption = sibling
            else:
                figcaption = None
| if "commons/" not in img["src"]: | |
| # print("NotCommons", title, figcaption) | |
| continue | |
| orig_src = "/".join( | |
| img.get("src","").replace("commons/thumb", "commons").split("/")[:-1] | |
| ) | |
| if orig_src.endswith((".svg",)): | |
| continue | |
| if orig_src.endswith("/"): | |
| print(title, figure_element) | |
| continue | |
        caption = None
        if isinstance(figcaption, Tag):
            # Inline <style> tags would otherwise leak CSS into the caption text.
            for style in figcaption.find_all("style"):
                style.decompose()
            caption = (
                converter.convert_soup(figcaption)
                .replace("\r", " ")
                .replace("\n", " ")
                .replace("  ", " ")
                .replace("  ", " ")
            )
        if orig_src in figure_url:
            continue
        figure_url.add(orig_src)
        # The dump uses protocol-relative URLs; pin them to https.
        orig_src = f"https:{orig_src}"
        figures.append([orig_src, caption])
    return figures
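
# A minimal sketch of the expected input and output. The HTML snippet below is
# illustrative (not taken from the dumps), and it assumes WikiConverter renders
# a plain-text caption verbatim; this helper is never called by the pipeline.
def _demo_wikipedia_figures() -> None:
    sample = (
        "<html><head><title>Cat</title></head><body>"
        '<figure typeof="mw:File/Thumb">'
        '<a href="/wiki/File:Cat.jpg"><img src="//upload.wikimedia.org/'
        'wikipedia/commons/thumb/a/ab/Cat.jpg/220px-Cat.jpg"></a>'
        "<figcaption>A domestic cat</figcaption></figure></body></html>"
    )
    figures = wikipedia_figures(BeautifulSoup(sample, "lxml"), WikiConverter())
    # Expected: [["https://upload.wikimedia.org/wikipedia/commons/a/ab/Cat.jpg",
    #             "A domestic cat"]]
    print(figures)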
@app.command()
def process_root(folder: pathlib.Path, output_folder: pathlib.Path):
    futures: list[conc.Future] = []
    # Dump tracebacks of crashed workers for post-mortem debugging.
    faulthandler.enable(file=open("crash_dump.txt", "a"))
    with conc.ProcessPoolExecutor(max_workers=90) as executor:
        for root_folder in folder.iterdir():
            if not root_folder.is_dir():
                continue
            processed_root = (output_folder / root_folder.name).resolve()
            print("Processing Root", root_folder, processed_root)
            if not processed_root.is_dir():
                processed_root.mkdir(exist_ok=True, parents=True)
            for root_file in root_folder.glob("*.ndjson"):
                futures.append(
                    executor.submit(
                        process_file,
                        root_file,
                        processed_root / root_file.name,
                        progress=False,
                    )
                )
        for future in conc.as_completed(futures):
            try:
                future_response = future.result()
                print("future processed", future_response)
            except Exception as e:
                traceback.print_exception(e)
@app.command()
def process_folder(folder: pathlib.Path, output_folder: pathlib.Path):
    # Flat variant of process_root: handles a single folder of NDJSON files.
    if output_folder is not None and not output_folder.is_dir():
        output_folder.mkdir(exist_ok=True, parents=True)
    with conc.ProcessPoolExecutor(max_workers=180) as executor:
        futures = []
        for file in folder.glob("*.ndjson"):
            futures.append(
                executor.submit(
                    process_file, file, output_folder / file.name, progress=False
                )
            )
        for future in conc.as_completed(futures):
            future.result()
@app.command()
def process_file(
    file: pathlib.Path,
    output_file: typing.Optional[pathlib.Path] = None,
    progress: bool = True,
):
    fout = None
    if output_file:
        fout = open(output_file, "wb")
    pbar = None
    if progress:
        pbar = tqdm.tqdm()
    converter = WikiConverter()
    with open(file, "rb") as f:
        for line in f:
            try:
                wiki_data = orjson.loads(line)
            except orjson.JSONDecodeError:
                # Skip malformed lines instead of reprocessing the previous record.
                continue
            if not wiki_data["article_body"].get("wikitext"):
                continue
            figures = wikipedia_figures(
                BeautifulSoup(wiki_data["article_body"]["html"], "lxml"), converter
            )
            if figures and fout:
                fout.write(orjson.dumps({"figure_media": figures}))
                fout.write(b"\n")
                fout.flush()
            if pbar is not None:
                pbar.update(1)
    if fout:
        fout.close()
    if pbar is not None:
        pbar.close()
    return output_file if output_file else None
if __name__ == "__main__":
    app()
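Usage sketch (the script filename is hypothetical; Typer exposes each decorated function as a subcommand, with underscores rendered as hyphens):

    python extract_figures.py process-file enwiki_0.ndjson figures_0.ndjson
    python extract_figures.py process-root /dumps/wikipedia /output/figures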