# https://devopedia.org/site-map/list-articles?page=2&action=prev-page&tag=0&ajax=1
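# Scrapes Devopedia articles: builds an index of article URLs from the paginated
# site-map endpoint above, converts each article's main sections to Markdown,
# and writes the results as JSON lines.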
import asyncio
import pathlib
import re

import httpx
import markdownify
import orjson
from bs4 import BeautifulSoup
root = pathlib.Path("data")
root.mkdir(exist_ok=True, parents=True)
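

# Markdown converter tweaks: keep a link's visible text but drop the URL,
# and skip images entirely.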
class md_nolinks(markdownify.MarkdownConverter):
    def convert_a(self, _, text, __):
        # Render the anchor text only, discarding the hyperlink itself.
        _, _, text = markdownify.chomp(text)
        if not text:
            return ""
        return text

    def convert_img(self, el, text, convert_as_inline):
        # Images carry nothing useful for a text corpus; drop them.
        return ""


md = md_nolinks()
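

# Build (or reuse) the list of article URLs, then fetch each article and
# convert its Summary, Discussion, and Milestones sections to Markdown.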
async def index():
    session = httpx.AsyncClient()
    session.headers["user-agent"] = (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36"
    )
    if not (root / "dev_index.json").exists():
        # Walk the paginated site-map endpoint and collect article links.
        dev_urls = set()
        for idx in range(0, 20):
            c = await session.get(
                f"https://devopedia.org/site-map/list-articles?page={idx}&action=next-page&tag=0&ajax=1"
            )
            soup = BeautifulSoup(c.content, "lxml")
            for href in [
                a["href"] for a in soup.select("div.dev-events div.uk-panel > div > a")
            ]:
                dev_urls.add(f"https://devopedia.org{href}")
        (root / "dev_index.json").write_bytes(
            orjson.dumps(list(dev_urls), option=orjson.OPT_INDENT_2)
        )
    else:
        # The index is cached under `root`, so read it back from there as well.
        dev_urls = orjson.loads((root / "dev_index.json").read_bytes())
    final_content = []
    for url in dev_urls:
        c = await session.get(url, timeout=None)
        soup = BeautifulSoup(c.text, "lxml")
        main = soup.select_one("main div.article-middle")
        print(url)
        dev_content = []
        if main:
            h1 = soup.find("h1", attrs={"class": "uk-article-title"})
            for section in main.select("section"):
                # Remove figures and inline citation markers before converting.
                for i in section.find_all("figure"):
                    i.decompose()
                for i in section.find_all("sup", attrs={"class": "inline-citation"}):
                    i.decompose()
                section_id = section.get("id") or ""
                if "summary" in section_id.lower():
                    dev_content.append(
                        f"## Summary\n\n{md.convert_soup(section).rstrip()}"
                    )
                if "discussion" in section_id.lower():
                    # The discussion is a flat list of question/answer pairs.
                    z = "## Discussion"
                    for qa in section.find("ul", recursive=False).find_all(
                        "li", recursive=False
                    ):
                        q = qa.find("article-question")
                        a = qa.find("article-answer")
                        z += f"\n\n### {q.get_text()}\n\n{md.convert_soup(a)}"
                    dev_content.append(z)
                if "milestone" in section_id.lower():
                    # Drop the section's own heading; we emit our own below.
                    section.find("h2").decompose()
                    dev_content.append(
                        f"\n\n## Milestones\n\n{md.convert_soup(section).strip()}"
                    )
            # Normalize line endings, collapse runs of blank lines, and
            # replace tabs before serializing the record.
            text = f"# {h1.get_text()}\n\n" + "\n\n".join(dev_content)
            text = re.sub(r"\n{3,}", "\n\n", text.replace("\r\n", "\n"))
            text = text.replace("\t", " ")
            final_content.append(
                orjson.dumps(
                    {
                        "text": text,
                        "meta": {
                            "title": h1.get_text(),
                            "href": url.split("/")[-1],
                        },
                    }
                )
            )
        else:
            raise Exception(f"no article body found at {url}")
    pathlib.Path("dev_files.jsonl").write_bytes(b"\n".join(final_content))
    await session.aclose()


if __name__ == "__main__":
    asyncio.run(index())