import asyncio
import csv
import urllib.parse

from bs4 import BeautifulSoup

from proxy_magic_session import get_async_session


async def main():
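    # Scrape Fandom's Special:NewWikis listing and write one CSV row per new
    # wiki: (domain, wiki title, article path prefix, 0).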
    with open("fandom_wikis_210224.csv", "w", newline="") as f:
        writer = csv.writer(f)
        session = get_async_session()
        root = "https://community.fandom.com"
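        # First listing page; limit=500 asks for up to 500 entries per page.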
        r = await session.get(f"{root}/wiki/Special:NewWikis?limit=500")
        if r.status_code == 200:
            nrow = 0
            soup = BeautifulSoup(r.text, "lxml")
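            # Each list item on the page links to a newly created wiki.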
            for doms in soup.select(".mw-spcontent li > a"):
                href: str = doms.get("href", "")
                if href:
                    parsed = urllib.parse.urlparse(href)

                    domain = parsed.netloc
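                    # Root wikis serve articles under /wiki/; language wikis
                    # use a path prefix such as /fr/, giving /fr/wiki/.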
                    if parsed.path.strip("/"):
                        sp = f"/{parsed.path.strip('/')}/wiki/"
                    else:
                        sp = "/wiki/"
                    print("Add wiki:", domain, "|", doms.get_text().strip())
                    writer.writerow([domain, doms.get_text().strip(), sp, 0])
                    nrow += 1
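            # Look for the pager's "next" link (rel="next", class "mw-nextlink").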
            next_page = soup.find("a", attrs={"rel": "next", "class": "mw-nextlink"})
            if next_page:
                next_page_url = f'{root}{next_page.get("href")}'
            else:
                next_page_url = None
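            # Walk the remaining pages until no "next" link is found.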
            while next_page_url:
                nrow = 0
                r = await session.get(next_page_url)
                soup = BeautifulSoup(r.text, "lxml")
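                # Repeat the same per-wiki extraction for this page.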
                for doms in soup.select(".mw-spcontent li > a"):
                    href: str = doms.get("href", "")
                    if href:
                        parsed = urllib.parse.urlparse(href)

                        domain = parsed.netloc
                        if parsed.path.strip("/"):
                            sp = f"/{parsed.path.strip('/')}/wiki/"
                        else:
                            sp = "/wiki/"
                        print("Add wiki:", domain, "|", doms.get_text().strip())
                        writer.writerow([domain, doms.get_text().strip(), sp, 0])
                        nrow += 1
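                # Queue the next page, or stop once the pager runs out.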
                next_page = soup.find(
                    "a", attrs={"rel": "next", "class": "mw-nextlink"}
                )
                if next_page:
                    next_page_url = f'{root}{next_page.get("href")}'
                else:
                    next_page_url = None
                print(next_page_url)


asyncio.run(main())