# crawl-mufti-perlis / scraper.py
# Uploaded by Ammar-Azman (commit bbf8dac)
from xtractor.utils import (
extractor,
read_the_json,
dumps_the_json,
multi_threading,
jsonl_converter,
)
def get_title_with_link(link):
    """Collect fatwa article URLs from one listing page.

    Parameters
    ----------
    link : str
        URL of a paginated listing page on muftiperlis.gov.my.

    Returns
    -------
    list[str]
        Absolute article URLs found in the listing table; empty when
        the table is absent or contains no matching anchors.
    """
    soup = extractor(link)
    links = []
    # The listing is rendered as a bordered "category" table.  The
    # original code wrapped this in a bare ``except: pass``, which
    # silently hid *every* error; the only expected failure is the
    # table being missing, so test for that explicitly instead.
    divtag = soup.find("table", class_="category table table-bordered")
    if divtag is not None:
        for a_tag in divtag.find_all("a", href=True):
            href = a_tag["href"]
            # Article links carry "index" in their path; skip the rest.
            if "index" in href:
                links.append("https://muftiperlis.gov.my" + href)
    return links
def get_all_links():
    """Walk the paginated fatwa index and gather all article links.

    Returns
    -------
    dict
        ``{"links": [...]}`` where each element is the list of article
        URLs from one listing page (nested, page by page).
    """
    article_links_dict = {"links": []}
    page = 0
    while True:
        page_links = get_title_with_link(
            f"https://muftiperlis.gov.my/index.php/himpunan-fatwa-negeri?start={page}0"
        )
        # BUG FIX: the original broke out of the loop via ``except``,
        # but get_title_with_link swallows its own exceptions and
        # returns [] on failure, so no exception ever reached this
        # loop and it ran forever.  Treat an empty page as the end of
        # the pagination instead.
        if not page_links:
            break
        article_links_dict["links"].append(page_links)
        page += 1
    return article_links_dict
def get_text_article(idx, link: str) -> str:
    """Fetch one article page and return its flattened body text.

    ``idx`` is accepted for compatibility with the multi-threaded
    driver and is not used by this function.
    """
    soup = extractor(link)
    article_div = soup.find("div", class_="article-details")
    # Collapse newlines and tabs into spaces, then trim the edges.
    flattened = str(article_div.text).replace("\n", " ")
    flattened = flattened.replace("\t", " ")
    return flattened.strip()
def get_text_multithreading(links):
    """Scrape article bodies concurrently using five worker threads."""
    return multi_threading(get_text_article, links, worker_num=5)
if __name__ == "__main__":
    # Earlier pipeline stages — collecting links (get_all_links),
    # scraping article bodies (get_text_multithreading) and dumping
    # mufti_perlis_artikel.json — were run once and left disabled in
    # the original; only the final JSON -> JSONL conversion runs here.
    jsonl_converter(
        "./mufti_perlis/mufti_perlis_artikel.json",
        json_l_file_path="./mufti_perlis/mufti_perlis_artikel.jsonl",
        col_1_name="title",
        col_2_name="body",
    )