|
from xtractor.utils import ( |
|
extractor, |
|
read_the_json, |
|
dumps_the_json, |
|
multi_threading, |
|
jsonl_converter, |
|
) |
|
|
|
|
|
def get_title_with_link(link):
    """Collect article links from one fatwa category listing page.

    Parameters
    ----------
    link : str
        URL of a category listing page on muftiperlis.gov.my.

    Returns
    -------
    list[str]
        Absolute URLs of anchors whose href contains "index".
        Empty when the listing table is missing (e.g. an out-of-range page).
    """
    soup = extractor(link)

    links = []

    # The listing table is absent on out-of-range/empty pages; treat that as
    # "no links" explicitly instead of a bare `except: pass`, which also
    # swallowed real bugs (typos, network errors surfaced by extractor, ...).
    divtag = soup.find("table", class_="category table table-bordered")
    if divtag is not None:
        for a_tag in divtag.find_all("a", href=True):
            href = a_tag["href"]
            if "index" in href:
                # hrefs are site-relative; prefix the host to get a full URL.
                links.append("https://muftiperlis.gov.my" + href)
    return links
|
|
|
|
|
def get_all_links():
    """Walk the paginated fatwa listing and gather article links per page.

    The `start` offset advances in steps of 10 (the f-string appends a
    literal "0" to `i`, giving start=0, 10, 20, ...).

    Returns
    -------
    dict
        {"links": [page_links, ...]} where each entry is the list of links
        scraped from one results page (page-per-sublist layout preserved).
    """
    article_links_dict = {"links": []}
    i = 0
    while True:
        page_links = get_title_with_link(
            f"https://muftiperlis.gov.my/index.php/himpunan-fatwa-negeri?start={i}0"
        )
        # BUG FIX: get_title_with_link() returns [] on a missing/empty page
        # instead of raising, so the original `except: break` could never
        # fire and the loop ran forever. Stop at the first empty page.
        if not page_links:
            break
        article_links_dict["links"].append(page_links)
        i += 1
    return article_links_dict
|
|
|
|
|
def get_text_article(idx, link: str) -> str:
    """Fetch one article page and return its body text.

    Newlines and tabs are flattened to spaces and the result is stripped.
    `idx` is unused here; it is supplied by the multi-threading driver.
    """
    page = extractor(link)
    details = page.find("div", class_="article-details")
    flattened = str(details.text).replace("\n", " ").replace("\t", " ")
    return flattened.strip()
|
|
|
|
|
def get_text_multithreading(links):
    """Scrape article bodies concurrently (5 workers) and return the result."""
    return multi_threading(get_text_article, links, worker_num=5)
|
|
|
|
|
if __name__ == "__main__":
    # Convert the previously scraped article JSON into JSONL with
    # title/body columns. (The scraping steps appear to have been removed
    # from this entry point; only the conversion remains.)
    source_json = "./mufti_perlis/mufti_perlis_artikel.json"
    jsonl_converter(
        source_json,
        json_l_file_path="./mufti_perlis/mufti_perlis_artikel.jsonl",
        col_1_name="title",
        col_2_name="body",
    )
|
|