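"""Scrape the latest fatwa articles from https://muftins.gov.my.

Pipeline: collect paginated article links, fetch each article body with a
thread pool, then dump the results under ./mufti_negeri_sem/ as JSON and JSONL.
"""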
from xtractor.utils import (
extractor,
read_the_json,
dumps_the_json,
jsonl_converter,
multi_threading,
)


def get_links(toplink):
    """Collect article links from one listing page, skipping pagination URLs."""
    soup = extractor(toplink)
    divtag = soup.find(
        "div",
        class_="elementor-element elementor-element-aa44724 ae-grid-list overlay-h-pos-center overlay-v-pos-middle filter-align-left elementor-widget elementor-widget-ae-post-blocks-adv",
    )
    atag = divtag.find_all("a", href=True)
    article_links = []
    for a_tag in atag:
        if "https" in a_tag["href"] and "page" not in a_tag["href"]:
            article_links.append(a_tag["href"])
    # Deduplicate; note that set() does not preserve order.
    article_links = list(set(article_links))
    return article_links
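

# Note: the long Elementor class string above is brittle across theme updates.
# Assuming extractor() returns a BeautifulSoup object (as soup.find suggests),
# a looser selector keyed to one stable class is a possible alternative:
# divtag = soup.select_one("div.ae-grid-list")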


def get_all_links():
    """Probe listing pages from index 0 upward until one is empty or fails."""
    all_links = []
    i = 0
    while True:
        try:
            link = f"https://muftins.gov.my/fatwa-terkini/page/{i}"
            links = get_links(toplink=link)
            all_links.append(links)
            i += 1
            if len(links) == 0:
                break
        except Exception:
            # Treat any network or parsing failure as the end of pagination.
            break
    # Flatten the per-page lists into a single list of links.
    all_links = [y for x in all_links for y in x]
    return all_links


def get_text(idx, link):
    """Fetch one article's body; idx is supplied by the multi_threading helper."""
    soup = extractor(link)
    div_tag = soup.find("div", class_="col-xs-9")
    text = str(div_tag.text).replace("\n", " ").strip()
    return text


def get_all_text(links: list):
    """Fetch all article bodies concurrently (10 worker threads)."""
    all_text = multi_threading(get_text, links=links, worker_num=10)
    return all_text
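

# A minimal stdlib sketch of the same fan-out, in case xtractor's
# multi_threading helper is unavailable. It assumes get_text(idx, link) is
# called once per link and that results should come back in input order
# (ThreadPoolExecutor.map preserves order).
def get_all_text_stdlib(links: list):
    from concurrent.futures import ThreadPoolExecutor

    with ThreadPoolExecutor(max_workers=10) as pool:
        return list(pool.map(get_text, range(len(links)), links))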


if __name__ == "__main__":
    # Step 1 (kept commented out): collect all article links and save them.
    # all_links = get_all_links()
    # data = {"links": all_links}
    # dumps_the_json(data, json_file_name="./mufti_negeri_sem/links.json")
    # print(all_links)
    # print(len(all_links))

    # Step 2: reload the saved links and scrape each article body.
    file = read_the_json("./mufti_negeri_sem/links.json")
    links = file["links"]

    # Alternative title source (URL slugs) left disabled; titles are
    # synthesized as Page1, Page2, ... instead.
    # all_tajuk = [" ".join(x.split("/")[-2].split("-")) for x in links]
    all_tajuk = [f"Page{x+1}" for x in range(len(links))]

    all_text = get_all_text(links=links)

    # Step 3: dump the scraped articles to JSON, then convert to JSONL.
    data = {"title": all_tajuk, "body": all_text}
    dumps_the_json(
        data, json_file_name="./mufti_negeri_sem/mufti_negeri_sem_artikel.json"
    )
    jsonl_converter(
        json_file_path="./mufti_negeri_sem/mufti_negeri_sem_artikel.json",
        json_l_file_path="./mufti_negeri_sem/mufti_negeri_sem_artikel.jsonl",
        col_1_name="title",
        col_2_name="body",
    )