Ammar-Azman committed
Commit 45431a9
Parent: db1d8ec

Upload 2 files

Files changed (2)
  1. mufti_negeri_sem_artikel.jsonl +0 -0
  2. scraper.py +84 -0
mufti_negeri_sem_artikel.jsonl ADDED
The diff for this file is too large to render.
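
Since the JSONL diff is not rendered, here is a minimal sketch for loading the uploaded file, assuming each line is one JSON object with the title and body fields that scraper.py (below) writes:

import json

# Each line of the uploaded file is assumed to be one record of the
# form {"title": ..., "body": ...}, matching the columns written by
# jsonl_converter in scraper.py below.
with open("mufti_negeri_sem_artikel.jsonl", encoding="utf-8") as f:
    records = [json.loads(line) for line in f if line.strip()]

print(len(records), records[0]["title"])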
 
scraper.py ADDED
from xtractor.utils import (
    extractor,
    read_the_json,
    dumps_the_json,
    jsonl_converter,
    multi_threading,
)


def get_links(toplink):
    """Collect the article URLs listed on one archive page."""
    soup = extractor(toplink)

    div_tag = soup.find(
        "div",
        class_="elementor-element elementor-element-aa44724 ae-grid-list overlay-h-pos-center overlay-v-pos-middle filter-align-left elementor-widget elementor-widget-ae-post-blocks-adv",
    )
    if div_tag is None:  # archive grid missing: treat as an empty page
        return []

    article_links = []
    for a_tag in div_tag.find_all("a", href=True):
        # Keep absolute article URLs; skip the pagination links.
        if "https" in a_tag["href"] and "page" not in a_tag["href"]:
            article_links.append(a_tag["href"])

    # Deduplicate: each article card links to its page more than once.
    return list(set(article_links))


def get_all_links():
    """Walk the paginated archive until a page yields no links."""
    all_links = []
    i = 0
    while True:
        try:
            link = f"https://muftins.gov.my/fatwa-terkini/page/{i}"
            links = get_links(toplink=link)
            all_links.append(links)
            i += 1
            if len(links) == 0:
                break
        except Exception:  # network or parse failure ends the crawl
            break

    # Flatten the per-page lists into one list of URLs.
    return [y for x in all_links for y in x]


def get_text(idx, link):
    """Fetch one article and return its body text. The idx argument
    is required by the multi_threading worker signature but unused."""
    soup = extractor(link)

    div_tag = soup.find("div", class_="col-xs-9")
    if div_tag is None:  # article container missing: return an empty body
        return ""
    return div_tag.text.replace("\n", " ").strip()


def get_all_text(links: list):
    """Scrape all articles concurrently with 10 worker threads."""
    return multi_threading(get_text, links=links, worker_num=10)


if __name__ == "__main__":
    # First pass (already done): collect the article links and save them.
    # all_links = get_all_links()
    # data = {"links": all_links}
    # dumps_the_json(data, json_file_name="./mufti_negeri_sem/links.json")
    # print(all_links)
    # print(len(all_links))

    file = read_the_json("./mufti_negeri_sem/links.json")
    links = file["links"]

    # Titles from the URL slug were abandoned in favour of page numbers.
    # all_tajuk = [" ".join(x.split("/")[-2].split("-")) for x in links]
    all_tajuk = [f"Page{x + 1}" for x in range(len(links))]
    all_text = get_all_text(links=links)

    data = {"title": all_tajuk, "body": all_text}

    dumps_the_json(
        data, json_file_name="./mufti_negeri_sem/mufti_negeri_sem_artikel.json"
    )

    jsonl_converter(
        json_file_path="./mufti_negeri_sem/mufti_negeri_sem_artikel.json",
        json_l_file_path="./mufti_negeri_sem/mufti_negeri_sem_artikel.jsonl",
        col_1_name="title",
        col_2_name="body",
    )
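
scraper.py imports its helpers from a project-local xtractor.utils module that is not part of this commit. The sketch below shows plausible stand-ins inferred from the call sites only: extractor is assumed to fetch a URL and return a parsed BeautifulSoup document, and multi_threading is assumed to map func(idx, link) over the links on a thread pool. The real module may differ.

# Hypothetical stand-ins for two of the xtractor.utils helpers,
# inferred from how scraper.py calls them (assumptions, not the real code).
from concurrent.futures import ThreadPoolExecutor

import requests
from bs4 import BeautifulSoup


def extractor(url):
    # Assumed behaviour: fetch the page and return parsed HTML.
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    return BeautifulSoup(response.text, "html.parser")


def multi_threading(func, links, worker_num=10):
    # Assumed behaviour: run func(idx, link) for every link on a
    # thread pool, keeping the results in input order.
    with ThreadPoolExecutor(max_workers=worker_num) as pool:
        return list(pool.map(func, range(len(links)), links))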