Ammar-Azman committed on
Commit
bbf8dac
1 Parent(s): 4ab3e76

Upload 2 files

Browse files
Files changed (2) hide show
  1. mufti_perlis_artikel.jsonl +0 -0
  2. scraper.py +79 -0
mufti_perlis_artikel.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
scraper.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from xtractor.utils import (
2
+ extractor,
3
+ read_the_json,
4
+ dumps_the_json,
5
+ multi_threading,
6
+ jsonl_converter,
7
+ )
8
+
9
+
10
def get_title_with_link(link):
    """Collect article URLs from one fatwa listing page.

    Parameters
    ----------
    link : str
        URL of a paginated listing page on muftiperlis.gov.my.

    Returns
    -------
    list[str]
        Absolute URLs of the article pages linked from the listing table;
        empty when the page has no such table (e.g. past the last page).
    """
    soup = extractor(link)
    links = []
    # The listing is rendered as a Bootstrap table. `find` returns None
    # when the table is absent; the original code relied on a bare
    # `except: pass` to swallow the resulting AttributeError (and every
    # other error) — an explicit None check keeps the same "empty list on
    # missing table" behavior without hiding unrelated failures.
    divtag = soup.find("table", class_="category table table-bordered")
    if divtag is not None:
        for aTag in divtag.find_all("a", href=True):
            # Only hrefs containing "index" point at actual article pages.
            if "index" in aTag["href"]:
                href = aTag["href"]
                full_link = "https://muftiperlis.gov.my" + href
                links.append(full_link)
    return links
27
+
28
+
29
def get_all_links():
    """Walk the paginated fatwa archive and gather article links per page.

    Returns
    -------
    dict
        {"links": [[page-0 links], [page-1 links], ...]} — one nested list
        per listing page, matching the shape the downstream flattening
        code expects.
    """
    article_links_dict = {"links": []}
    i = 0
    while True:
        try:
            # The site paginates in steps of 10; `{i}0` appends a zero to
            # the counter to produce start=0, 10, 20, ...
            all_links = get_title_with_link(
                f"https://muftiperlis.gov.my/index.php/himpunan-fatwa-negeri?start={i}0"
            )
        except Exception:
            # Network/parse failure: treat as the end of pagination.
            break
        if not all_links:
            # An empty page means we walked past the last listing. Without
            # this check the loop never terminates, because
            # get_title_with_link swallows its own errors and returns []
            # instead of raising — the original `except: break` sentinel
            # could therefore never fire on exhausted pagination.
            break
        i += 1
        article_links_dict["links"].append(all_links)
    return article_links_dict
42
+
43
+
44
def get_text_article(idx, link: str) -> str:
    """Fetch one article page and return its body text on a single line.

    `idx` is accepted (and unused) so the signature matches what the
    multithreading helper passes to its worker — TODO confirm against
    multi_threading's call convention.
    """
    page = extractor(link)
    article_div = page.find("div", class_="article-details")
    # Flatten the extracted text: newlines/tabs become spaces, then trim.
    raw_text = str(article_div.text)
    flattened = raw_text.replace("\n", " ").replace("\t", " ")
    return flattened.strip()
50
+
51
+
52
def get_text_multithreading(links):
    """Scrape every article in `links` concurrently using 5 workers."""
    return multi_threading(get_text_article, links, worker_num=5)
55
+
56
+
57
if __name__ == "__main__":
    # Stage 1 (already run; kept commented for reference): crawl the
    # paginated archive and dump the collected links to JSON.
    # article_links_dict = get_all_links()
    # dumps_the_json(
    #     article_links_dict, json_file_name="./mufti_perlis/article_links.json"
    # )

    # Stage 2 (already run; kept commented for reference): reload the
    # links, derive titles from the URL slugs, scrape each article body
    # concurrently, and dump title/body pairs to JSON.
    # article_dict = read_the_json("./mufti_perlis/article_links.json")
    # link_list = article_dict["links"]
    # article_flat_list_title = [
    #     " ".join(y.split("/")[-1].split("-")[1:]) for x in link_list for y in x
    # ]
    # article_flat_list = [y for x in link_list for y in x]
    # all_text = get_text_multithreading(links=article_flat_list)
    # # print(len(article_flat_list), len(all_text))
    # data = {"title": article_flat_list_title, "body": all_text}
    # dumps_the_json(data, json_file_name="mufti_perlis_artikel.json")

    # Stage 3 (active): convert the scraped JSON into JSONL with
    # title/body columns. Assumes stage 2's output file already exists —
    # NOTE(review): stage 2 wrote to the working directory while this
    # reads from ./mufti_perlis/; verify the paths agree.
    jsonl_converter(
        "./mufti_perlis/mufti_perlis_artikel.json",
        json_l_file_path="./mufti_perlis/mufti_perlis_artikel.jsonl",
        col_1_name="title",
        col_2_name="body",
    )