File size: 2,358 Bytes
bbf8dac
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
from xtractor.utils import (
    extractor,
    read_the_json,
    dumps_the_json,
    multi_threading,
    jsonl_converter,
)


def get_title_with_link(link: str) -> list:
    """Scrape one fatwa listing page and return absolute article URLs.

    Parameters
    ----------
    link : str
        URL of a listing page on muftiperlis.gov.my.

    Returns
    -------
    list[str]
        Absolute URLs of article links found in the category table whose
        href contains "index". Empty list when the table is absent
        (e.g. past the last page) or the page failed to parse.
    """
    soup = extractor(link)
    links = []

    # Previously a bare `except: pass` hid every error (even
    # KeyboardInterrupt); the only realistic failure is the table being
    # missing, so check for that explicitly instead.
    table = soup.find("table", class_="category table table-bordered")
    if table is not None:
        for a_tag in table.find_all("a", href=True):
            href = a_tag["href"]
            if "index" in href:
                links.append("https://muftiperlis.gov.my" + href)
    return links


def get_all_links() -> dict:
    """Walk the paginated fatwa listing and collect all article links.

    Returns
    -------
    dict
        ``{"links": [...]}`` where each element is the list of URLs
        scraped from one listing page (list of lists, matching the
        original on-disk JSON layout).

    Note
    ----
    Pagination uses ``start={i}0`` (0, 10, 20, ...), i.e. ten items per
    page. The original ``except: break`` could never fire because
    get_title_with_link swallows its own errors and returns a list, so
    the loop ran forever appending empty lists; we now stop at the
    first page that yields no links.
    """
    article_links_dict = {"links": []}
    page = 0
    while True:
        page_links = get_title_with_link(
            f"https://muftiperlis.gov.my/index.php/himpunan-fatwa-negeri?start={page}0"
        )
        if not page_links:
            # An empty page means we are past the last listing page.
            break
        article_links_dict["links"].append(page_links)
        page += 1
    return article_links_dict


def get_text_article(idx, link: str) -> str:
    """Fetch one article page and return its body text.

    Parameters
    ----------
    idx
        Unused here; presumably multi_threading calls workers with an
        (index, item) pair — TODO confirm against xtractor.utils.
    link : str
        Absolute URL of the article page.

    Returns
    -------
    str
        The text of the "article-details" div with newlines/tabs
        collapsed to spaces, or "" when the div is missing (the
        original raised AttributeError on None and killed the worker).
    """
    soup = extractor(link)
    divtag = soup.find("div", class_="article-details")
    if divtag is None:
        return ""
    return str(divtag.text).replace("\n", " ").replace("\t", " ").strip()


def get_text_multithreading(links):
    """Scrape article bodies concurrently.

    Fans get_text_article out over *links* using the project's
    multi_threading helper with 5 workers, returning whatever that
    helper returns (presumably the collected texts — verify in
    xtractor.utils).
    """
    return multi_threading(get_text_article, links, worker_num=5)


if __name__ == "__main__":
    # Pipeline stages (run previously, now disabled):
    #   1. get_all_links() -> dumps_the_json(..., "./mufti_perlis/article_links.json")
    #   2. read_the_json("./mufti_perlis/article_links.json"), flatten the
    #      nested link lists, derive titles from the URL slug
    #      (" ".join(url.split("/")[-1].split("-")[1:])), scrape bodies with
    #      get_text_multithreading, then
    #      dumps_the_json({"title": ..., "body": ...}, "mufti_perlis_artikel.json")
    #
    # Current step: convert the scraped JSON into JSON Lines.
    jsonl_converter(
        "./mufti_perlis/mufti_perlis_artikel.json",
        json_l_file_path="./mufti_perlis/mufti_perlis_artikel.jsonl",
        col_1_name="title",
        col_2_name="body",
    )