from xtractor.utils import (
    extractor,
    read_the_json,
    dumps_the_json,
    jsonl_converter,
    multi_threading,
)
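
# `extractor(url)` from xtractor.utils is assumed to fetch a page and return a
# parsed BeautifulSoup tree; everything below relies on `.find`/`.find_all`.
# A minimal sketch of that assumption (hypothetical, not the actual xtractor
# implementation):
#
#     import requests
#     from bs4 import BeautifulSoup
#
#     def extractor(url):
#         response = requests.get(url, timeout=30)
#         response.raise_for_status()
#         return BeautifulSoup(response.text, "html.parser")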


def fetch_links(link="https://www.shinjiru.com.my/blog/"):
    """Collect article URLs from a single blog listing page."""
    soup = extractor(link)
    div_tags = soup.find_all("div", class_="row")

    temp = []
    for div_tag in div_tags:
        for a_tag in div_tag.find_all("a", href=True):
            href = a_tag["href"]
            # Keep only article links; skip pagination and category pages.
            if "blog" in href and "page" not in href and "category" not in href:
                temp.append(href)

    # Deduplicate: the same article can appear in more than one listing block.
    all_links = list(set(temp))
    return all_links


def get_all_links():
    """Walk every listing page and return a deduplicated list of article URLs."""
    temp = []
    # 38 listing pages existed at scrape time; hardcoded here (see the
    # dynamic-count sketch after this function).
    for i in range(38):
        base_link = f"https://www.shinjiru.com.my/blog/page/{i + 1}"
        temp.append(fetch_links(link=base_link))

    # Flatten the per-page lists, then deduplicate across pages.
    full_links = [link for page in temp for link in page]
    return list(set(full_links))
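

# The 38-page count in get_all_links() is hardcoded. A sketch for discovering
# it dynamically from the "/blog/page/N" pagination links instead;
# `count_blog_pages` is a hypothetical helper, not part of the original script.
def count_blog_pages(base="https://www.shinjiru.com.my/blog/"):
    import re

    soup = extractor(base)
    page_numbers = [
        int(match.group(1))
        for a_tag in soup.find_all("a", href=True)
        if (match := re.search(r"/blog/page/(\d+)", a_tag["href"]))
    ]
    return max(page_numbers, default=1)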


def get_articles(link):
    """Return a (title, body) pair for one article page, or None on failure."""
    soup = extractor(link)
    try:
        div_tag = soup.find("div", class_="col-lg")

        title = div_tag.find("h1").text
        p_tags = div_tag.find_all("p")

        # Join every paragraph into a single article body.
        article = " ".join(p.text for p in p_tags)

        return title, article
    except AttributeError:
        # A `find` call returned None: the page layout did not match.
        return None
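

# The main block below downloads articles sequentially. A threaded variant is
# sketched here with the standard library, since the signature of the imported
# `multi_threading` helper is not shown in this file;
# `fetch_articles_concurrently` is a hypothetical helper, not part of the
# original script.
def fetch_articles_concurrently(links, max_workers=8):
    from concurrent.futures import ThreadPoolExecutor

    # Scraping is I/O-bound, so a thread pool overlaps the network waits.
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        results = list(pool.map(get_articles, links))
    # Drop links whose pages failed to parse (get_articles returned None).
    return [r for r in results if r is not None]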


if "__main__" == __name__:
    # full_links = get_all_links()
    # print(len(full_links))
    # data = {"links": full_links}
    # dumps_the_json(data, json_file_name="./shinjiru/all_links_shinjiru.json")

    links = read_the_json("./shinjiru/all_links_shinjiru.json")
    links = links["links"]

    title_ = []
    body_ = []
    for link in links:
        try:
            title, article = get_articles(link)
            title_.append(title)
            body_.append(article)
        except:
            pass

    data = {"title": title_, "body": body_}

    dumps_the_json(data, json_file_name="./shinjiru/shinjiru_article.json")
    print("DUMPS!")
    jsonl_converter(
        json_file_path="./shinjiru/shinjiru_article.json",
        json_l_file_path="./shinjiru/shinjiru_article.jsonl",
        col_1_name="title",
        col_2_name="body",
    )
    print("CONVERTED!")