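"""Scrape the Shinjiru blog (https://www.shinjiru.com.my/blog/).

Collects article links from the paginated listing, extracts each post's
title and body text, and dumps the result to JSON and JSONL files under
./shinjiru/.
"""
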
from xtractor.utils import (
    extractor,
    read_the_json,
    dumps_the_json,
    jsonl_converter,
)
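# Assumption: extractor(url) fetches a page and returns a BeautifulSoup-style
# parsed document (the code below calls .find/.find_all on it);
# read_the_json/dumps_the_json load and save JSON files, and jsonl_converter
# rewrites the two-column JSON as JSONL. All four presumably come from this
# repo's own xtractor package.
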
def fetch_links(link="https://www.shinjiru.com.my/blog/"):
    """Collect article URLs from a single blog listing page."""
    soup = extractor(link)
    div_tags = soup.find_all("div", class_="row")

    temp = []
    for div_tag in div_tags:
        for a_tag in div_tag.find_all("a", href=True):
            href = a_tag["href"]
            # Keep only article links; skip pagination and category pages.
            if "blog" in href and "page" not in href and "category" not in href:
                temp.append(href)

    # Deduplicate before returning.
    return list(set(temp))

def get_all_links():
    """Walk all 38 listing pages and return every unique article URL."""
    pages = []
    for i in range(38):  # the blog had 38 listing pages at scrape time
        base_link = f"https://www.shinjiru.com.my/blog/page/{i + 1}"
        pages.append(fetch_links(link=base_link))

    # Flatten the per-page lists and deduplicate across pages.
    full_links = [link for page in pages for link in page]
    return list(set(full_links))

def get_articles(link):
    """Return (title, article_text) for one post, or None if the expected markup is missing."""
    soup = extractor(link)
    try:
        div_tag = soup.find("div", class_="col-lg")
        title = div_tag.find("h1").text
        p_tags = div_tag.find_all("p")
        article = " ".join(p.text for p in p_tags)
        return title, article
    except AttributeError:
        # find() returned None: the page does not use the expected layout.
        return None

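# Example (hypothetical URL): get_articles("https://www.shinjiru.com.my/blog/<slug>/")
# yields a (title, body_text) pair, or None when the post lacks the "col-lg" container.
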
if "__main__" == __name__:
# full_links = get_all_links()
# print(len(full_links))
# data = {"links": full_links}
# dumps_the_json(data, json_file_name="./shinjiru/all_links_shinjiru.json")
links = read_the_json("./shinjiru/all_links_shinjiru.json")
links = links["links"]
title_ = []
body_ = []
for link in links:
try:
title, article = get_articles(link)
title_.append(title)
body_.append(article)
except:
pass
data = {"title": title_, "body": body_}
dumps_the_json(data, json_file_name="./shinjiru/shinjiru_article.json")
print("DUMPS!")
jsonl_converter(
json_file_path="./shinjiru/shinjiru_article.json",
json_l_file_path="./shinjiru/shinjiru_article.jsonl",
col_1_name="title",
col_2_name="body",
)
print("CONVERTED!")