# Crawl blog pages and store the results in a txt file

import requests

from utils import url_manager
from bs4 import BeautifulSoup
import re

# Blog root URL the crawl starts from.
root_url = "https://www.cnblogs.com/obeigong"

# Matches an individual article page, e.g. .../p/12345678.html
# (dots are escaped — unescaped `.` matched ANY character, so the old
# pattern could accept unintended hosts/paths).
pattern = r'^https://www\.cnblogs\.com/obeigong/p/\d+\.html$'

# Matches a paginated listing page, e.g. .../?page=2
pattern2 = r'^https://www\.cnblogs\.com/obeigong/\?page=\d+$'


# Request headers: present a desktop-browser User-Agent so the site
# serves normal HTML rather than rejecting the scraper.
headers = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/107.0.0.0 Safari/398.63"
    ),
}

# URL manager tracks which URLs are pending vs. already crawled.
urls = url_manager.UrlManager()

# Seed the crawl queue with the blog home page.
urls.add_new_url(root_url)

# Output file: one "<url>\t<title>" line per crawled article.
fout = open("craw_all_page.txt", "w", encoding='utf-8')
# Crawl loop: process pending URLs until the queue is exhausted.
# Compile the patterns once instead of re-matching raw strings per URL.
article_re = re.compile(pattern)   # article detail pages
listing_re = re.compile(pattern2)  # paginated listing pages

try:
    while urls.has_new_url():
        curr_url = urls.get_url()
        try:
            r = requests.get(curr_url, headers=headers, verify=True, timeout=3)
        except requests.RequestException as e:
            # A single failed/timed-out request must not abort the whole crawl.
            print("error, request failed!", curr_url, e)
            continue
        if r.status_code != 200:
            print("error, return status_code is not 200!", curr_url)
            continue

        soup = BeautifulSoup(r.text, "html.parser")
        # Guard against pages that lack a <title> element
        # (soup.title is None there, and .string would raise).
        title = soup.title.string if soup.title is not None else ""

        # Only article detail pages are written to the output file.
        if article_re.match(curr_url):
            fout.write("%s\t%s\n" % (curr_url, title))
            fout.flush()
            print("success:%s,%s,%d" % (curr_url, title, len(urls.new_urls)))

        # Queue every outgoing link that is an article or a listing page.
        for link in soup.find_all("a"):
            href = link.get("href")
            if href is None:
                continue
            if article_re.match(href) or listing_re.match(href):
                urls.add_new_url(href)
finally:
    # Ensure the output file is closed even if the crawl raises.
    fout.close()