import requests
from bs4 import BeautifulSoup

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36 Edg/97.0.1072.69'
}

# NOTE: "#p{page}" is a URL fragment that the cnblogs front end uses for
# client-side paging; fragments are never sent to the server, so requests
# will receive the same first page for every URL in this list.
urls = [
    f"https://www.cnblogs.com/#p{page}"
    for page in range(1, 50 + 1)
]


def craw(url):
    # `headers` must be passed by keyword: the second positional argument
    # of requests.get is `params`, which would send the headers as a query
    # string instead of as request headers.
    r = requests.get(url, headers=headers)
    print(r.status_code)
    return r.text


def parse(html):
    # Each post title on the cnblogs front page is an <a> tag with the
    # class "post-item-title"; collect its link target and visible text.
    soup = BeautifulSoup(html, "html.parser")
    links = soup.find_all("a", class_="post-item-title")
    return [(link["href"], link.get_text()) for link in links]


if __name__ == "__main__":
    # Crawl just the first page and print each (href, title) pair;
    # swap in craw_all(urls) to walk all 50 pages.
    for link in parse(craw(urls[0])):
        print(link)