from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
# Entry point of the crawl: the first listing page of channel 4.
start_url = 'https://blog.qingke.me/channel/4'

def craw_one_page(url, file):
    """Scrape one listing page and append its entries to *file*.

    For every ``<li class="content">`` on the page, writes the entry's
    title and intro (separated by a ``==`` divider line) to the open,
    writable text file *file*.

    Returns the URL of the next page, or None when there is no "next"
    link (i.e. this was the last page).

    Raises requests.HTTPError on a non-2xx response.
    """
    response = requests.get(url)
    # Fail loudly on HTTP errors instead of silently parsing an error page.
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')
    for li in soup.find_all('li', {"class": "content"}):
        title = li.a.get_text()
        intro = li.find('div', {"class": "item-text"}).get_text()
        file.write(f'{title}\n{intro}\n')
        file.write('==================\n')

    # The last page has no "next" anchor, so default to None.
    next_url = None
    next_a = soup.find('a', {'class': 'next'})
    if next_a:
        # Resolve the (possibly relative) href against the page we just
        # fetched. The original concatenated the href onto the global
        # start_url, which breaks for absolute or path-relative hrefs.
        next_url = urljoin(url, next_a["href"])
    return next_url


# Crawl page by page, following the "next" link until it runs out.
# utf-8 is specified explicitly: the scraped titles/intros are Chinese and
# the platform-default encoding (e.g. on Windows) may not represent them,
# which would raise UnicodeEncodeError mid-crawl.
with open('blog.txt', 'w', encoding='utf-8') as file:
    next_url = start_url
    # Loop until craw_one_page reports there is no next page (None).
    while next_url:
        print(f'开始抓取：{next_url}')
        next_url = craw_one_page(next_url, file)
    print('没有下一页了，爬虫程序结束')