from bs4 import BeautifulSoup
import lxml
import requests

# 1. Load local HTML page data into a BeautifulSoup object
# fp = open('sougou.html', 'r', encoding='utf-8')
# soup = BeautifulSoup(fp, 'lxml')    # instantiate the bs object

# 2. Load page data fetched from the internet into a BeautifulSoup object
# page_text = response.text
# soup = BeautifulSoup(page_text, 'lxml')

# Download the first 100 chapters of the novel and append their text to read.txt.
headers = {
        'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36 Edg/89.0.774.68'
    }
url = 'http://mm.shuhai.com/read/46388/{}.html'
# Context manager guarantees the file is closed even if a request raises mid-loop;
# explicit encoding avoids platform-dependent defaults when writing Chinese text.
with open('read.txt', 'a', encoding='utf-8') as fp:
    # range(1, 101) so that 100 chapters are fetched — the original range(1, 100)
    # stopped at chapter 99 despite the "first 100 chapters" intent.
    for i in range(1, 101):
        # timeout prevents the script from hanging forever on a stalled connection
        response = requests.get(url=url.format(i), headers=headers, timeout=10).text
        soup = BeautifulSoup(response, 'lxml')
        chapter = soup.find('div', class_='chapter-item')
        if chapter is None:
            # Page layout changed or chapter missing — skip instead of crashing
            # with AttributeError on None.text.
            print('Chapter {} not found, skipped.'.format(i))
            continue
        fp.write(chapter.text)
        print('第{}章下载成功！'.format(i))
print('小说保存成功！！！')

