from bs4 import BeautifulSoup
import requests
import os
import time

# Scrape every chapter of "Romance of the Three Kingdoms" (三国演义) from
# shicimingju.com: read the table of contents, then fetch each chapter page
# and save its body text as ./sanguo/<chapter title>.txt.

BASE_URL = 'https://www.shicimingju.com'
# Was commented out in the original, which made the requests.get below
# raise NameError — the script could not run at all.
url = BASE_URL + '/book/sanguoyanyi.html'

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'
}

# Create the output directory up front. exist_ok=True makes re-runs safe
# (the original os.mkdir call was commented out, so every open() below
# would fail with FileNotFoundError on a fresh checkout).
os.makedirs('./sanguo', exist_ok=True)

# Table of contents: each <li> under .book-mulu holds one chapter link.
content = requests.get(url=url, headers=headers, timeout=10).text
soup = BeautifulSoup(content, 'lxml')
page_list = soup.select('.book-mulu > ul > li')

for each_page in page_list:
    title = each_page.a.string
    page_url = BASE_URL + each_page.a['href']
    page_text = requests.get(page_url, headers=headers, timeout=10).text
    soups = BeautifulSoup(page_text, 'lxml')
    div = soups.find('div', class_='chapter_content')
    if div is None:
        # Layout changed or the request was blocked — skip this chapter
        # instead of crashing the whole run on div.text.
        print('%s    爬取失败' % title)
        continue
    text = div.text
    text_path = './sanguo/' + title + '.txt'
    with open(text_path, 'w', encoding='utf-8') as f:
        f.write(text)
    # Report success only after the file has actually been written.
    print('%s    爬取成功' % title)
    time.sleep(1)  # be polite to the server between requests




