import requests
import time
import re
import os
import lxml.html
from bs4 import BeautifulSoup
# Collect every chapter link from the table-of-contents page.
def get_all_hrefList(startUrl, content):
    """
    Extract the chapter-page URLs from the TOC page and return them as a list.

    :param startUrl: base URL; each matching link text is appended to it
    :param content: raw HTML of the table-of-contents page
    :return: list of absolute chapter URLs (link texts ending in '.html')
    """
    print(f'开启 {startUrl}')
    hrefList = []
    selector = lxml.html.fromstring(content)
    # Narrow down to the TOC table body/bodies only.
    toc_url_path_list = selector.xpath("//*[@id='top']/div[3]/table/tbody")
    for tbody in toc_url_path_list:
        # BUGFIX: the original did `url[0].xpath('//tr/td/a/text()')` — an
        # *absolute* XPath that ignores the context node and rescans the
        # whole document on every iteration (duplicating results if more
        # than one tbody matched).  Query the tbody itself with a relative
        # path instead.
        # NOTE(review): the link *text* is used as the URL suffix, not the
        # href attribute — this relies on the site naming links after their
        # file names; confirm against the live page.
        for link_text in tbody.xpath('./tr/td/a/text()'):
            if link_text.endswith('.html'):
                hrefList.append(startUrl + link_text)
                print('添加url:' + (startUrl + link_text))
            else:
                print('不爬虫该页面')
    return hrefList

# Extract the chapter title and (lightly cleaned) page body.
def get_article(content):
    """
    Return the page title and the page HTML with literal '<br/>' tags removed.

    :param content: raw HTML of one chapter page
    :return: tuple ``(text_name, text_content)`` where ``text_name`` is the
             <title> text (or ``'untitled'`` when the page has none) and
             ``text_content`` is the HTML with every exact ``<br/>`` token
             stripped.
    """
    selector = lxml.html.fromstring(content)
    titles = selector.xpath("//html/head/title/text()")
    # BUGFIX: the original indexed [0] unconditionally and raised IndexError
    # on a page without a <title>; fall back to a placeholder name instead.
    text_name = titles[0] if titles else 'untitled'
    # Only the exact '<br/>' spelling is removed, matching the original
    # behaviour ('<br>' / '<br />' variants are deliberately untouched).
    text_content = content.replace('<br/>', '')
    return text_name, text_content

# Save one chapter to disk.
def save(bookName, text_name, text_content):
    """
    Write ``text_content`` to ``<bookName>/<text_name>.html`` as UTF-8.

    :param bookName: target directory (created if missing)
    :param text_name: chapter title, used as the file name stem; characters
                      that are illegal in Windows file names are replaced
                      with ``'_'`` so a title such as ``'a: b?'`` cannot
                      make ``open()`` fail
    :param text_content: HTML text to write
    """
    # Create the target directory (no-op when it already exists).
    os.makedirs(bookName, exist_ok=True)
    # BUGFIX: titles come straight from <title> and may contain characters
    # that are invalid in file names on the Windows path this script targets.
    safe_name = re.sub(r'[\\/:*?"<>|]', '_', text_name)
    with open(os.path.join(bookName, safe_name + ".html"), 'w', encoding='utf-8') as f:
        f.write(text_content)

def main():
    """Crawl the TOC page, then fetch and save every chapter page.

    Downloads https://yueshushu.top/dev/, extracts the chapter URLs with
    :func:`get_all_hrefList`, then fetches each chapter, cleans it with
    :func:`get_article` and writes it to disk with :func:`save`.
    """
    startTime = time.time()
    print('开始爬虫')
    # timeout so a stalled server cannot hang the crawl forever
    startHtmlResponse = requests.get("https://yueshushu.top/dev/", timeout=30)
    startHtmlContent = startHtmlResponse.content.decode()
    urlList = get_all_hrefList('https://yueshushu.top/dev/', startHtmlContent)
    print(f'要获取的页面:{urlList}')
    for url in urlList:
        # Fetch each chapter page, extract title + body, save to disk.
        tempResponse = requests.get(url, timeout=30)
        text_name, text_content = get_article(tempResponse.content.decode())
        save('E:\\dev页面_xpath', text_name, text_content)
        print(f'爬虫 {url} 并保存成功')
    print('爬虫成功')
    endTime = time.time()
    print(f'共用时: {endTime - startTime}')
    # previous observed run time: ~16.04 s


# Guard so importing this module no longer triggers the crawl as a side effect.
if __name__ == '__main__':
    main()