import requests
import time
from multiprocessing.dummy import Pool
import re
import os
# Collect every chapter link from the index page
def get_all_hrefList(startUrl, content):
    """
    Extract each chapter link from the index page HTML and return
    them as absolute URLs.

    :param startUrl: base URL prefixed to every relative link
    :param content: decoded HTML of the index page
    :return: list of absolute chapter URLs; empty when the page has
             no <tbody> section (previously this raised IndexError)
    """
    print(f'开启 {startUrl}')
    hrefList = []
    # The <tbody> section holds all chapter anchors; bail out gracefully
    # instead of crashing when the page layout is unexpected.
    toc_match = re.search(r'<tbody>(.*?)</tbody', content, re.S)
    if toc_match is None:
        return hrefList
    toc_book = toc_match.group(1)
    print('toc_book:' + toc_book)
    # Keep only the link targets (the pattern matches the tail of "href='...'")
    toc_url = re.findall(r"ref='(.*?)'", toc_book, re.S)
    print(f'所有的页面数据:{toc_url}')
    for url in toc_url:
        # Skip the ICP-registration link; it is not a chapter page.
        if url == 'https://beian.miit.gov.cn/':
            print('不爬虫该页面')
        else:
            hrefList.append(startUrl + url)
            print('添加url:' + (startUrl + url))
    return hrefList

# Extract the chapter title and body text from a page
def get_article(content):
    """
    Pull the chapter title out of the page and strip <br/> tags
    from the body.

    :param content: decoded HTML of a chapter page
    :return: (title, cleaned_html) tuple; the title falls back to
             'untitled' when the page has no <title> tag (previously
             this raised AttributeError on .group)
    """
    title_match = re.search(r'title>(.*?)</title', content, re.S)
    text_name = title_match.group(1) if title_match else 'untitled'
    # The whole page is kept as the article body; only the
    # line-break tags are removed.
    text_content = content.replace('<br/>', '')
    return text_name, text_content

# Persist one chapter to disk
def save(bookName, text_name, text_content):
    """
    Write one chapter to <bookName>/<text_name>.html, creating the
    directory when it is missing.

    :param bookName: target directory
    :param text_name: chapter title used as the file name; characters
                      that are illegal in file names are replaced by '_'
    :param text_content: HTML text written to the file (UTF-8)
    """
    # 制作目录
    os.makedirs(bookName, exist_ok=True)
    # Titles scraped from HTML may contain path separators or other
    # characters invalid in file names; sanitize to avoid open() errors
    # and accidental directory traversal.
    safe_name = re.sub(r'[\\/:*?"<>|]', '_', text_name)
    with open(os.path.join(bookName, safe_name + ".html"), 'w', encoding='utf-8') as f:
        f.write(text_content)
def saveUrl(url, bookName='E:\\dev页面', timeout=10):
    """
    Download one chapter page and save it to disk.

    :param url: absolute URL of the chapter page
    :param bookName: directory the chapter is written into
                     (default preserves the original hard-coded path)
    :param timeout: seconds to wait for the HTTP response; without a
                    timeout a stalled server would hang the worker
                    thread forever
    """
    # 获取每一个页面的内容
    tempResponse = requests.get(url, timeout=timeout)
    text_name, text_content = get_article(tempResponse.content.decode())
    save(bookName, text_name, text_content)
    print(f'爬虫 {url} 并保存成功')
# --- script entry point: crawl the index page, then every chapter ----
startTime = time.time()
print('开始爬虫')
# Fetch the index page that lists all chapters; a timeout prevents the
# whole script from hanging on a dead connection.
startHtmlResponse = requests.get("https://yueshushu.top/dev/", timeout=10)
startHtmlContent = startHtmlResponse.content.decode()
urlList = get_all_hrefList('https://yueshushu.top/dev/', startHtmlContent)
print(f'要获取的页面:{urlList}')
# Download chapters with 10 worker threads; the context manager closes
# and joins the pool instead of leaking its threads.
with Pool(10) as pool:
    pool.map(saveUrl, urlList)
print('爬虫成功')
endTime = time.time()
print(f'共用时: {endTime - startTime}')
# previous run: 1.664634 s