import os
import shutil
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Optional, Tuple
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup as sp


# Fetch a URL and return its decoded HTML text.
def getHTMLText(url, timeout=30) -> Optional[str]:
    """Fetch *url* and return the response body as text.

    Returns None on any request failure (connection error, timeout,
    or a non-2xx status).

    NOTE(review): the previous default ``timeout=3000`` is seconds in
    requests (~50 minutes) and was almost certainly meant as
    milliseconds; 30 s matches the value every caller in this file
    already passes.
    """
    try:
        r = requests.get(url, timeout=timeout)
        r.raise_for_status()  # route 4xx/5xx responses to the except path
        # The site may mislabel its charset; trust content sniffing.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException as e:
        # Narrowed from bare Exception: only network/HTTP errors are expected here.
        print("Request Error: ", e)
        return None


# Collect the chapter list: (chapter title, absolute chapter URL) pairs.
def getUrls(baseUrl, timeout=30) -> list:
    """Scrape the chapter index at *baseUrl*.

    Returns a list of ``(title, absolute_url)`` tuples, or an empty
    list when the page cannot be fetched or the expected chapter
    table is missing from the markup.
    """
    urls = []
    text = getHTMLText(baseUrl, timeout)
    if text is not None:
        soup = sp(text, 'lxml')
        # The chapter links live in this specific <table> on the site.
        table = soup.find('table', {'cellspacing': 1, 'bgcolor': '#d4d0c8'})
        # Guard: a site-layout change makes find() return None, and the
        # original code then crashed with AttributeError on .findAll().
        if table is not None:
            urls = [(a.get_text().strip(), urljoin(baseUrl, a.attrs['href']))
                    for a in table.find_all('a')]
    return urls


# Extract the chapter title and body text from a chapter page.
def parse(text: str) -> Tuple[str, str]:
    """Parse one chapter page, returning ``(title, content)``.

    The body is assembled from the plain-text children of the first
    <p>; fragments of 5 characters or fewer (stray whitespace,
    separators) are dropped, preserving the original filter rule.
    Missing <font> or <p> tags yield empty strings instead of crashing.
    """
    soup = sp(text, 'lxml')
    font = soup.find('font')
    # get_text() survives nested tags inside <font>, where the original
    # ''.join(tag) raised TypeError; also tolerates a missing tag.
    title = font.get_text() if font is not None else ''
    children = soup.p.contents if soup.p is not None else []
    # Keep only text nodes (NavigableString is a str subclass); Tag
    # children are not str and broke ''.join() in the original code.
    context = ''.join(x for x in children
                      if isinstance(x, str) and len(x) > 5)
    return (title, context)


# Thread entry point: download one chapter and write it to disk.
def save_file(url, abspath: str):
    """Download the chapter at *url* and write its body to *abspath*.

    Returns the chapter title, or None when the page could not be
    fetched (nothing is written in that case).
    """
    # The original called undefined names get_html/parse_para, raising
    # NameError on every invocation; the helpers defined in this module
    # are getHTMLText and parse.
    text = getHTMLText(url)
    if text is None:  # fetch failed — skip the write instead of crashing on parse
        return None
    title, content = parse(text)
    with open(abspath, 'w', encoding='utf-8') as f:
        f.write(content)
    return title

def main(basepath, baseurl, timeout=30):
    """Download every chapter listed under *baseurl* into *basepath*.

    Chapters are fetched concurrently with 5 worker threads; each is
    written to ``<basepath>/<title>.txt``.  ``timeout`` (seconds) is
    forwarded to the index-page request; its default keeps the old
    two-argument call signature working.
    """
    # exist_ok: re-running the scraper must not die on an existing
    # directory (the original swallowed the error with a bare except
    # that printed a *success* message on failure).
    os.makedirs(basepath, exist_ok=True)
    start = time.time()
    # Bind the pool created by the with-statement (the original
    # discarded it and leaked a second, never-shut-down executor).
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = []
        for title, url in getUrls(baseurl, timeout):
            abspath = os.path.join(basepath, title + '.txt')
            print(abspath, " 开始下载: ", url)
            futures.append(executor.submit(save_file, url, abspath))
        # Wait for every download so the elapsed time covers the real
        # work (the original timed only task submission) and worker
        # exceptions surface instead of being silently dropped.
        for future in as_completed(futures):
            try:
                future.result()
            except Exception as e:
                print("Download failed: ", e)
    end = time.time() - start
    print("《球状闪电》爬取，程序运行时间: ", end)


if __name__ == '__main__':
    savePath = r'E:/小说/球状闪电'
    baseUrl = r'http://www.yahoowe.com/book3/6633/'
    timeout = 30
    # The original passed the *time* module as a third argument, which
    # main(basepath, baseurl) does not accept -> TypeError at startup.
    main(savePath, baseUrl)

