import os

import requests, re
import concurrent
from concurrent.futures import ThreadPoolExecutor


def validateTitle(title):
    """Return *title* with characters illegal in Windows file names replaced by '_'.

    Used to turn scraped book/chapter titles into safe path components.
    """
    # Forbidden on Windows: / \ : * ? " < > |
    return re.sub(r"[\/\\\:\*\?\"\<\>\|]", "_", title)


# Desktop-browser User-Agent sent with every request so the site serves the
# normal HTML pages instead of rejecting the default python-requests client.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
}


# Fetch the text of a single chapter and write it to disk.
# Example chapter page: url = 'https://www.kenshu.cc/xiaoshuo/78774/39281750/'
def get_chapter_txt(url):
    """Download one chapter page and save its text to
    C:\\Users\\marrido\\Desktop\\xs\\<book title>\\<chapter>.txt.

    url: a chapter page, e.g. https://www.kenshu.cc/xiaoshuo/78774/39281750/
    Raises IndexError if the page layout does not match the regexes,
    requests.RequestException on network failure/timeout.
    """
    # FIX: the second positional parameter of requests.get() is `params`,
    # so the original call sent the User-Agent as a query string instead of
    # an HTTP header — it must be passed by keyword. A timeout keeps a
    # stalled request from hanging a pool worker forever.
    res_html = requests.get(url, headers=headers, timeout=30)
    res_html.encoding = res_html.apparent_encoding
    res_html_txt = res_html.text

    # Chapter body, chapter heading (<h1>), and the book title taken from
    # the prev/next navigation link row.
    html_txt = re.findall(r'<div class="article-con">(.*?)</div>.*?<div class="articlebtn">', res_html_txt, re.S)[0]
    chapter = validateTitle(re.findall(r'<h1 class="article-title">(.*?)</h1>', res_html_txt, re.S)[0])
    title = validateTitle(re.findall(r'上一章</a><a href=".*?">(.*?)</a><a href=".*?">下一章', res_html_txt, re.S)[0])

    # Strip the HTML artifacts from the chapter body.
    txt = html_txt.replace('&nbsp;', '').replace('<br />', '').replace('<br>', '')

    # FIX: the original literal contained the invalid escape '\D'
    # (DeprecationWarning now, SyntaxError in a future Python); the properly
    # escaped components below join to the identical path.
    book_dir = os.path.join('C:\\Users\\marrido\\Desktop\\xs', title)
    if os.path.exists(book_dir):
        print(title + '文件夹已创建直接写入文件')
    # exist_ok removes the check-then-create race when several workers
    # download chapters of the same book concurrently.
    os.makedirs(book_dir, exist_ok=True)

    # 'w' (not 'w+'): the file is only written, never read back.
    with open(os.path.join(book_dir, chapter + '.txt'), 'w',
              encoding='utf-8', errors='ignore') as f:
        f.write(chapter + '\n' + txt)
        print('写入---' + title + '---' + chapter + '成功')


# Collect the URL of every chapter of one book from its index page.
# Index page of the book to download (book id 1684); handed to
# get_chapter() by the __main__ block at the bottom of the file.
url = 'https://www.kenshu.cc/xiaoshuo/1684/0/'
def get_chapter(url):
    """Return the absolute URL of every chapter listed on a book's index page.

    url: the book index page, e.g. https://www.kenshu.cc/xiaoshuo/1684/0/
    """
    response = requests.get(url, headers=headers)
    response.encoding = response.apparent_encoding
    page = response.text
    # Each chapter entry on the index page is an <li><span><a href="..."> item
    # holding a site-relative link.
    relative_links = re.findall(r'<li><span><a href="(.*?)">.*?</a></span></li>', page, re.S)
    absolute_links = []
    for link in relative_links:
        absolute_links.append('https://www.kenshu.cc{}'.format(link))
    return absolute_links


# Download the full novel text, one task per chapter URL.
# Example: urls = get_chapter('https://www.kenshu.cc/xiaoshuo/78774/0/')
def download_all_txt(urls):
    """Download every chapter URL in *urls* concurrently via get_chapter_txt.

    FIX: the work is network-I/O bound, so a ThreadPoolExecutor (already
    imported at the top of the file, and what the caller's comment promises)
    is the right pool — the original ProcessPoolExecutor paid process-spawn
    cost for no benefit and silently discarded every worker exception
    because the submitted futures were never inspected.
    """
    with ThreadPoolExecutor(max_workers=5) as executor:
        futures = [executor.submit(get_chapter_txt, u) for u in urls]
        for future in concurrent.futures.as_completed(futures):
            exc = future.exception()
            if exc is not None:
                # Report failures instead of dropping them, but keep the
                # best-effort semantics: one bad chapter doesn't abort the batch.
                print('download failed: {}'.format(exc))


# Script entry point.
if __name__ == '__main__':


    # Thread-pool download: gather every chapter URL for the book at the
    # module-level `url`, then fetch them all concurrently.
    urls = get_chapter(url)
    download_all_txt(urls)

