import os

import requests, re
import concurrent
from concurrent.futures import ThreadPoolExecutor


# Pre-compiled pattern of characters Windows forbids in file/folder names.
_ILLEGAL_FS_CHARS = re.compile(r"[\/\\\:\*\?\"\<\>\|]")  # / \ : * ? " < > |


def validateTitle(title):
    """Sanitize *title* for use as a Windows filename.

    Every character that is illegal in an NTFS path component
    (/ \\ : * ? " < > |) is replaced by an underscore.
    """
    return _ILLEGAL_FS_CHARS.sub("_", title)


# Browser-like User-Agent sent with every request so the target site
# does not reject us as an obvious script.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
}


# Fetch one chapter page and write its text to disk.
# Example chapter url: 'https://www.kenshu.cc/xiaoshuo/78774/39281750/'
def get_chapter_txt(url):
    """Download one chapter page, extract the book title, chapter title and
    body text, and write the chapter to
    ``C:\\Users\\marrido\\Desktop\\xs\\<book>\\<chapter>.txt``.

    Parameters:
        url: absolute URL of a single chapter page on kenshu.cc.

    Raises:
        IndexError: if the page layout does not match the expected regexes.
    """
    # BUG FIX: the original called requests.get(url, headers) — the second
    # positional argument of requests.get is `params`, so the User-Agent
    # header was never actually sent. It must be passed as headers=headers
    # (as the other requests in this file already do).
    res_html = requests.get(url, headers=headers)
    res_html.encoding = res_html.apparent_encoding
    res_html_txt = res_html.text
    # Chapter body sits between the article container and the nav buttons.
    html_txt = re.findall(r'<div class="article-con">(.*?)</div>.*?<div class="articlebtn">', res_html_txt, re.S)[0]
    # <h1> holds the chapter heading; the prev/next nav anchors the book title.
    chapter = validateTitle(re.findall(r'<h1 class="article-title">(.*?)</h1>', res_html_txt, re.S)[0])
    title = validateTitle(re.findall(r'上一章</a><a href=".*?">(.*?)</a><a href=".*?">下一章', res_html_txt, re.S)[0])
    # Strip layout-only HTML remnants from the body text.
    txt = html_txt.replace('&nbsp;', '').replace('<br />', '').replace('<br>', '')

    # One folder per book; reuse it when it already exists.
    book_dir = os.path.join('C:\\Users\\marrido\\Desktop\\xs', title)
    if not os.path.exists(book_dir):
        os.makedirs(book_dir)
    else:
        print(title + '文件夹已创建直接写入文件')

    with open(os.path.join(book_dir, chapter + '.txt'), 'w',
              encoding='utf-8', errors='ignore') as f:
        f.write(chapter + '\n' + txt)
        print('写入---' + title + '---' + chapter + '成功')








# Fetch a book's table-of-contents page and return its chapter URLs.
# Example index url: 'https://www.kenshu.cc/xiaoshuo/78774/0/'
def get_chapter(url):
    """Return the absolute URL of every chapter listed on the book's index page."""
    response = requests.get(url, headers=headers)
    response.encoding = response.apparent_encoding
    # Each chapter link is wrapped in <li><span><a href="...">.
    relative_links = re.findall(r'<li><span><a href="(.*?)">.*?</a></span></li>', response.text, re.S)
    # The hrefs are site-relative; prefix the host to make them absolute.
    return ['https://www.kenshu.cc' + link for link in relative_links]


# Download every chapter of one book concurrently.
# urls = 'https://www.kenshu.cc/xiaoshuo/78774/0/'
def download_all_txt(urls):
    """Download all chapter pages in *urls* concurrently via a thread pool.

    BUG FIX: the original used ProcessPoolExecutor despite the "线程池"
    (thread pool) comment and the unused ThreadPoolExecutor import at the
    top of the file. Downloading is I/O-bound, so threads are the right
    tool (the GIL is released during blocking I/O); process pools on
    Windows also require picklable callables and re-import the module
    per worker, which makes them fragile for this script.
    """
    with ThreadPoolExecutor(max_workers=10) as executor:
        for url in urls:
            executor.submit(get_chapter_txt, url)


# Entry point: crawl the listing pages, collect every book's chapter-index
# URL, then download each book chapter by chapter.
if __name__ == '__main__':
    # Listing pages to crawl (currently only page 1).
    listing_pages = ['https://www.kenshu.cc/nvsheng/{}/'.format(page) for page in range(1, 2)]

    book_links = []
    for listing_url in listing_pages:
        res = requests.get(listing_url, headers=headers)
        res.encoding = 'gbk'
        # Each book entry on the listing page carries a "booktitle" anchor.
        matches = re.findall(r'<p style="height: 25px;">.*?<a href="(.*?)" class="booktitle">.*?</a></p>', res.text, re.S)
        book_links.extend(matches)
    # book_links now holds the site-relative URL of every book found.

    # Prefix the host and append '0' to reach the chapter-index page.
    book_links = ['https://www.kenshu.cc{}0'.format(link) for link in book_links]
    print(book_links)

    for book_url in book_links:
        # Thread-pool download of every chapter of this book.
        chapter_urls = get_chapter(book_url)
        download_all_txt(chapter_urls)
        # Sequential alternative, one chapter at a time:
        # for chapter_url in chapter_urls:
        #     get_chapter_txt(chapter_url)

########################################################################################################################


#
# # url = 'https://www.ranwen8.com/book/117427.html'
#
# # 获取一本书的所有章节的url+章节名
# def get_chapter(url):
#     res = requests.get(url,headers=headers)
#     res.encoding = 'gbk'
#     html = res.text
#     htmlmsg = re.findall(r'<dd class="col-md-3"><a href="(.*?)" title=".*?">.*?</a></dd>',html,re.S)
#     htmlmsg = ['https://www.ranwen8.com{}'.format(i) for i in htmlmsg]
#     # print(htmlmsg)
#     return htmlmsg
#
#
# # 获取一个章节的文字
# def get_chapter_txt(url):
#     # url ='https://www.ranwen8.com/book/117427/43852875.html'
#     res = requests.get(url,headers=headers)
#     res.encoding = 'gbk'
#     html = res.text
#     # print(html)
#     title = validateTitle(re.findall(r'<h1 class="readTitle">(.*?)<small></small></h1>', html, re.S)[0]).strip()
#     name = validateTitle(re.findall(r'<meta name="apple-mobile-web-app-title" content="天域神座">',html,re.S)[0])
#     print(name)
#     try:
#         os.makedirs('C:\\Users\\marrido\Desktop\\xs1\\' + name)
#     except EOFError:
#         pass
#     txt = re.findall(r'<div class="panel-body" id="htmlContent">(.*?)</div>', html, re.S)[0]
#     ad = re.findall(r'<div class="panel-body" id="htmlContent">(.*?)<br><br>', html, re.S)[0]
#     # 清洗数据
#     txt = txt.replace(ad,'').replace('&nbsp;','').replace('<br />','').replace('<br>','')
#     # print(txt)
#     # 写入数据
#     with open('C:\\Users\\marrido\Desktop\\xs1\\' + name+'\\'+title +'.txt', 'w+', encoding='gbk', errors='ignore') as f:
#         f.write(title + '\n' + txt)
#
# # 下载所有小说文字
# def download_all_txt(urls):
#     with concurrent.futures.ProcessPoolExecutor(max_workers=5) as exector:
#         for url in urls:
#             exector.submit(get_chapter_txt, url)
#
#
#
# if __name__ == '__main__':
#
#     all_title_url = ['https://www.ranwen8.com/wanben/{}'.format(i) for i in range(1, 5)]
#
#     urltitles = []
#     for urla in all_title_url:
#         res = requests.get(urla,headers=headers)
#         res.encoding = 'gbk'
#         html = res.text
#         htmlmsg = re.findall(r'<td><a href="(.*?)" title=',html,re.S)
#
#         for urltitle in htmlmsg:
#             urltitles.append(urltitle)
#     # 获得到所有的文章url urltitles
#
#     for ii in urltitles:
#
#         urls = get_chapter(ii)
#         download_all_txt(urls)
