import threading
import configparser
from concurrent.futures import ThreadPoolExecutor

import requests
import re
import os
import html
from bs4 import BeautifulSoup
import lxml



def get_html(url):
    """
    Fetch a URL and parse the response body.

    :param url: request URL
    :return: BeautifulSoup-parsed HTML on HTTP 200, otherwise the int 400
             (legacy failure sentinel kept for backward compatibility)
    """
    # Timeout prevents a hung connection from blocking a worker thread forever.
    r = requests.get(url, timeout=30)
    if r.status_code == 200:
        soup = BeautifulSoup(r.text, 'lxml')
    else:
        soup = 400
    return soup

def get_text(html, url, bk, auto):
    """
    Collect the body text of every chapter listed on one chapter-index page.

    :param html: site base URL
    :param url: site-relative URL of the chapter-index page
    :param bk: book title
    :param auto: author name
    :return: concatenated text of all chapters on this index page
    """
    soup = get_html(html + url)
    bp = soup.select('.section-list')[1]
    chapTitleList = bp.select('a')
    chapSumText = bk + "\r" + "作者:" + auto + "\r"
    # Walk each chapter link on the index page.
    for i in chapTitleList:
        # Fixed: print the chapter title; str(i) dumped the raw tag HTML.
        print("获取第" + i.text + "章内容")
        h_url = html + i['href']
        cp = get_html(h_url)
        # The heading looks like "标题（x/y）"; y is the chapter's page count.
        try:
            zfy = cp.select('h1.title')[0].text
            # Raw string: the old '\）' literal relied on an invalid escape.
            num = int(re.findall(r'/(.*?)）', zfy)[-1])
        except Exception as e:  # was BaseException, which also swallowed Ctrl-C
            num = 1
            print(str(e))
        # Collect this chapter's text; join once instead of quadratic +=.
        parts = ["\n\n" + i.text + "\n\n"]
        # Pagination URLs look like "xxx_2.html".
        for h in range(1, num + 1):
            zj_index = h_url.index('.html')
            zj_url = h_url[:zj_index] + '_' + str(h) + h_url[zj_index:]
            dp = get_html(zj_url)
            textList = dp.select('p')
            # Skip the trailing <p>, which is site boilerplate, not story text.
            for j in textList[:-1]:
                parts.append(j.text)
        chapSumText += ''.join(parts)
    return chapSumText

def get_all_text(html, url, bk, auto):
    """
    Download a whole book whose chapter list sits under the page's second <dt>.

    :param html: site base URL
    :param url: site-relative book URL
    :param bk: book title
    :param auto: author name
    :return: full book text
    """
    book_text = bk + '\r' + bk + '\r' + auto + '\n\n'
    soup = get_html(html + url)
    second_dt = soup.select_one('dl > dt:nth-of-type(2)')
    # If the second dt exists, its following dd siblings are the chapter list.
    if second_dt:
        for dd in second_dt.find_next_siblings('dd'):
            a_tag = dd.find('a')
            if a_tag and 'href' in a_tag.attrs:
                print(dd.text)
                book_text += dd.text + '\n\n'
                zj_url = html + a_tag['href']
                # Follow "下一页" links through every pagination page of the chapter.
                while True:
                    dp = get_html(zj_url)
                    chapText = ""  # text of this pagination page
                    # Skip the trailing <p> (boilerplate, not story text).
                    for j in dp.select('#content p')[:-1]:
                        chapText += j.text + '\n\n'
                    book_text += chapText
                    # select_one returns None instead of raising the IndexError
                    # that select('.next')[0] hit on pages without a next link;
                    # also avoids shadowing the builtin next().
                    next_link = dp.select_one('.next')
                    if next_link and '下一页' in next_link.text:
                        zj_url = html + next_link['href']
                    else:
                        break
    return book_text

def get_book_chapter(html, url, bk, auto):
    """
    Fetch the full text of a book, handling both site layouts.

    Books whose index page has <option> pagination are fetched page by page
    via get_text; otherwise the whole chapter list is read via get_all_text.

    :param html: site base URL
    :param url: site-relative book URL
    :param bk: book title
    :param auto: author name
    :return: the complete book text
    """
    index_soup = get_html(html + url)
    page_options = index_soup.select('option')
    if not page_options:
        # No pagination dropdown: single chapter-list layout.
        return get_all_text(html, url, bk, auto)
    # One get_text call per chapter-index page, stitched together in order.
    pieces = [get_text(html, option['value'], bk, auto) for option in page_options]
    return ''.join(pieces)

def get_book(html, fold, bookUrl, bookName, auto):
    """
    Download one book and save it as a .txt file in its category folder.

    :param html: site base URL
    :param fold: category name (used as the folder name)
    :param bookUrl: site-relative book URL
    :param bookName: book title (used as the file name)
    :param auto: author name
    :return: the book name on success, 'N' on failure (legacy sentinel)
    """
    # Full text of the book.
    book = get_book_chapter(html, bookUrl, bookName, auto)
    # NOTE(review): assumes config['config']['path'] ends with a separator — confirm.
    savePath = config['config']['path'] + fold + '/' + bookName + '.txt'
    try:
        # "with" guarantees the handle is closed even if write() raises;
        # the old open/close pair leaked the handle on a failed write.
        with open(savePath, mode='w', encoding='utf-8') as zfile:
            zfile.write(book)
        print(bookName + '-----已完成下载')
        return bookName
    except OSError as e:  # was BaseException; only filesystem errors are expected here
        print(e)
        return 'N'

def get_booklist(html):
    """
    Crawl the complete-book listing and download every book concurrently.

    :param html: site base URL
    """
    foldNameList = []
    bookNameList = []
    autoNameList = []
    one_url = html + str(config['config']['qb_booklist'])  # listing index, e.g. '/booklist/'
    soup = get_html(one_url)
    z = soup.select('.pagination a')  # pagination links
    lastfy = int(z[-1].text)  # total number of listing pages
    # for i in range(1, lastfy + 1):  # TODO: crawl every listing page
    for i in range(1, 2):  # first collect all book links
        list_url = one_url + str(i) + '.html'
        bp = get_html(list_url)
        # All book links on the current listing page.
        foldNameList += bp.select('.txt-list-row5 span.s1')   # category -> folder name
        bookNameList += bp.select('.txt-list-row5 span.s2 a') # link + title
        autoNameList += bp.select('.txt-list-row5 span.s4')   # author
    # Create the category folder tree.
    saveFold = config['config']['path']
    os.makedirs(saveFold, exist_ok=True)
    # Deduplicate by category TEXT: a set of Tag objects compares by object
    # identity, so the original set() never collapsed duplicate categories.
    for fold_name in {tag.text for tag in foldNameList}:
        os.makedirs(saveFold + fold_name, exist_ok=True)
    # Download books concurrently; each worker saves its own category/title.txt.
    results = []
    with ThreadPoolExecutor(max_workers=8) as pool:
        for a, s1, s4 in zip(bookNameList, foldNameList, autoNameList):
            future = pool.submit(get_book, html, s1.text, a['href'], a.text, s4.text)
            results.append(future)


# Script entry point: read config.ini next to this file, then download one book.
try:
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    config = configparser.ConfigParser()
    config.read(os.path.join(BASE_DIR ,'config.ini'),encoding='utf-8')
    get_book(config['config']['url'],'新','/dir/986/986609.htm','从奋斗开始','作者')
    # get_booklist(config['config']['url'])  # fetch every book in the listing
except BaseException as e:
    print(str(e))

    # Leftover notes on HTML entity cleanup (currently unused):
    # content = ''.join(text)
    # content = html.unescape(content)
    # &nbsp; : space
    # <br /> : line break; on Windows a newline is \n, on mac \r\n
    # replace("text to replace", "replacement text")
    # text = title + '\n\n' + content.replace('&nbsp;', ' ').replace('<br />', '\n').replace('<i>','').replace('</i>','')
    # get_image(url)
