from bs4 import BeautifulSoup
from urllib import request
import os
import time
import sys

# Title + author line of the novel being downloaded.  Set as a side effect by
# get_table_of_content() and read by the __main__ section to build the output
# file path.
book_name = ""


def get_raw_web_data(url):
    """
    Download a page and return its decoded HTML source.

    Shared helper used both when fetching the table of contents and when
    fetching chapter pages.

    :param url: target page URL
    :return: decoded HTML text of the page
    """
    # Use a context manager so the connection is closed deterministically
    # (the original left the response object to leak).
    with request.urlopen(url) as response:
        html = response.read()

    # The site's pages contain characters (e.g. 0x86) that GB2312 cannot
    # decode, so use the superset codec gb18030 to avoid decode errors.
    # (An alternative would be to strip/replace the offending bytes.)
    return html.decode('gb18030')


def get_table_of_content(url):
    """
    Fetch the novel's index page and return the list of chapter URLs.

    Side effect: sets the module-level ``book_name`` to the book title plus
    the author line, which the caller uses to name the output file.

    :param url: novel index page URL
    :return: list of chapter URLs with the duplicated "latest chapters"
             entries removed from the front
    """
    raw_html = get_raw_web_data(url)
    soup = BeautifulSoup(raw_html, 'lxml')
    table_content = soup.select('#list > dl:nth-child(1) > dd > a')

    global book_name
    # Title comes from the <h1>; the author line is the second <p>.
    # \xa0 (non-breaking space) is normalized to a plain space.
    book_name = (soup.find(id="info").h1.text
                 + soup.select("#info > p:nth-child(2)")[0].get_text().replace('\xa0', ' '))

    # Chapter URLs in page order.
    table_url = [anchor.get('href') for anchor in table_content]

    # De-duplicate the front of the list: the site prepends the latest nine
    # chapters, which also appear at the tail of the full listing.  Pop a
    # leading entry whenever it matches the corresponding entry counted back
    # from the end.  (The original also computed an unused `table_size`
    # variable, removed here.)
    list_cursor_tail = -1
    for _ in range(10):
        if table_url[0] == table_url[list_cursor_tail]:
            table_url.pop(0)
        list_cursor_tail -= 1

    return table_url


def get_article_content(url):
    """
    Fetch a chapter page and extract its title and body text.

    :param url: chapter page URL
    :return: chapter title, a newline, then the body (one line per <p>)
    """
    soup = BeautifulSoup(get_raw_web_data(url), 'lxml')
    title = soup.find(class_='bookname').h1.text
    paragraphs = soup.find(id='content').find_all('p')
    # strip('<') drops stray '<' characters left at the edges of a paragraph;
    # every paragraph is terminated with its own newline.
    body = "".join(p.text.strip('<') + "\n" for p in paragraphs)
    return title + "\n" + body


if __name__ == "__main__":
    # Usage: python script.py <book_index_url> [resume_chapter_url]
    target = sys.argv[1]
    table_list = get_table_of_content(target)

    # Create the output directory up front — open() below would otherwise
    # fail with FileNotFoundError on a fresh checkout.
    out_dir = os.path.join(os.getcwd(), "Ebook")
    os.makedirs(out_dir, exist_ok=True)
    path = os.path.join(out_dir, book_name + ".txt")

    # Optional second argument: resume the download from that chapter URL.
    if len(sys.argv) > 2:
        index = table_list.index(sys.argv[2])
        table_list = table_list[index:]

    total_chapter = len(table_list)
    for total_now, chapter_url in enumerate(table_list, start=1):
        print(time.strftime("%m-%d %H:%M:%S", time.localtime())
              + "    " + chapter_url + "    " + str(total_now) + "/" + str(total_chapter))
        content = get_article_content(chapter_url)
        # Explicit UTF-8: the gb18030-decoded text may contain characters the
        # platform's default encoding (e.g. cp1252) cannot represent.
        with open(path, 'a', encoding='utf-8') as f:
            f.write(content)
        time.sleep(1)  # throttle requests to be polite to the server