"""
懒人小说网爬虫
"""
from bs4 import BeautifulSoup
from urllib import request
import sys
import os
import time

# Site root URL; chapter hrefs on the index page are site-relative and get appended to this
web_site = "https://www.lrxsw.org/"
book_name = ""


def get_raw_web_data(url):
    """
    Fetch and decode the raw HTML source of a page.

    Shared by both the chapter-index fetch and the article-content fetch.
    The site has basic anti-scraping checks, so a browser User-Agent
    header is sent with every request.

    :param url: target page URL
    :return: decoded HTML text, or None if the request or decode failed
    """
    # Catch everything so one bad page doesn't kill a long-running crawl.
    try:
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
        req = request.Request(url=url, headers=headers)
        # Context manager closes the connection deterministically instead
        # of leaking it until garbage collection.
        with request.urlopen(req) as response:
            html = response.read()
        # The site serves GBK-family Chinese text; gb18030 is its superset.
        return html.decode('gb18030')
    except Exception as e:
        # Best-effort: report the failure and hand the caller None.
        print(e)
        return None


def get_index_chapter(url):
    """
    Parse the book's table-of-contents page.

    Side effect: sets the module-level ``book_name`` to
    "<title>    <author>", later used to build the output file path.

    :param url: URL of the chapter-index page
    :return: list of absolute chapter URLs, in page order
    """
    global book_name
    raw_html = get_raw_web_data(url)
    soup = BeautifulSoup(raw_html, 'lxml')
    table_content = soup.select('html body div#wrapper div#container div#main div.inner dl.chapterlist dd a')
    book_name = soup.find(class_='btitle').h1.text + "    " + soup.select('html body div#wrapper div#container div.bookinfo div.btitle em a')[0].get_text()

    # Chapter hrefs are site-relative, so prefix the site root.
    return [web_site + element.get('href') for element in table_content]


def get_article_content(url):
    """
    Download one chapter page and return its title plus body text.

    :param url: URL of a single chapter page
    :return: leading newline, chapter title, newline, then chapter body
    """
    page = BeautifulSoup(get_raw_web_data(url), 'lxml')
    title = page.find(id='BookCon').h1.text
    # Collapse the site's Windows-style paragraph breaks to plain newlines.
    body = page.find(id='BookText').text.replace('\n\r\n', '\n')
    return '\n'.join(['', title, body])


if __name__ == "__main__":
    target = sys.argv[1]
    table_list = get_index_chapter(target)
    path = os.path.join(os.getcwd(), "Ebook/"+book_name+".txt")
    if len(sys.argv) > 2:
        index = table_list.index(sys.argv[2])
        table_list = table_list[index:]
    total_chapter = len(table_list)
    total_now = 1
    for i in table_list:
        print(time.strftime("%m-%d %H:%M:%S", time.localtime()) + "    " + i + "    " + str(total_now) + "/" + str(total_chapter))
        content = get_article_content(i)
        with open(path, 'a') as f:
            f.write(content)
        time.sleep(1)
        total_now += 1
