import functools
import logging
import os
import time

import requests
from bs4 import BeautifulSoup
"""
单线程爬取斗破苍穹小说
"""


# ====================== Initialisation

# Configure the root logger; DEBUG is very verbose (whole pages are dumped below).
logging.basicConfig(level=logging.DEBUG)
# Browser-like User-Agent so the site does not reject the crawler outright.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'
}
# Base domain of the novel site; chapter hrefs from the index page are relative to it.
DOMAIN = 'http://www.xbiquge.la'
# Output directory for the crawled txt file.
# NOTE(review): the directory name says 斗罗大陆 but the crawled novel is
# 斗破苍穹 — looks like a leftover from another project; confirm before renaming.
DIR_NAME = './斗罗大陆小说'
if not os.path.exists(DIR_NAME):
    os.makedirs(DIR_NAME)

# =======================工具函数


def timer(func):
    """Decorator that logs the wall-clock run time of *func*.

    Fixes over the original: the wrapper now propagates the wrapped
    function's return value (the original always returned None), and
    ``functools.wraps`` preserves the function's metadata so
    ``func.__name__`` in the log message — and introspection in general —
    keeps working on decorated functions.

    @param func the callable to time
    @return a wrapper with the same signature and return value as *func*
    """
    @functools.wraps(func)
    def inner(*args, **kwargs):
        start = time.time()
        ret = func(*args, **kwargs)
        end = time.time()
        logging.info(' 执行{}函数花费时间: {} s'.format(func.__name__, end-start))
        return ret  # bug fix: original dropped the result
    return inner


# =======================业务函数

def get_html(url):
    """
    Fetch *url* and return its HTML decoded as UTF-8.

    @param url page URL to fetch
    @return str decoded HTML text
    @raises requests.RequestException on network failure, timeout, or an
            HTTP error status
    """
    # Send the module-level UA header (it was defined but never used before)
    # and bound the request with a timeout so one stalled server cannot hang
    # the whole crawl.
    response = requests.get(url, headers=headers, timeout=30)
    # Fail loudly on 4xx/5xx instead of silently decoding an error page.
    response.raise_for_status()
    return response.content.decode('utf-8')


def get_chapater_url_list(index_url="http://www.xbiquge.la/1/1710/"):
    """
    Fetch the novel's index page and extract all chapter URLs.

    @param index_url index page of the novel; defaults to the original
           hard-coded novel so existing callers are unaffected
    @return iterator of chapter href strings (relative paths under DOMAIN)
    @raises IndexError if the page contains no element with id="list"
    """
    html = get_html(index_url)
    soup = BeautifulSoup(html, 'lxml')
    logging.debug("获取小说章节html如下: ")
    logging.debug(soup.prettify())
    # All chapter links on this site live inside the element with id="list".
    a_tag_list = soup.select('#list')[0].select('a')

    logging.debug("提取章节a标签列表如下: ")
    logging.debug('共{}个a标签,第一个: {},最后一个: {}'.format(
        len(a_tag_list), a_tag_list[0], a_tag_list[-1]))

    # map() returns a lazy iterator in Python 3, so hrefs are pulled on demand.
    a_tag_url_list = map(lambda a_tag: a_tag['href'], a_tag_list)
    logging.debug('提取a标签列表的url完成')
    return a_tag_url_list


def crawling_article_and_write_file(url_list, name):
    """
    Crawl each chapter page and append its title and body to one txt file.

    @param url_list iterable of chapter URL paths, relative to DOMAIN
    @param name output file name, created inside DIR_NAME (overwritten)
    @raises IndexError if a chapter page lacks the expected title/content tags
    """
    file_name = os.path.join(DIR_NAME, name)
    with open(file_name, 'w', encoding='utf-8') as fp:
        for url in url_list:
            html = get_html(DOMAIN+url)
            soup = BeautifulSoup(html, 'lxml')
            # Chapter title lives in <div class="bookname"><h1>…</h1>.
            title = soup.select('div.bookname')[0].h1.text
            # Chapter body is the text of <div id="content">.
            article = soup.select('div#content')[0].text
            # Bug fix: writelines() on a plain str writes it character by
            # character and adds no separators, so every title ran straight
            # into the previous chapter's text. Write explicitly with
            # newline separators instead.
            fp.write(title)
            fp.write('\n')
            fp.write(article)
            fp.write('\n')


@timer
def main():
    """Entry point: gather the chapter URL list and crawl it into one txt file."""
    crawling_article_and_write_file(get_chapater_url_list(), '斗破苍穹.txt')


# Run the crawler only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
