# -*- coding: utf-8 -*-
# @Time    : 2018/5/21 09:08
# @Author  : daigua
# @File    : 12-笔趣阁-beautifulsoup.py
# @Software: PyCharm


from bs4 import BeautifulSoup
import re
import urllib.request
import os


def gain_html_content(url):
    """Fetch a page over HTTP and return its body decoded as UTF-8.

    url: target URL to fetch.
    Returns: the page content as a str.
    Raises: urllib.error.URLError / HTTPError on network failure.
    """
    # Spoof a desktop-browser User-Agent so the site does not reject
    # the default urllib client.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36"
    }
    request = urllib.request.Request(url, headers=headers)
    # Context manager guarantees the connection is closed even on error
    # (the original never closed the response object).
    with urllib.request.urlopen(request) as response:
        return response.read().decode('utf-8')


def get_chapter(content, base_url="https://www.kuaiyankanshu.net"):
    """Parse a novel index page into a chapter table.

    content: HTML text of the novel's index page.
    base_url: site root prepended to each site-relative chapter href
        (default keeps the original hard-coded host, so existing
        callers are unaffected).
    Returns: (chapter_dict, title) where chapter_dict maps chapter
        name -> absolute chapter URL, and title is the page <title>.
    """
    soup = BeautifulSoup(content, "html.parser")
    # Page <title> doubles as the novel title / output directory name.
    title = soup.title.string
    # The chapter links live inside <div class="fulldir">.
    dir_div = soup.find("div", attrs={'class': 'fulldir'})
    # Chapters are the un-styled <a> tags whose href looks like "/....html".
    chapter_links = dir_div.find_all(
        "a", attrs={"style": "", "href": re.compile(r"/.*\.html")})
    # chapter name -> absolute URL
    chapter_dict = dict()
    for link in chapter_links:
        # link.string is the chapter name; href is site-relative.
        chapter_dict[link.string] = base_url + link.attrs["href"]
    return chapter_dict, title


def get_text(chapter_dict, title):
    """Download every chapter and save each one as a local text file.

    chapter_dict: mapping of chapter name -> chapter URL.
    title: novel title, used as the output directory name.
    """
    for name, url in chapter_dict.items():
        # Fetch and parse the chapter page.
        content = gain_html_content(url)
        soup_text = BeautifulSoup(content, "html.parser")
        chapter_div = soup_text.find("div", id="chaptercontent")
        # Guard against pages with a different layout: the original
        # crashed with AttributeError when the content div was missing.
        if chapter_div is None:
            print("%s: chapter content not found, skipping" % name)
            continue
        # Extract plain text from the tag tree and write it to disk.
        write2file(title, name, chapter_div.get_text())


def write2file(title, file_name, content):
    """Write one chapter to <title>/<file_name>.txt.

    title: novel title, used as the output directory name.
    file_name: chapter name; must be a valid filename component.
    content: chapter text to save.
    """
    print("%s下载中。。。" % file_name)
    # makedirs(exist_ok=True) replaces the race-prone
    # os.path.exists + os.mkdir check-then-create pair.
    os.makedirs(title, exist_ok=True)
    path = os.path.join(title, file_name + ".txt")
    # Explicit UTF-8: the platform's default locale encoding may not
    # be able to encode Chinese text (e.g. cp1252 on Windows).
    with open(path, 'w', encoding='utf-8') as f:
        f.write(content)
    print("%s下载完毕!" % file_name)


def main():
    """Entry point: scrape the novel's index page, then download and
    save every chapter to local txt files."""
    # Index page of the target novel.
    tar_url = "https://www.kuaiyankanshu.net/842931/"
    index_html = gain_html_content(tar_url)
    # Build the {chapter name: url} table and grab the novel title.
    chapters, novel_title = get_chapter(index_html)
    # Fetch each chapter and write it under a directory named after the title.
    get_text(chapters, novel_title)


if __name__ == "__main__":
    main()