# !python3      # 使用工具:PyCharm
# 用户登录名:yyds_l
# 项目名称: python爬虫     文件名称:001_爬取小说
# 创建日期:2022/3/23      创建时间:22:07
# 2022/03/23 22:07

"""ok"""
import os
import re
import urllib.error
import urllib.parse
import urllib.request

import requests
from bs4 import BeautifulSoup

'''
    一. 获取数据
        利用urllib获取网页的数据
    二. 筛选数据
        使用bs4查找需要的数据
    三. 保存数据
        re库写入文件，保存数据
    四. 使用数据
        （可视化数据等等）
'''


# 小说目录 (novel table of contents)
def getData(header):
    """Fetch the novel's chapter-list page and print the book metadata.

    Args:
        header: dict of HTTP request headers (spoofed browser User-Agent).

    Returns:
        The decoded HTML of the chapter-list page as a str, or None when
        the request or decoding fails.
    """
    # 文章地址 (hard-coded table-of-contents URL for this one novel)
    url = "http://book.zongheng.com/showchapter/1013348.html"
    try:
        req = urllib.request.Request(url=url, headers=header)
        # Close the HTTP response when done; the original leaked the connection.
        with urllib.request.urlopen(req) as resp:
            content = resp.read().decode("utf-8")
        soup = BeautifulSoup(content, 'html.parser')
        # Book title lives in <h1> under .book-meta.
        for o in soup.select(".book-meta > h1"):
            print("-"*16, "正在爬取小说：《%s》" % (o.string), "-"*16)
        # Author / status / word-count line lives in <p> under .book-meta.
        for ol in soup.select(".book-meta > p"):
            print("%s" % (ol.text))
        return content
    except (urllib.error.URLError, UnicodeDecodeError) as err:
        # Report the failure but keep the script alive; caller must
        # check for None before parsing.
        print(err)
        return None


# 获取章节URL (extract the per-chapter links)
def url_list(content):
    """Parse the chapter-list HTML and return all chapter anchor tags.

    Args:
        content: HTML of the table-of-contents page as a str.

    Returns:
        A list of bs4 <a> tags, one per chapter (selector '.col-4>a').
    """
    page = BeautifulSoup(content, "html.parser")
    anchors = page.select('.col-4>a')
    return anchors


# 获取每一个章节的内容 (download every chapter's body text)
def data_cont(cont_data, header):
    """Download each chapter and append it to 小说目录/<章节名>.txt.

    Args:
        cont_data: sized iterable of bs4 <a> tags, each carrying the
            chapter name (text), chapter URL (href) and a progress
            label (title).
        header: dict of HTTP headers forwarded to every request.

    Raises:
        urllib.error.URLError: when a chapter page cannot be fetched.
    """
    # Create the output directory up front; the original crashed with
    # FileNotFoundError on a fresh checkout.
    os.makedirs("小说目录", exist_ok=True)
    for i in cont_data:
        name = i.text           # 小说章节名称 (also used as the file name)
        cont_url = i['href']    # 章节url
        print_ok = i['title']   # 章节信息 (progress message)
        # 访问每一个章节 — close the response instead of leaking it.
        req = urllib.request.Request(url=cont_url, headers=header)
        with urllib.request.urlopen(req) as resp:
            html_data = resp.read().decode('utf-8')
        soup = BeautifulSoup(html_data, 'html.parser')
        # Open the chapter file once per chapter; the original reopened it
        # (and redundantly called fp.close() inside `with`) per paragraph.
        with open("小说目录/" + name + ".txt", "a", encoding="utf-8") as fp:
            fp.write(name + '\n')
            # Book info header (.bookinfo), then every paragraph of the body.
            for bk in soup.select('.bookinfo'):
                fp.write(bk.text)
            for k in soup.select('div .content > p'):
                fp.write(k.text + '\n')
        print("正在爬取：%s" % (print_ok), "-"*12+"成功！！")
    print("ok", '共 %s个文件' % (len(cont_data)))


if __name__ == "__main__":
    header = {  # 伪装请求头
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/98.0.4758.102 Safari/537.36 "
    }
    content = getData(header)        # 爬取小说目录
    cont_data = url_list(content)       # 每一个章节url
    data_cont(cont_data, header)        # 每一个章节的内容
