# -*- coding:utf-8 -*-
# @Time:2020/4/25 18:50
# @Author:lbjyu
# @Email:1154363414@qq.com
# @File:XzhdxNovel

import requests
from bs4 import BeautifulSoup
from db.Mysql_DbUtils import MyPymysqlPool

# Shared request headers: a desktop Firefox User-Agent so the site serves
# normal pages.  (The original had a module-level `global headers` statement,
# which is a no-op at module scope and has been removed.)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0'}
# Chapter-index URL of the novel on zongheng.com (original comment: 星辰变).
book = 'http://book.zongheng.com/showchapter/189169.html'
# Shared DB connection pool, configured from the "dbMysql" config section.
mysql = MyPymysqlPool("dbMysql")


# Fetch the body text of a single chapter page.
def get_contents(chapter):
    """Download the chapter page at URL *chapter* and return its body text.

    Raises requests.RequestException on network failure and ValueError
    when the expected content <div> is missing; `main()` catches both.
    """
    # BUG FIX: reuse the module-level User-Agent headers (main() already
    # sends them) and add a timeout so a dead connection cannot hang forever.
    req = requests.get(url=chapter, headers=headers, timeout=30)
    bf = BeautifulSoup(req.text, 'html.parser')
    # The chapter body lives in <div class="content" itemprop="acticleBody">;
    # the misspelt "acticleBody" matches the site's actual markup, per the
    # original author's comment — do not "fix" it.
    texts = bf.find('div', class_="content", itemprop="acticleBody")
    if texts is None:
        # Explicit error instead of the original's opaque AttributeError.
        raise ValueError("chapter body not found: %s" % chapter)
    # Drop only the first newline so the stored text reads cleanly in the DB.
    return texts.text.replace("\n", "", 1)


# Persist one chapter into the `books` table.
def write_db(chapter, content):
    """Insert a (title, content) row for one chapter and commit it."""
    insert_sql = (
        "INSERT INTO books (title, content) "
        "VALUES(%(title)s, %(content)s);"
    )
    mysql.insert(insert_sql, {"title": chapter, "content": content})
    # Commit after every chapter so progress is visible in the table
    # immediately.
    mysql.end()


# Entry point: crawl the chapter index, then fetch and store every chapter.
def main():
    """Scrape every chapter of the book at `book` and store each via write_db()."""
    res = requests.get(book, headers=headers)
    soup = BeautifulSoup(res.text, 'html.parser')
    # Each volume on the index page is a <ul class="chapter-list clearfix">.
    volumes = soup.findAll("ul", class_="chapter-list clearfix")
    # BUG FIX: the original passed len(...) as a second print() argument,
    # which printed the literal "%d" — use real %-formatting instead.
    print("一共大的章节数 %d" % len(volumes))
    for volume in volumes:
        # Each chapter inside a volume is an
        # <a href="http://book.zongheng.com/chapter/...">.
        links = volume.findAll("a")
        # BUG FIX: the original printed len(chapterALl) (the volume count)
        # here instead of the number of chapters in this volume.
        print("当前章节子章节数 %d" % len(links))
        for each in links:
            try:
                url = each.get('href')
                content = get_contents(url)
                title = each.string
                write_db(title, content)
            except Exception as e:
                # Best-effort: log and continue so one bad chapter does not
                # abort the whole crawl.
                print(e)
    mysql.dispose()
    # NOTE(review): this message names 雪中悍刀行 but the comment on `book`
    # says 星辰变 — confirm which novel book id 189169 actually is before
    # changing either string.
    print('雪中悍刀行所有章节数据写表结束')


# Run the crawl only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
