# Standard library
import os
from urllib.parse import urljoin

# Third-party
import requests
from bs4 import BeautifulSoup

# Project-local
import mysql_DBUtils
from mysql_DBUtils import MyPymysqlPool

# Browser-like request headers so the site serves normal pages instead of
# blocking the scraper. (The original `global headers` statement was removed:
# `global` at module level is a no-op — module-scope assignment is already
# global.)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60'}
# Index page of the novel ("全职艺术家") on shuquge.com.
book = 'https://www.shuquge.com/txt/125497/'
# Shared MySQL connection pool, configured from the "dbMysql" section.
mysql = MyPymysqlPool("dbMysql")

# Fetch one chapter page and return its story text.
def get_contents(chapter):
    """Download the chapter page at URL *chapter* and return its text.

    Raises requests.RequestException on network failure; the caller
    (main) catches and logs it per chapter.
    """
    # Send the same browser-like headers used in main() — the original call
    # went out without them, inconsistently — and bound the wait so a single
    # dead connection cannot hang the whole crawl.
    req = requests.get(url=chapter, headers=headers, timeout=30)
    req.encoding = 'utf-8'
    bf = BeautifulSoup(req.text, 'html.parser')
    # The chapter body lives in <div id="content">.
    texts = bf.find_all('div', id="content")
    # \xa0 is the non-breaking space (&nbsp;); this site uses four in a row
    # as a paragraph indent, so turn each run into a newline.
    content = texts[0].text.replace('\xa0' * 4, '\n')
    return content

# Persist one chapter into the `novel` table.
def write_db(chapter, content):
    """Insert a (title, content) row for the given chapter via the pool."""
    # Parameterized query — the pool's insert() binds the values safely.
    mysql.insert(
        "INSERT INTO novel (title, content) VALUES(%(title)s, %(content)s);",
        {"title": chapter, "content": content},
    )

# Entry point: crawl the chapter index, download every chapter, store it.
def main():
    """Scrape all chapters of the novel and write them to the database."""
    # Timeout added so a stalled index request cannot hang the run.
    res = requests.get(book, headers=headers, timeout=30)
    res.encoding = 'utf-8'
    # Parse with the stdlib html.parser backend.
    soup = BeautifulSoup(res.text, 'html.parser')
    # The first 12 anchors in the chapter list are "latest chapter" shortcuts
    # and navigation links, not real chapters — skip them.
    links = soup.find('div', class_='listmain').find_all('a')[12:]
    print('总章节数: %d ' % len(links))
    for each in links:
        try:
            # urljoin handles relative AND absolute hrefs, unlike the
            # original plain string concatenation (book + href).
            url = urljoin(book, each.get('href'))
            content = get_contents(url)
            # Separate names for URL and title — the original reused
            # `chapter` for both, which was confusing.
            title = each.string
            print(title)
            write_db(title, content)
        except Exception as e:
            # Best-effort crawl: log the failure and move on to the next
            # chapter instead of aborting the whole run.
            print(e)
    # Release the pooled DB connections once all chapters are processed.
    mysql.dispose()

if __name__ == '__main__':
    main()