# 导入requests库
import json
import re

import pymysql
import requests
# 导入文件操作库
import codecs
from bs4 import BeautifulSoup
import sys
import demjson

import importlib



# Give requests a Chrome-like User-Agent so the site serves normal pages
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36'}

# Site root, prepended to the relative chapter hrefs found in the catalogue
server = 'http://www.tadu.com'


# URL of the novel's table-of-contents page
book = 'http://www.tadu.com/book/catalogue/477699'


# Open the MySQL connection (module-level; shared by write_db)
# NOTE(review): empty root password and charset 'utf8' (not utf8mb4) — confirm
conn = pymysql.connect(
 host='localhost',
user ='root', password ='',database ='db_0',
charset ='utf8')



# 获取章节内容
def get_contents(chatper):
    req = requests.get(url=chatper, headers = headers)
    html = req.content
    html_doc = str(html,'utf8')
    bf = BeautifulSoup(html_doc, 'html.parser')
    content_url = bf.find_all('input',id='bookPartResourceUrl')[0].get('value')
    req2 = requests.get(content_url, headers = headers)
    html2 = req2.content
    html_doc2 = str(html2, 'utf8')
    #看正文内容在哪
    json1 = re.match(".*?({.*}).*",html_doc2,re.S).group(1)
    json_obj = demjson.decode(json1)
    return json_obj['content']

#写入数据库
def write_db(chapter, content):
    cursor = conn.cursor()
    sql = "INSERT INTO novel(title, content) VALUES(%(title)s, %(content)s);"
    params = {"title": chapter, "content": content}
    cursor.execute(sql, params)
    conn.commit()
    cursor.close()

def main():
    """Walk the book's table of contents and store every chapter in MySQL.

    Failures on individual chapters are printed and skipped so one bad
    page does not abort the whole crawl.
    """
    response = requests.get(book, headers=headers)
    page = str(response.content, 'utf8')
    # Parse with the built-in html.parser backend
    catalogue = BeautifulSoup(page, 'html.parser')
    # Every chapter link sits directly under div.chapter.clearfix
    links = catalogue.select('div[class="chapter clearfix"]>a')

    for link in links:
        try:
            # Build the absolute chapter URL; strip stray spaces from hrefs
            url = (server + link.get('href')).replace(' ', '')
            write_db(url, get_contents(url))
        except Exception as err:
            # Best-effort: report and move on to the next chapter
            print(err)


if __name__ == '__main__':
    main()