# -*- coding:utf-8 -*-   # Python 2 defaults to ASCII source encoding, so this declaration is required for files containing Chinese text.
import requests
import re
import  pymysql

# MySQL connection used to store the scraped novels and chapters.
# NOTE(review): credentials are hard-coded — move to config/env for real use.
conn = pymysql.connect(
    host = 'localhost',
    port = 3306,
    user = 'root',
    passwd = '123456',
    db = 'python',
    charset = 'utf8'
)
# Cursor shared by all INSERT statements in the driver loop below.
cursor = conn.cursor()
def get_novel_sort_list():
    """Scrape the novel category listing page.

    Returns:
        list[tuple[str, str]]: (detail_page_url, novel_title) for every
        novel anchor found on the listing page.
    """
    # timeout so a hung server cannot stall the whole scrape indefinitely
    response = requests.get('http://www.quanshuwang.com/list/1_1.html', timeout=10)
    # the site serves GBK-encoded pages; requests would guess wrong otherwise
    response.encoding = 'gbk'
    result = response.text
    # capture the href and link text of each title anchor
    reg = r'<a target="_blank" title=".*?" href="(.*?)" class="clearfix stitle">(.*?)</a>'
    return re.findall(reg, result)

def get_novel_content(url):
    """Return the reader URL behind a novel's "开始阅读" (start reading) button.

    Args:
        url: the novel's detail-page URL, e.g.
            "http://www.quanshuwang.com/book_9055.html".

    Returns:
        str: the chapter-index URL, e.g. "http://www.quanshuwang.com/book/0/269".

    Raises:
        IndexError: if the page contains no start-reading link.
    """
    response = requests.get(url, timeout=10)  # timeout guards against a hung server
    response.encoding = 'gbk'  # site pages are GBK-encoded
    result = response.text
    reg = r'<a href="(.*?)" class="reader" title=".*?">开始阅读</a>'
    return re.findall(reg, result)[0]

def get_chapter_url_list(url):
    """Scrape a novel's chapter index page.

    Args:
        url: the chapter-index URL returned by get_novel_content().

    Returns:
        list[tuple[str, str]]: (chapter_url, chapter_title) for each
        chapter listed on the page.
    """
    response = requests.get(url, timeout=10)  # timeout guards against a hung server
    response.encoding = 'gbk'  # site pages are GBK-encoded
    result = response.text
    reg = r'<li><a href="(.*?)" title=".*?">(.*?)</a></li>'
    return re.findall(reg, result)

def get_chapter_content(url):
    """Download one chapter page and extract the chapter body text.

    Args:
        url: a chapter URL, e.g.
            "http://www.quanshuwang.com/book/135/135776/36150580.html".

    Returns:
        str: the raw HTML between the page's style5/style6 script markers,
        which is where this site embeds the chapter text.

    Raises:
        IndexError: if the page does not contain the expected markers.
    """
    response = requests.get(url, timeout=10)  # timeout guards against a hung server
    response.encoding = 'gbk'  # site pages are GBK-encoded
    result = response.text
    # backslashes escape the literal parentheses of "style5();";
    # re.S lets ".*?" span the newlines inside the chapter body
    reg = r'style5\(\);</script>(.*?)<script type="text/javascript">style6'
    return re.findall(reg, result, re.S)[0]

# Driver: walk every novel on the listing page, store its name, then
# store every chapter's name and content, committing after each insert.
for novel_url, novel_name in get_novel_sort_list():
    novel_content_url = get_novel_content(novel_url)
    # Parameterized query — the original str.format() version was SQL-injectable
    # and broke outright on titles containing quote characters.
    cursor.execute("insert into novel1(name) values(%s)", (novel_name,))
    novelid = cursor.lastrowid  # auto-increment id of the novel row just inserted
    conn.commit()
    for chapter_url, chapter_name in get_chapter_url_list(novel_content_url):
        print(chapter_name)
        chapter_content = get_chapter_content(chapter_url)
        cursor.execute(
            "insert into chapter(name,content,novelid) values(%s,%s,%s)",
            (chapter_name, chapter_content, novelid),
        )
        conn.commit()


cursor.close()
conn.close()