
'''
Scrape a novel chapter-by-chapter from xbiquge.la and append the text to a
local file. A book is identified by its listing-page id, e.g.:

'http://www.xbiquge.la/0/8'
'''

import re
import urllib.request
import requests
from lxml import etree
import gzip
import pymysql

def gerbookurls(booknub):
    """Fetch the chapter listing for book *booknub* on xbiquge.la.

    booknub: numeric book id (the trailing path segment of /0/<booknub>).
    Returns: list of dicts, each {'chapt_urls': absolute chapter URL,
             'chapt_names': chapter title}.
    """
    header = {
        'User-Agent': 'Mozilla/5.0(Macintosh;Inter Mac OS X 10_13_3) AppleWebkit/537.36 (KHTML,like Gecko)'
                      'Chrom/65.0.3325.162 Safari/537.36'
    }
    url = f'http://www.xbiquge.la/0/{booknub}'
    charptes = requests.get(url, headers=header).content.decode('utf-8')
    objects = etree.HTML(charptes)
    # Chapter links: each <dd> under the box_con listing holds one chapter <a>.
    objs = objects.xpath('//div[@class = "box_con"]/div/dl/dd')
    clist = []
    for obj in objs:
        try:
            chapt_url = obj.xpath('a/@href')[0]
            chapt_name = obj.xpath('a/text()')[0]
        except IndexError:
            # A <dd> with no <a> child (decorative/filler row) — skip it.
            # (Original used a bare `except: pass`, which also hid real bugs.)
            continue
        clist.append({
            'chapt_urls': 'http://www.xbiquge.la' + str(chapt_url),
            'chapt_names': chapt_name,
        })
    return clist


def getNovelContent(clist, name):
    """Download every chapter in *clist* and append it to <name>.txt.

    clist: list of {'chapt_urls': ..., 'chapt_names': ...} as produced by
           gerbookurls().
    name:  novel title, used as the output file name.
    """
    # Hoisted: same pattern for every chapter (original recompiled per chapter).
    chapter_reg = re.compile(r'<div id="content">(.*?)<p>', re.S)
    out_path = r'C:\Users\Administrator\Desktop\笔趣阁小说\%s.txt' % name
    for url in clist:
        chapter_url = url['chapt_urls']      # 章节的超链接
        chapter_title = url['chapt_names']   # 章节的名字
        response = urllib.request.urlopen(chapter_url)
        raw = response.read()  # read once; original re-fetched the page in the non-gzip branch
        # Public header accessor instead of the private headers._headers list.
        if response.headers.get('Content-Encoding') == 'gzip':
            chapter_html = gzip.decompress(raw).decode('utf-8')
            nbsp_replacement = "   \r\n"  # original kept indent + newline in the gzip branch
        else:
            chapter_html = raw.decode('utf-8')
            nbsp_replacement = ""         # original stripped the indent in the plain branch
        for content in chapter_reg.findall(chapter_html):
            content = content.replace("&nbsp;&nbsp;&nbsp;&nbsp;", nbsp_replacement)
            content = content.replace("<br />", "")
            # `with` guarantees the handle is closed — the original opened a
            # new file per content block and never closed any of them.
            with open(out_path, 'a', encoding='gb18030', errors='ignore') as f:
                f.write('\r\n')
                f.write(chapter_title)  # 写入章节名字
                f.write('\r\n')         # 换行
                f.write(content)        # 写入章节内容


def Get_word(name):
    """Look up the serial (book id) of a book whose name matches *name*.

    name: (partial) book name typed by the user; matched with SQL LIKE.
    Returns: the first matching serial, or None when nothing matched.
    """
    conn = pymysql.connect(host='127.0.0.1', port=3306, user='python',
                           password='Mysql_123', db='python', charset='utf8')
    try:
        with conn.cursor() as cur:
            # Parameterized query — the original interpolated user input
            # straight into the SQL string (SQL injection).
            sql = "SELECT serial FROM python.fiction_ WHERE book_name LIKE %s;"
            cur.execute(sql, ('%' + name + '%',))
            rows = cur.fetchall()
    finally:
        # Always release the connection, even if the query raises
        # (original leaked it on any error).
        conn.close()
    # Original returned on the first loop iteration, i.e. the first column
    # of the first row; implicitly None when there were no rows.
    return rows[0][0] if rows else None


def main():
    """Prompt for a novel title, resolve its book id, then download it."""
    title = input('请输入小说名字：')
    book_id = Get_word(title)
    chapters = gerbookurls(book_id)
    getNovelContent(chapters, title)



if __name__ == "__main__":
    main()

