#爬虫第二本小说   爬取的数据可能会有些换行不对
import requests
import re
# Retry count for flaky connections (left disabled, as in the original).
# requests.adapters.DEFAULT_RETRIES = 5

# Index page of the target novel.
url = 'https://www.qu.la/book/25052/'

# Fetch the novel's index page (plain HTTP GET, simulating a browser visit).
response = requests.get(url)
# Decoded HTML source of the index page.
repon = response.text

# Novel title, taken from the Open Graph meta tag.
title = re.findall(r'<meta property="og:title" content="(.*?)"/>', repon)[0]
print(title)

# The chapter list lives inside the first <dl>...</dl> element.
d1 = re.findall(r'<dl>.*?</dl>', repon, re.S)[0]
# Each entry: (style attribute, relative chapter URL, chapter title).
chapter_info_list = re.findall(r'<a style="(.*?)" href="(.*?)">(.*?)</a>', d1, re.S)
print(chapter_info_list)

# HTML noise fragments to strip from each chapter body (not story text).
NOISE_FRAGMENTS = (
    '<br>', '<br />', '<br/>', '</br>',
    # Bug fix: the original replaced '&nbsp' (no semicolon), leaving a stray
    # ';' in the output for every non-breaking-space entity.
    '&nbsp;',
    # Bug fix: the original used raw r'\u3000', which matches the literal six
    # characters "\u3000" rather than the real ideographic-space character
    # present in the decoded HTML, so it never matched anything.
    '\u3000',
    '<script>chaptererror();</script>',
    '\n',
)

# Use a context manager so the output file is flushed and closed even if a
# chapter request raises mid-run (the original leaked the open file handle).
with open('%s.txt' % title, 'w', encoding='utf-8') as fb:
    # Skip the first 12 anchors: site navigation / "latest chapters" links,
    # not real chapters (presumably — verify against the live page).
    for chapter_info in chapter_info_list[12:]:
        chapter_a, chapter_url, chapter_title = chapter_info
        chapter_url = "https://www.qu.la%s" % chapter_url
        print(chapter_url, chapter_title)
        # Download this chapter's page.
        chapter_response = requests.get(chapter_url)
        chapter_html = chapter_response.text
        # Chapter text sits inside <div id="content">...</div>.
        chapter_content = re.findall(r'<div id="content">(.*?)</div>', chapter_html, re.S)[0]
        for fragment in NOISE_FRAGMENTS:
            chapter_content = chapter_content.replace(fragment, '')
        # Write title and body on separate lines: the original wrote both with
        # no separator after stripping every '\n', so chapters ran together
        # (the broken line breaks the file-header comment complains about).
        fb.write(chapter_title + '\n')
        fb.write(chapter_content + '\n')
        print(chapter_content)
