import urllib.request
import re
import time

# Plan:
# 1. Analyze the site layout
# 2. Fetch the index page source
# 3. Extract the chapter links
# 4. Fetch each chapter's content
# 5. Save the novel to disk

# 爬取小说内容
def getNoverContent():
    # 获取页面代码   采集地址
    html = urllib.request.urlopen('http://www.quanshuwang.com/book/0/269').read()
    # 编码
    html = html.decode('gbk')

    # 获取超链接
    req = '<li><a href="(.*?)" title=".*?">(.*?)</a></li>'
    urls = re.findall(req,html)

    for url in urls:
        novel_url = url[0]
        novel_title = url[1]
        chaps = urllib.request.urlopen(novel_url).read()
        chaps_html = chaps.decode('gbk')
        req = r'</script>&nbsp;&nbsp;&nbsp;&nbsp;(.*?)<script type="text/javascript">'
        # 多行匹配
        req = re.compile(req,re.S)
        chaps_content = re.findall(req,chaps_html)

        # 替换
        chaps_content = chaps_content[0].replace('<br />','')
        chaps_content = chaps_content.replace('&nbsp;','')
        chaps_content = chaps_content.replace('(《》)','')
        chaps_content = "".join(chaps_content.split())
        chaps_content = chaps_content + '\n'
        chaps_content = chaps_content[0:3] + '\n' + chaps_content + '\n'
        # time.sleep(1)
        print("正在下载",novel_title)
        # f = open('{}.txt'.format(novel_title),'w')
        with open('1.txt'.format(novel_title),'a') as f:
            f.write(chaps_content)
        time.sleep(1)




# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    getNoverContent()