""""
爬虫的基本流程:
1.分析网站
    url，请求方式
2.发送请求
    向目标网站发送请求，获得对应的响应
3.解析响应
    1.html：使用xpath解析，正则--文字数据
    2.二进制文件:直接存储--二进制文件
    3.JSON串:json转为字典--文字数据
4.保存数据
    mysql,txt,redis,
    读写要求不高的数据:mysql
    读写要求高的数据:redis
"""
import gzip
import time
import urllib.request as ur
from pathlib import Path
from urllib.parse import urljoin

from lxml import etree

def crwal(url, delay=2):
    """Fetch *url* with a GET request and parse the body into an lxml tree.

    Args:
        url: Address of the page to download.
        delay: Seconds to pause after the request, as a politeness
            throttle between successive calls. Defaults to 2 to match
            the original fixed sleep; pass 0 to skip the pause.

    Returns:
        An lxml ``Element`` tree built from the raw response bytes, so
        callers can run XPath queries against it.
    """
    # Plain GET request; read the whole response body into memory.
    res = ur.urlopen(url)
    body = res.read()
    # Throttle so repeated calls do not hammer the server.
    if delay:
        time.sleep(delay)
    # lxml accepts raw bytes and sniffs the document encoding itself.
    return etree.HTML(body)





"""
获取章节内容
需要传入章节网址

传入章节网址后，通过构建element树对象，对获取到的章节内容进行筛选
"""
def content(url):
    print("爬取内容中")
    ele = crwal(url)
    chapter_content = str(ele.xpath("//div[@id='content']/text()"))
    chapter_content = chapter_content.replace("[", "")
    chapter_content = chapter_content.replace("]", "")
    chapter_content = chapter_content.replace("'", "")
    chapter_content = chapter_content.replace("\\r,", "\n")
    chapter_content = chapter_content.replace("\\xa0\\xa0\\xa0\\xa0", "")
    return chapter_content

""""
将章节名称与章节内容拼接起来
获取到全部的章节名和章节网址后
通过字符串拼接获取章节名称+章节内容(调用函数)
"""
def chapter_name(url):
    print("执行内容拼接当中")
    ele = crwal(url)
    content_name = ele.xpath("//div[@id='list']/dl/dd/a/text()")
    content_url = ele.xpath("//div[@id='list']/dl/dd/a/@href")
    n = 0
    contents = " "
    for i in content_url:
        i = "http://www.xbiquge.la/" + i
        contents += str(content_name[n])+"\n" + content(i)+"\n"
        n += 1
    return contents



"""
创建书名对应的txt文件
创建文件夹并写入爬取下来的内容
"""
def novel(name,url):
    name_str = "F:\课程资料\爬虫\day_01\笔趣阁\\" + name + ".txt"
    print("创建书籍中")
    with open(name_str, "w", encoding='utf-8') as f:
        f.writelines(chapter_name(url))
        print("拷贝完成")
        f.flush()


if __name__ == '__main__':
    # Entry point: load the site-wide catalogue page, then download the
    # first book it lists.
    catalogue_url = "http://www.xbiquge.la/xiaoshuodaquan/"
    tree = crwal(catalogue_url)
    titles = tree.xpath("//div[@class='novellist']/ul/li/a/text()")
    links = tree.xpath("//div[@class='novellist']/ul/li/a/@href")
    # Show which book is about to be fetched before starting the crawl.
    print(str(titles[0]), type(titles[0]))
    print(links[0])
    novel(titles[0], links[0])



