import requests
# Simple novel scraper built on requests + parsel (not scrapy)
import parsel
"""获取网页源代码"""

#模拟浏览器发送请求
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0'
}

#下载单章内容

def download_one_chapter(target_url):
    response = requests.get(target_url,headers=headers)
    # target_url为请求的网址
    #response 服务返回内容及对象
    # pycharm ctrl+鼠标左键查看源码
    #文本格式解码 万能解码（不一定）
    response.encoding=response.apparent_encoding
    #获取网页源码文本内容，字符串
    html=response.text
    # print(html)
    """从网页源代码中拿到信息"""
    sel = parsel.Selector(html)
    # extract 提取标签内容
    # 提取第一个内容，标题，伪类选择器选择属性，css选择标签
    title=sel.css('.content h1::text').extract_first()
    # 提取所有的内容
    contents = sel.css('#content::text').extract()
    """数据清除 清楚空白字符串"""
    #列表推导式
    #去除两端空白字符，对列表操作
    contents1=[content.strip() for content in contents]
    #print(contents1)
    #把列表变成字符串
    text = '\n'.join(contents1)
    #print(text)
    """保存小说内容"""
    file=open(title+'.txt',mode='w',encoding='utf-8')
    #只能写入字符串
    file.write(title)
    file.write(text)
    #关闭文件
    file.close()

"""获取书籍每章链接，目录页"""
def get_chapters_links(target_url):
    #book_url = 'http://www.shuquge.com/txt/8659/2324752.html'
    response = requests.get(target_url)
    response.encoding=response.apparent_encoding
    html=response.text
    sel=parsel.Selector(html)
    links=sel.css('dd a::attr(href)').extract()
    return links

"""下载一本小说"""
def get_one_book(book_url):
    links = get_chapters_links(book_url)
    for link in links:
        #print('http://www.shuquge.com/txt/8659/'+link)
        download_one_chapter('http://www.shuquge.com/txt/8659/'+link)

"""获取整个站点的小说目录"""
def get_category_links(novelWeb_url):
    """未完"""



"""获取整个站点小说"""
def get_all_books(novelWeb_url):
    """未完"""


if __name__ == '__main__':
    # To download a different novel, point this at its index (TOC) page.
    book_index_url = 'http://www.shuquge.com/txt/8659/index.html'
    get_one_book(book_index_url)