"""
    使用requests库爬取燃文小说网
        目标网址：http://www.ranwenw.com/xiaoshuodaquan/
        需要设置的请求头：
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36 Edg/93.0.961.52',
            'Cookie': 'UM_distinctid=17b686300aa1d8-0f6ca80b47cb95-7868786b-144000-17b686300ab898; CNZZDATA1259263205=1590965726-1629539521-null%7C1629544942',
            'Referer': 'http://www.ranwenw.com/',
            'Cache-Control': 'max-age=0',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
            'Connection': 'keep-alive'
        }
        目标：爬取到返回页面中的所有小说的书名和小说对应的url路径
"""
import requests
from lxml import etree


def parse_novels(html):
    """Parse the novel-list page HTML and return a list of novel records.

    Each record is a dict ``{'name': <title>, 'href': <link>}``.

    Walks the DOM per-anchor so every title is paired with its own href.
    (The previous approach zipped two independent flat node lists —
    ``//li//text()`` and ``//li//@href`` — which mis-pairs entries whenever
    a <li> contains extra text nodes, and can raise IndexError when the
    two lists differ in length.)
    """
    tree = etree.HTML(html)
    novels = []
    # One <a href=...> per novel inside the #main listing.
    for anchor in tree.xpath('//*[@id="main"]/div/ul//li//a[@href]'):
        title = ''.join(anchor.itertext()).strip()
        novels.append({'name': title, 'href': anchor.get('href')})
    return novels


if __name__ == '__main__':
    # Target listing page: all novels on the site.
    url = 'http://www.ranwenw.com/xiaoshuodaquan/'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36 Edg/93.0.961.52',
        'Cookie': 'UM_distinctid=17b686300aa1d8-0f6ca80b47cb95-7868786b-144000-17b686300ab898; CNZZDATA1259263205=1590965726-1629539521-null%7C1629544942',
        'Referer': 'http://www.ranwenw.com/',
        'Cache-Control': 'max-age=0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
        'Connection': 'keep-alive'
    }
    response = requests.get(url=url, headers=headers)
    # Fail fast on HTTP errors instead of parsing an error page.
    response.raise_for_status()
    # The site serves GBK-encoded pages; decode explicitly rather than
    # trusting requests' charset guess.
    response_html = response.content.decode('gbk')
    tasks = parse_novels(response_html)
    # Print every novel's URL (one per line), as before.
    for task in tasks:
        print(task.get('href'))