'''
@Author: your name
@Date: 2020-03-23 20:31:27
@LastEditTime: 2020-03-23 21:00:39
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: \giee\learn_python\爬虫\爬虫基础\练习项目\requests多ip.py
'''
import requests
from fake_useragent import UserAgent
from lxml import etree  # 将字符串转成lxml可以解析的格式
import random

def get_html(url):
    """Fetch *url* using a random User-Agent and a random proxy IP.

    url: str -- the page URL to request.
    Returns the decoded response body (str).
    """
    # Forged request header with a random browser User-Agent.
    headers = {
        "User-Agent": UserAgent().random
    }

    # Candidate proxy IPs.
    # NOTE(review): no scheme/port given — requests expects proxies like
    # "http://host:port"; confirm these proxies answer on the default port.
    ip = ["121.237.149.203", "114.99.54.65", "110.73.83.2", "121.237.148.82", "121.237.148.98"]

    # Bug fix 1: random.sample(ip, 1) returns a *list*; requests needs a
    # plain string, so use random.choice(ip).
    # Bug fix 2: requests matches the proxies-dict key against the lowercase
    # URL scheme, so the key must be "https", not "HTTPS".
    proxy = {
        "https": random.choice(ip)
    }

    print(proxy)

    # Issue the request with the forged header and the chosen proxy.
    response = requests.get(url, headers=headers, proxies=proxy)
    print(type(response.text))

    return response.text

def main():
    """Scrape Qidian monthly-ticket ranking pages 1-49 and print book/author pairs."""

    for page in range(1, 50):

        url = "https://www.qidian.com/rank/yuepiao?page={}".format(page)

        # Fetch the page HTML.
        # Fix: the original bound this to `str`, shadowing the builtin.
        html = get_html(url)

        # Parse the string into an lxml element tree.
        root = etree.HTML(html)

        # XPath extraction (each call returns a list of strings).
        # Fix: the original reused `book_name`/`author` as both the list
        # names and the zip loop variables, clobbering the lists.
        book_names = root.xpath('//h4/a/text()')
        authors = root.xpath('//p[@class="author"]/a[1]/text()')

        # Pair titles with authors positionally and print each pair.
        for book_name, author in zip(book_names, authors):
            print(book_name + " : " + author)


# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    main()