'''
@Author: your name
@Date: 2020-03-23 22:26:07
@LastEditTime: 2020-03-24 13:11:34
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: \giee\learn_python\爬虫\爬虫基础\练习项目\起点多线程应用.py
'''
'''
@Author: your name
@Date: 2020-03-23 20:31:27
@LastEditTime: 2020-03-23 21:00:39
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: \giee\learn_python\爬虫\爬虫基础\练习项目\requests多ip.py
'''
import requests
from fake_useragent import UserAgent
from lxml import etree  # 将字符串转成lxml可以解析的格式
import random
import threading
from queue import Queue
import time

def get_html(url, proxies=None):
    """Download a page and return its HTML body as text.

    Args:
        url: Page URL to fetch.
        proxies: Optional requests-style proxy mapping, e.g.
            ``{"https": "http://host:port"}``.  Defaults to None
            (direct connection), which matches the original behavior.

    Returns:
        str: The decoded response body (``response.text``).
    """
    # Randomized User-Agent so successive requests look like different browsers.
    headers = {
        "User-Agent": UserAgent().random
    }

    # NOTE(review): the original built {"HTTPS": random.sample(ip, 1)} from a
    # hard-coded IP pool and never passed it to requests.get().  That dict was
    # broken anyway: random.sample() returns a one-element *list*, the key
    # must be lowercase "https", and the bare IPs had no port/scheme.  The
    # caller can now supply a valid mapping via the `proxies` parameter.
    response = requests.get(url, headers=headers, proxies=proxies)

    return response.text


def main2(url):
    """Scrape one ranking page and print each entry as "book : author".

    Args:
        url: URL of a qidian.com monthly-ticket ranking page.
    """
    # Fetch the raw page (renamed from `str`, which shadowed the builtin).
    html = get_html(url)

    # Parse the HTML string into an lxml element tree.
    root = etree.HTML(html)

    # XPath queries return parallel lists of text nodes.
    book_names = root.xpath('//h4/a/text()')
    authors = root.xpath('//p[@class="author"]/a[1]/text()')

    # zip() pairs titles with authors and truncates to the shorter list,
    # so a missing author cannot misalign the output.
    for book_name, author in zip(book_names, authors):
        print(book_name + " : " + author)
        

def main():
    """Scrape ranking pages 1-50, running four downloads in parallel.

    Bug fixed: the original loop read ``li[2*i+1] .. li[2*i+4]`` for
    ``i in range(10)``, which never scraped ``li[0]``, re-scraped the two
    overlapping indices between consecutive groups, and only ever reached
    indices 1-22 of the 50 URLs.  It also relied on ``time.sleep(2)``
    instead of joining the threads, so slow pages could still be mid-fetch
    when the "group done" message printed.
    """
    # Build the 50 page URLs (pages are 1-indexed on the site).
    urls = [
        "https://www.qidian.com/rank/yuepiao?page={}".format(num)
        for num in range(1, 51)
    ]

    # Process the URLs in non-overlapping batches of four threads.
    for start in range(0, len(urls), 4):
        batch = urls[start:start + 4]
        threads = [
            threading.Thread(target=main2, args=(page_url,))
            for page_url in batch
        ]
        for t in threads:
            t.start()
        # join() waits for the whole batch to finish — deterministic,
        # unlike the original fixed two-second sleep.
        for t in threads:
            t.join()
        print("第{}组完成".format(start // 4))

        


if __name__ == "__main__":
    # Run the scraper only when executed as a script, not when imported.
    main()