import grequests
import time

from lxml import etree

from baiduSpider import get_proxy

# HTTP headers attached to every request; the Baidu Referer makes the
# traffic look like it originated from a Baidu page.
headers = {
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Cache-Control': 'max-age=0',
    'Referer': 'https://www.baidu.com',
}
# Query-string parameters; 'kw' is the search keyword (placeholder value here).
params = {
    'kw': 123
}
# Retry counter seed (not incremented anywhere visible in this file).
retry_count = 1
def makeproxies():
    """Fetch one proxy address from the pool and build a requests proxies dict.

    Returns:
        dict: maps the 'http' scheme to the proxy URL.

    Raises:
        RuntimeError: when the proxy pool returns no proxy, instead of
            failing later with an opaque TypeError on string concatenation.
    """
    proxy = get_proxy().get("proxy")
    if proxy is None:
        # Pool is empty or the pool API response lacks a 'proxy' key.
        raise RuntimeError("proxy pool returned no proxy")
    proxies = {
        'http': 'http://' + proxy,
        # 'https': 'https://' + proxy,
    }
    print(proxy)
    return proxies


# Retry counters (failed requests / captcha hits) and the run's start time.
Rqnum, YZnum = 0, 0
start = time.time()


def get_text(html):
    """Extract and concatenate the text of every <em> element in *html*.

    The caller must pass a complete, non-empty HTML document; parsing is
    not guarded here.
    """
    tree = etree.HTML(html)          # build the document tree
    em_texts = tree.xpath('//em/text()')
    print(em_texts)
    joined = "".join(em_texts)
    print(joined)
    return joined


# Fire one concurrent request round for every URL and return the responses.
def getUrls(urls: list):
    """Request every URL in *urls* concurrently via grequests.

    makeproxies() is evaluated once per URL, so each request can go
    through a different proxy from the pool.

    Returns:
        list: the result of grequests.map(); entries are None for
        requests that failed, so callers must check each element.
    """
    req_list = [
        grequests.get(u, headers=headers, params=params,
                      proxies=makeproxies(), verify=False)
        for u in urls
    ]
    print(len(req_list))
    responseList = grequests.map(req_list)
    print(len(responseList))
    return responseList

def checkUseHtml(html):
    """Validate a fetched Baidu result page and extract its text on success.

    Returns:
        bool: True when the page is a usable Baidu result page (text was
        extracted via get_text); False when the page is missing, is not a
        Baidu page at all, or is a captcha challenge.
    """
    # The None check must come FIRST: a membership test like
    # '百度' not in None raises TypeError, which made the original
    # None branch unreachable.
    if html is None:
        print(f"5次请求失败，正在切换代理ip尝试第{Rqnum}次尝试")
        return False

    if '百度' not in html:
        # Not a Baidu page (e.g. a proxy/ISP error page).
        return False

    if '验证' in html:
        # Baidu served a captcha page to this proxy IP.
        print(f"该ip出现验证码，切换ip第{YZnum}次尝试")
        return False

    # Page passed every filter: Baidu returned real content, extract it.
    baidu_FindWord = get_text(html)
    print(baidu_FindWord)
    return True



# Target URLs for this run (httpbin endpoints used as stand-ins).
urls = [f'http://httpbin.org/get{n}' for n in range(1, 5)]





# Keep re-requesting until every URL has produced a usable page.
while len(urls) > 0:
    responseList = getUrls(urls)  # this round's responses, aligned with urls

    # Collect the URLs that still need another attempt. zip() keeps each
    # response paired with the URL that produced it — the old
    # responseList.index(i) lookup broke once urls had been popped in the
    # same round (indices shifted, so the wrong URL was removed).
    failedUrls = []
    for url, resp in zip(urls, responseList):
        if resp is None:
            print('说明这个请求没有获取成功')
            failedUrls.append(url)  # retry this URL next round
            continue
        state = checkUseHtml(resp.text)  # True when the page yielded content
        print(state)
        if not state:
            failedUrls.append(url)
    urls = failedUrls
    print('进入下一轮循环')





