import requests
import parsel
import time
# Validate scraped proxy IPs by probing a fast, reliable site.
def check_ip(proxies_list):
    """Filter *proxies_list* down to proxies that answer within 0.1 s.

    ``requests`` only honours lowercase scheme keys ('http'/'https') in its
    ``proxies`` argument; the source site labels them 'HTTP'/'HTTPS', so
    without normalization every request silently bypassed the proxy and all
    entries looked "usable".  Keys are lowercased here before probing.

    :param proxies_list: list of dicts mapping protocol name -> "ip:port"
    :return: list of working proxy dicts, with keys normalized to lowercase
             so they can be passed straight to ``requests``
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'
    }
    can_use = []
    for proxy in proxies_list:
        # Normalize scheme keys ('HTTP' -> 'http') so requests uses the proxy.
        normalized = {scheme.lower(): addr for scheme, addr in proxy.items()}
        try:
            # timeout=0.1: only keep proxies that respond within 0.1 seconds.
            response = requests.get('https://www.baidu.com',
                                    headers=headers,
                                    proxies=normalized,
                                    timeout=0.1)
            if response.status_code == 200:
                can_use.append(normalized)
        except Exception as e:
            # Slow or dead proxies raise (timeout / connection error): log and skip.
            print(proxy, e)
    return can_use
#####################
# Scrape free proxies from kuaidaili (pages 1-7), then keep the responsive ones.
proxies_list = []
# Loop-invariant: build the request headers once, not once per page.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'
}
for page in range(1, 8):
    print('===============正在爬取第{}页数据=========='.format(page))
    url = 'https://www.kuaidaili.com/free/inha/{}/'.format(page)
    respose = requests.get(url, headers=headers)
    # Let requests guess the encoding so Chinese text decodes correctly.
    respose.encoding = respose.apparent_encoding
    html_data = parsel.Selector(respose.text)
    tr_parse = html_data.xpath('//table[@class="table table-bordered table-striped"]/tbody/tr')
    for tr in tr_parse:
        # XPath indices start at 1, not 0: td[1]=IP, td[2]=port, td[4]=protocol.
        http_type = tr.xpath('./td[4]/text()').extract_first()
        ip_num = tr.xpath('./td[1]/text()').extract_first()
        ip_port = tr.xpath('./td[2]/text()').extract_first()
        if not (http_type and ip_num and ip_port):
            # Malformed row: extract_first() returned None; skip instead of crashing.
            continue
        # strip() removes surrounding whitespace from cell text; lower() is
        # required because requests ignores non-lowercase scheme keys in
        # its `proxies` argument.
        proxies_dict = {http_type.strip().lower(): ip_num.strip() + ':' + ip_port.strip()}
        print('正在爬取', proxies_dict)
        proxies_list.append(proxies_dict)

    time.sleep(1)  # Throttle: scraping too fast makes the server stop answering.

print(proxies_list)
print('获取到的代理IP的数量:', len(proxies_list))
#########
can_use = check_ip(proxies_list)
print('质量高的代理IP', can_use)
print('质量高的代理IP的数量:', len(can_use))