import time
import parsel
import requests

def check_ip(proxies_list, timeout=0.1):
    """Return the subset of *proxies_list* that actually works.

    Each element must be a dict in requests' ``proxies`` format, e.g.
    ``{"http": "1.2.3.4:8080"}``.  A proxy is kept only if a GET to
    baidu.com through it returns HTTP 200 within *timeout* seconds.

    :param proxies_list: iterable of single-entry proxy dicts
    :param timeout: per-request timeout in seconds; the aggressive
        0.1 s default deliberately filters out slow proxies
    :return: list of the usable proxy dicts
    """
    can_use = []
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'}
    for proxy in proxies_list:
        try:
            response = requests.get('https://www.baidu.com',
                                    headers=headers,
                                    proxies=proxy,
                                    timeout=timeout)
            if response.status_code == 200:
                can_use.append(proxy)
        except requests.RequestException as e:
            # Unusable proxy (timeout / connection refused / bad URL):
            # report it and move on rather than aborting the whole scan.
            print(e)
    return can_use
# ---- Scrape 4 pages of free proxies from kuaidaili, then test them ----
proxies_list = []
# Loop-invariant request headers: identical for every page, so build once.
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'}
for page in range(1, 5):
    print('============正在抓取第{}页============'.format(page))
    # 1. Build the URL for this results page
    base_url = 'https://www.kuaidaili.com/free/inha/{}/'.format(page)

    # 2. Fetch the page HTML
    response = requests.get(base_url, headers=headers)
    data = response.text

    # 3. Parse: wrap the HTML in a parsel Selector, then pick every
    #    table row out of the proxy listing table.
    html_data = parsel.Selector(data)
    parse_list = html_data.xpath('//table[@class="table table-bordered table-striped"]/tbody/tr')

    # Second-pass extraction: one {"protocol": "ip:port"} dict per row.
    for tr in parse_list:
        http_type = tr.xpath('./td[4]/text()').extract_first()
        ip_num = tr.xpath('./td[1]/text()').extract_first()
        port_num = tr.xpath('./td[2]/text()').extract_first()
        # Malformed rows yield None from extract_first(); skip them
        # instead of crashing on None + str concatenation.
        if not (http_type and ip_num and port_num):
            continue

        # requests matches proxies-dict keys against the *lowercased* URL
        # scheme, so the page's "HTTP"/"HTTPS" must be lowercased or the
        # proxy entry is silently ignored.
        proxies_dict = {http_type.strip().lower(): ip_num.strip() + ":" + port_num.strip()}
        print(proxies_dict)
        proxies_list.append(proxies_dict)

    # Be polite to the site: pause between page fetches.  (Previously this
    # slept per table row, which only throttled local parsing.)
    time.sleep(0.5)

print("获取到的代理IP数量：", len(proxies_list), '个')

# Test which of the scraped proxies are actually usable.
can_use = check_ip(proxies_list)
print('能用的代理：', can_use)
print("能用的代理数量：", len(can_use))