import random
import re
import requests
import parsel
import urllib3
import time
from lxml import etree

# Silence urllib3's InsecureRequestWarning, triggered by the verify=False
# request made in getbook() below.
urllib3.disable_warnings()

# Shared headers for the book-site requests in getbook().
# NOTE(review): the Referer points at zuopinj.com while getbook() crawls
# qidian.com — presumably left over from an earlier target; confirm.
headers = {
    'Referer': 'http://zuopinj.com/',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36'
}


def check_ip(proxies_list, test_url='https://www.baidu.com', timeout=0.1):
    """Filter *proxies_list* down to the proxies that actually work.

    Each element is a requests-style proxy mapping, e.g.
    ``{'http': '1.2.3.4:8080'}``.  A proxy is kept only when it fetches
    *test_url* within *timeout* seconds and gets HTTP 200.

    Args:
        proxies_list: iterable of proxy dicts to probe.
        test_url: URL used to probe each proxy.
        timeout: per-request timeout in seconds; deliberately tiny so
            only fast proxies survive the filter.

    Returns:
        list: the subset of proxy dicts that responded successfully.
    """
    can_use = []
    # Local name so we don't shadow the module-level `headers` dict.
    probe_headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'}
    for proxy in proxies_list:
        try:
            response = requests.get(test_url, headers=probe_headers, proxies=proxy, timeout=timeout)
        except requests.RequestException as e:
            # Dead/slow proxies are expected here; report and move on.
            # (Narrowed from `except Exception` so real bugs still surface.)
            print(e)
        else:
            if response.status_code == 200:
                can_use.append(proxy)
    return can_use


proxies_list = []  # Filled by getpro(); filtered by check_ip() in __main__.


def getpro():
    """Scrape pages 1-2 of kuaidaili's free proxy list into ``proxies_list``.

    Each table row becomes a requests-style proxy dict such as
    ``{'http': '1.2.3.4:8080'}``.  Rows with missing cells are skipped.
    """
    for page in range(1, 3):
        print('============正在抓取第{}页============'.format(page))
        # 1. Build the listing-page URL.
        base_url = 'https://www.kuaidaili.com/free/inha/{}/'.format(page)
        headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'}

        # 2. Fetch the page.
        response = requests.get(base_url, headers=headers)
        data = response.text

        # 3. Parse: one <tr> per proxy entry.
        html_data = parsel.Selector(data)
        parse_list = html_data.xpath('//table[@class="table table-bordered table-striped"]/tbody/tr')

        for tr in parse_list:
            http_type = tr.xpath('./td[4]/text()').extract_first()
            ip_num = tr.xpath('./td[1]/text()').extract_first()
            port_num = tr.xpath('./td[2]/text()').extract_first()

            # extract_first() returns None when a cell is absent (layout
            # change, ad row); skip instead of crashing on concatenation.
            if not (http_type and ip_num and port_num):
                continue

            # requests only honors lowercase scheme keys ('http'/'https');
            # the site lists the protocol in uppercase, so normalize it,
            # and strip stray whitespace from the scraped cells.
            proxies_dict = {http_type.strip().lower(): ip_num.strip() + ":" + port_num.strip()}
            print(proxies_dict)
            proxies_list.append(proxies_dict)

        # Politeness delay between page *fetches*.  (Previously slept per
        # parsed row, which made no requests and only slowed parsing.)
        time.sleep(0.5)


def getbook(proxies_pool=None):
    """Crawl qidian's free-book list and print each chapter's title and text.

    Walks three levels: the free-book listing page -> each book's table of
    contents -> each chapter page, printing the chapter title and the
    extracted text nodes.

    Args:
        proxies_pool: optional list of requests-style proxy dicts; one is
            chosen at random for every request.  Defaults to the
            module-level ``can_use`` list built in ``__main__`` (NameError
            if called without it — same behavior as before).
    """
    pool = can_use if proxies_pool is None else proxies_pool

    response = requests.get('https://www.qidian.com/free', headers=headers, proxies=random.choice(pool))
    response.encoding = response.apparent_encoding
    response_text = parsel.Selector(response.text)

    # Detail-page links for each free book (protocol-relative hrefs).
    response_url = response_text.xpath(
        '//*[@id="limit-list"]/div/ul//li/div[2]/h4/a/@href').extract()
    for p in response_url:
        response1 = requests.get('https:' + p, headers=headers, proxies=random.choice(pool))
        response1.encoding = response1.apparent_encoding
        response1_html = parsel.Selector(response1.text)
        # Chapter links from the book's table of contents.
        # (Removed the dead `mulv` chapter-title extraction: it was never used.)
        mulv_url = response1_html.xpath(
            '//*[@id="j-catalogWrap"]/div[2]/div/ul/li/a/@href').extract()
        for q in mulv_url:
            response2 = requests.get('https:' + q, headers=headers, verify=False, proxies=random.choice(pool))
            response2.encoding = response2.apparent_encoding
            context = parsel.Selector(response2.text)
            selector = etree.HTML(response2.text)
            text1 = selector.xpath('//div[@class="read-content j_readContent"]/p/span[1]/text()')
            title = context.xpath('//div[@class="main-text-wrap "]/div[1]/h3/span[1]/text()').extract_first()
            print(title)
            print(text1)


if __name__ == '__main__':
    # 1. Harvest candidate proxies from kuaidaili.
    getpro()
    print("获取到的代理IP数量：", len(proxies_list), '个')
    # 2. Keep only the proxies that answer quickly.
    can_use = check_ip(proxies_list)
    print('能用的代理：', can_use)
    print("能用的代理数量：", len(can_use))
    # 3. Guard against an empty pool: random.choice([]) inside getbook()
    #    would raise IndexError.
    if can_use:
        getbook()
    else:
        print('没有可用的代理，跳过抓取')
