
import requests
import re
import json
from lxml import etree
from bs4 import BeautifulSoup


# Fetch the proxy table from xicidaili and print a quick preview.
def get_ip_list():
    """Download the xicidaili front page, report how many rows the proxy
    table (`#ip_list`) contains, and print the IP cell of the first row.

    This is a debug/probe helper: it prints its findings and returns None.
    """
    url = "https://www.xicidaili.com/"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36',
    }

    res = requests.get(url, headers=headers)
    html = etree.HTML(res.text)
    ip_list = html.xpath('//*[@id="ip_list"]/tr')
    print('ip list=', len(ip_list))
    if ip_list:
        # Use a relative XPath ('./td[...]'): the original '//td[2]/text()'
        # is absolute, so lxml re-searched the entire document instead of
        # looking inside the first row element.
        print('ip=', ip_list[0].xpath('./td[2]/text()'))


def open_proxy_url(url):
    """GET *url* with a desktop browser User-Agent and return the page text.

    Returns:
        The decoded response body on success, or None if the request fails
        (connection error, timeout, or non-2xx status).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36',
    }
    try:
        r = requests.get(url, headers=headers, timeout=20)
        r.raise_for_status()
        # Use the detected encoding so Chinese pages decode correctly.
        r.encoding = r.apparent_encoding
        return r.text
    # Catch only requests' own errors; a bare `except:` would also swallow
    # KeyboardInterrupt and hide programming bugs.
    except requests.RequestException:
        print('无法访问网页' + url)
        return None


def get_proxy_ip(response):
    """Parse a xicidaili HTML page and extract proxy URLs.

    Args:
        response: HTML text containing the `#ip_list` table (one proxy
            per <tr>, with IP / port / protocol in fixed <td> columns).

    Returns:
        A list of strings like 'HTTP://1.2.3.4:8080', keeping only rows
        whose protocol column is HTTP or HTTPS (any letter case).
    """
    proxy_ip_list = []
    soup = BeautifulSoup(response, 'html.parser')
    proxy_ips = soup.find(id='ip_list').find_all('tr')
    for proxy_ip in proxy_ips:
        # Hoist the cell list: the original re-ran select('td') for every
        # column access.
        tds = proxy_ip.select('td')
        # Header/separator rows have fewer cells and are skipped.
        if len(tds) >= 8:
            ip = tds[1].text
            port = tds[2].text
            protocol = tds[5].text
            # Case-insensitive: also accepts e.g. 'Http' / 'Https'.
            if protocol.upper() in ('HTTP', 'HTTPS'):
                proxy_ip_list.append(f'{protocol}://{ip}:{port}')
    return proxy_ip_list


# Open a page (e.g. Baidu) through the given proxy.
def open_url_using_proxy(url, proxy):
    """GET *url* through *proxy* and return the page.

    Args:
        url: Target URL to fetch.
        proxy: Proxy URL such as 'HTTP://1.2.3.4:8080' or
            'https://1.2.3.4:8080' (scheme case is ignored).

    Returns:
        A (text, status_code) tuple on success, or None (falsy, like the
        old `False` sentinel) if the request fails.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36',
    }
    proxies = {}
    # Case-insensitive scheme check: get_proxy_ip may emit lowercase
    # 'https://...', which the original startswith('HTTPS') test missed,
    # wrongly filing HTTPS proxies under the 'http' key.
    if proxy.lower().startswith('https'):
        proxies['https'] = proxy
    else:
        proxies['http'] = proxy
    try:
        r = requests.get(url, headers=headers, proxies=proxies, timeout=10)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text, r.status_code
    # Narrowed from a bare `except:` so real bugs still surface.
    except requests.RequestException:
        print('无法访问网页' + url)
        return None


# Check whether a proxy IP actually works.
def check_proxy_avaliability(proxy):
    """Validate *proxy* by fetching Baidu through it.

    A proxy counts as valid when the Baidu homepage loads with status 200
    and the expected <title>. Valid proxies are appended (one per line) to
    'valid_proxy_ip.txt', and the proxy's outward-facing IP is looked up
    via jsonip.com for display.

    Args:
        proxy: Proxy URL such as 'HTTP://1.2.3.4:8080'.
    """
    url = 'http://www.baidu.com'
    result = open_url_using_proxy(url, proxy)
    VALID_PROXY = False
    if result:
        text, status_code = result
        if status_code == 200:
            r_title = re.findall('<title>.*</title>', text)
            if r_title:
                # The exact Baidu title proves we reached the real page,
                # not a proxy error/login interstitial.
                if r_title[0] == '<title>百度一下，你就知道</title>':
                    VALID_PROXY = True
        if VALID_PROXY:
            check_ip_url = 'https://jsonip.com/'
            try:
                text, status_code = open_url_using_proxy(check_ip_url, proxy)
            # Unpacking raises TypeError when the helper returns a falsy
            # non-tuple on failure; treat that as "could not verify".
            except TypeError:
                return

            print('有效代理IP: ' + proxy)
            with open('valid_proxy_ip.txt', 'a') as f:
                # Bug fix: the original f.writelines(proxy) wrote no
                # newline, so all proxies ran together on one line.
                f.write(proxy + '\n')
            try:
                source_ip = json.loads(text).get('ip')
                print(f'源IP地址为：{source_ip}')
                print('='*40)
            # Non-JSON body (e.g. proxy injected an HTML error page).
            except (json.JSONDecodeError, TypeError):
                print('返回的非json,无法解析')
                print(text)
    else:
        print('无效代理IP: ' + proxy)


if __name__ == '__main__':
    # Validate the proxies previously scraped into proxy_ip.txt.
    # (Removed a large block of dead, commented-out scraping/test code and
    # an unused `proxy_url` variable.)
    proxy_ip_filename = 'proxy_ip.txt'
    # Context manager closes the file; the original open(...).read()
    # leaked the file handle.
    with open(proxy_ip_filename, 'r') as f:
        text = f.read()
    proxy_ip_list = get_proxy_ip(text)
    for proxy in proxy_ip_list:
        check_proxy_avaliability(proxy)

