#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
爬取代理网站上的代理IP及端口并保存在本地
"""

__author__ = 'hubert'

import time

from bs4 import BeautifulSoup
import requests



# Browser-like headers so the proxy site does not reject us as an obvious bot.
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Edg/98.0.1108.55',
    'Connection': 'keep-alive'
}

# Codec used to decode scraped pages.
# BUG FIX: was the typo 'uft-8', which is not a registered codec — after
# `req.encoding = encoding`, accessing `req.text` raised LookupError.
encoding = 'utf-8'


def write_txt(f_txt, tr_list):
    """Filter scraped proxy rows, live-test them, and persist working ones.

    For each table row, a proxy whose reported response time is under one
    second is verified with a real HTTP request; proxies that answer with
    status 200 are printed and appended to *f_txt*, one ``http://ip:port``
    per line.

    :param f_txt: writable text file object the proxies are written to
    :param tr_list: iterable of BeautifulSoup ``<tr>`` elements from the
        kuaidaili free-proxy table
    """
    # Cheap, plain-HTTP probe so the HTTP proxy is actually exercised.
    test_url = "http://nginx.org/"
    for item in tr_list:
        td_ip = item.select('td[data-title="IP"]')
        td_port = item.select('td[data-title="PORT"]')
        td_res = item.select('td[data-title="响应速度"]')

        if len(td_res) != 1:
            continue
        speed = td_res[0].text.replace("秒", "")
        try:
            # Rows occasionally carry non-numeric speed text; skip them
            # instead of crashing the whole scrape with ValueError.
            if float(speed) >= 1:
                continue
        except ValueError:
            continue

        # Build the proxy URL once instead of concatenating it three times.
        proxy_url = "http://" + str(td_ip[0].text) + ":" + str(td_port[0].text)
        proxies = {"http": proxy_url}
        try:
            res = requests.get(test_url, proxies=proxies, timeout=10)
            if res.status_code == 200:
                print(proxy_url)
                f_txt.write(proxy_url + "\n")
        except Exception as ex_get:
            # Dead/slow proxies are expected; report and move on.
            print("异常:", ex_get)



# Fetch a single page of proxies.
def get_ip_proxy1():
    """Scrape page 1 of the kuaidaili free proxy list into ip_list.txt.

    Downloads https://www.kuaidaili.com/free/inha/1, parses the proxy
    table and writes the verified proxies to ``ip_list.txt``, overwriting
    any previous contents.
    """
    page_num = 1
    url = "https://www.kuaidaili.com/free/inha/"
    req = requests.get(url + str(page_num), headers=headers)
    # BUG FIX: the module-level `encoding` constant is misspelled
    # ('uft-8'), which made `req.text` raise LookupError; use the real
    # codec name directly.
    req.encoding = "utf-8"
    soup = BeautifulSoup(req.text, 'lxml')
    # Guard against layout changes / anti-bot pages instead of crashing
    # with AttributeError on None.
    div_list = soup.find(id="list")
    if div_list is None:
        print("proxy table not found:", url + str(page_num))
        return
    table = div_list.find(class_="table table-bordered table-striped")
    tbody = table.find("tbody") if table is not None else None
    if tbody is None:
        print("proxy table not found:", url + str(page_num))
        return
    tr_list = tbody.find_all("tr")
    with open("ip_list.txt", "w", encoding="utf-8") as f_txt:
        write_txt(f_txt, tr_list)

# Fetch the first N-1 pages of proxies.
def get_ip_proxy2(n):
    """Scrape pages 1..n-1 of the kuaidaili free proxy list into ip_list.txt.

    ``ip_list.txt`` is truncated once up front, then each page's verified
    proxies are appended as they are found.

    :param n: exclusive upper page bound, e.g. ``n=4`` fetches pages 1-3
    """
    with open("ip_list.txt", "w", encoding="utf-8") as f_txt:
        for i in range(1, n):
            url = "https://www.kuaidaili.com/free/inha/{}/".format(i)
            print(url)
            # Be polite to the site and avoid rate limiting / bans.
            time.sleep(3)
            req = requests.get(url, headers=headers)
            # BUG FIX: the module-level `encoding` constant is misspelled
            # ('uft-8'), which made `req.text` raise LookupError; use the
            # real codec name directly.
            req.encoding = "utf-8"
            soup = BeautifulSoup(req.text, 'lxml')
            # Skip pages whose layout we can't parse (anti-bot / redesign)
            # rather than aborting the entire run with AttributeError.
            div_list = soup.find(id="list")
            if div_list is None:
                print("proxy table not found:", url)
                continue
            table = div_list.find(class_="table table-bordered table-striped")
            tbody = table.find("tbody") if table is not None else None
            if tbody is None:
                print("proxy table not found:", url)
                continue
            tr_list = tbody.find_all("tr")
            # Verify and persist this page's proxies.
            write_txt(f_txt, tr_list)

def get_proxy_test():
    """Smoke-test a single hard-coded proxy against httpbin.

    http://httpbin.org/ip echoes the caller's origin IP as JSON, so the
    printed address shows whether the proxy was actually used.
    """
    url = "http://httpbin.org/ip"
    proxies = {'http': 'http://202.55.5.209:8090'}
    req = requests.get(url, headers=headers, proxies=proxies, timeout=20)
    # BUG FIX: httpbin serves UTF-8 JSON, not GBK; 'gbk' only happened to
    # work because the payload is pure ASCII.
    req.encoding = 'utf-8'
    print(req.text)


"""
    telnetlib在python3.11版本中已弃用 
    https://docs.python.org/3.10/library/telnetlib.html#
"""
def test_ip(ip,port):
    try:
       # th = telnetlib3.TelnetClient(ip, port, timeout=10)
        print("代理ip有效！")
    except:
        print("代理ip无效！")


# Script entry point: scrape and save proxies when run directly.
if __name__ == '__main__':
    # Fetch pages 1 through 19 of the free proxy list (range(1, 20)).
    get_ip_proxy2(20)
    # get_proxy_test()
    # test_ip("202.55.5.209", "8090")







