import requests
from bs4 import BeautifulSoup
import re
import json
from check_ip import check_proxy_ip
import time
import random


# Request headers: a desktop Chrome user-agent so the proxy site serves
# its normal HTML page instead of rejecting a bare scripted client.
headers = {
    "user-agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/130.0.0.0 Safari/537.36"
    )
}

#ip列表
# ip_list = []
# for page in range(1, 21):
#     # 高匿开发代理URL
#     url = f"https://www.kuaidaili.com/free/inha/{page}"
#     html = requests.get(url, headers=headers).text
#
#     """
#     ip地址是放在HTML代码的数组里面，
#     我们先用正则将整个数组地址从HTML代码提取取来
#     然后再转成列表
#     """
#     # 提取数组的正则表达式
#     pattern = "const fpsList = (.*?);"
#     rv = re.search(pattern, html)
#     # 将数组字符串转成列表
#     ips = json.loads(rv.group(1))
#
#     #
#     for item in ips:
#         # 将IP和端口拼接成这种格式  47.122.65.254:80
#         rv = check_proxy_ip(item["ip"], item["port"])
#         if rv:
#             ip_port = item["ip"] + ":" + item["port"]
#             ip_list.append(ip_port)
#             print(ip_port, "是有效IP")
#
#
#     # 休眠时间 时间随机
#     t = round(random.random(), 3)
#     time.sleep(t)
#
#
# new_ip_list = list(set(ip_list))

ip_list = []  # collected "ip:port" strings that passed the liveness check
current_page = 1  # page number of the kuaidaili free high-anonymity list

# Walk the paginated proxy list until the site signals there are no more pages.
while True:
    url = f"https://www.kuaidaili.com/free/inha/{current_page}"
    # timeout=10 so a slow or dead server cannot hang the scraper forever
    html = requests.get(url, headers=headers, timeout=10).text

    bs = BeautifulSoup(html, "lxml")
    # Past the last page the site renders a body containing only "page error".
    if bs.body.text == "page error":
        break

    # The proxy records are embedded in the page's JavaScript as a JSON array:
    #   const fpsList = [{"ip": "...", "port": "...", ...}, ...];
    rv = re.search(r"const fpsList = (.*?);", html)
    if rv is None:
        # Page layout changed or we were rate-limited — stop cleanly instead
        # of crashing with AttributeError on rv.group(1).
        print(f"fpsList not found on page {current_page}, stopping")
        break
    ips = json.loads(rv.group(1))

    for item in ips:
        # Keep only proxies that actually answer a test request.
        if check_proxy_ip(item["ip"], item["port"]):
            # f-string also handles the port arriving as a number, where
            # plain string concatenation would raise TypeError.
            ip_port = f'{item["ip"]}:{item["port"]}'
            ip_list.append(ip_port)
            print(ip_port, " 有效!")

    # Random short pause (0-1 s) between pages to avoid hammering the site.
    t = round(random.random(), 3)
    time.sleep(t)

    current_page += 1

# 将ip写入文件
fp = open("ip_pool.txt", "a", encoding="utf-8")
for ip in ip_list:
    fp.write(ip + "\n")
fp.close()













