import requests as rq
import threading
import time

import numpy as np
from lxml import etree

from Agent_pond.Agent import AGENT as user_agent


def search_IP(endpage):
    """Scrape pages 1..endpage of xiladaili.com's HTTPS proxy list and
    append every high-anonymity proxy (prefixed with "https://") to the
    global ``ip_https`` list.

    endpage: last page number (inclusive) to scrape.
    Side effects: extends global ``ip_https``; prints progress per page.
    """
    global ip_https

    for page in range(1, endpage + 1):
        # Randomize the User-Agent per request to look less like a bot.
        head = {'User-Agent': np.random.choice(user_agent)}
        r = rq.get(f"http://www.xiladaili.com/https/{page}/", headers=head)

        # Parse the response once (the original parsed it twice).
        tree = etree.HTML(r.text)
        ip_number = tree.xpath("//tbody/tr/td[1]/text()")      # "ip:port" strings
        ip_is_gaoni = tree.xpath("//tbody/tr/td[3]/text()")    # anonymity labels

        # Keep only high-anonymity ("高匿") proxies; pair columns with zip
        # instead of indexing by position.
        number = [addr for addr, kind in zip(ip_number, ip_is_gaoni)
                  if kind == "高匿代理服务"]

        ip_https += ["https://" + ip for ip in number]
        print(f"完成第{page}页IP采集")


def check_IP_https(ip):
    """Probe https://www.baidu.com through proxy ``ip`` (a "https://host:port"
    string); on HTTP 200 append it to the global ``ip_https_check`` list.

    Side effects: may append to global ``ip_https_check``; prints the verdict.
    """
    global ip_https_check

    headers = {'User-Agent': np.random.choice(user_agent)}
    try:
        # Keep the try body minimal: only the network call can raise here.
        r = rq.get("https://www.baidu.com", proxies={'https': ip},
                   headers=headers, timeout=10)
    except rq.RequestException:
        # Narrowed from a bare except: catch only requests' network/timeout
        # errors so KeyboardInterrupt/SystemExit still propagate.
        print(ip + " 不可用")
        return

    if r.status_code == 200:
        print(ip + " 可用")
        ip_https_check.append(ip)
    else:
        print(ip + " 不可用")


def get_old_IP():
    """Load previously verified proxies from IP_pond/IP_https.npy.

    Returns a plain list of proxy strings; returns [] when the file does
    not exist yet (e.g. the very first run), instead of crashing.
    """
    try:
        return list(np.load("IP_pond/IP_https.npy"))
    except FileNotFoundError:
        # No saved pool yet — start with an empty one.
        return []


def save_IP():
    """Report and persist the verified proxy pool (global ``ip_https_check``)
    to IP_pond/IP_https.npy."""
    usable = len(ip_https_check)
    print(f"共获得{usable}个 可用 IP地址")
    print(ip_https_check)
    np.save("IP_pond/IP_https.npy", ip_https_check)


if __name__ == "__main__":
    t1 = time.time()

    ip_https = get_old_IP()
    ip_https_check = []  # 经检查可用

    print("获取已有 IP 成功！")
    search_IP(5)

    ip_https = np.unique(ip_https)
    print(f"下载完成，准备检验{len(ip_https)}个IP！")

    wait_thread = []
    for ip in ip_https:
        t = threading.Thread(target=check_IP_https, args=(ip,))
        wait_thread.append(t)
        t.start()

    for w in wait_thread:
        w.join()  # 全部堵塞住，保证先执行完检查，才进行 save

    save_IP()

    t2 = time.time()
    print(f"全部完成耗时{round(t2 - t1, 1)}")
