from impoter_bag import *
from my_api import *

# Load the proxy-source definitions; lines starting with '#' are comments.
# Each remaining line is a comma-separated record consumed by get_ip_in_web.
with open('ip_sources.info', 'r', encoding='utf-8') as file:
    sources = [row.strip().split(',') for row in file if not row.startswith('#')]

# Candidate "ip:port" strings scraped from the web, prior to validation.
ip_list = set()


def strip(s: str) -> str:
    """Map-friendly helper: return *s* without leading/trailing whitespace."""
    return str.strip(s)


def get_ip_in_web(format_url, ip_xpath, port_xpath, mode, page=1, max_page=35) -> None:
    """Crawl "ip:port" candidates from paginated listing pages into ip_list.

    format_url: URL template whose literal '<int>' placeholder is replaced
        by the current page number.
    ip_xpath / port_xpath: XPath expressions yielding the IP and port text
        nodes of one page (assumed to be plain strings — TODO confirm the
        xpaths end in text()/@attr).
    mode: flag string; 'reverse' reads the xpath results back-to-front,
        'pop' drops the last (ip, port) pair of each page.
    page: first page to fetch; the sources file supplies it as a str,
        so it is normalized to int once up front.
    max_page: crawling stops when a page beyond this yields no results.

    Returns None; results are accumulated in the module-level ip_list set.
    """
    err = 0
    page = int(page)  # normalize once instead of int(page) at every use
    page_marker = re.compile(r'<int>')
    while True:
        url = page_marker.sub(str(page), format_url)
        try:
            content = rq.get(url, headers=get_ua(), proxies=get_ip(), timeout=12)
        except rq.exceptions.ProxyError as error:
            err += 1
            print(f'代理出错！正在尝试新的代理！信息为：\n[{error}]')
            # Message now matches the condition below: we give up only
            # after MAX_RETRIES consecutive proxy failures.
            print(f'正在尝试第{err}次，超过第{MAX_RETRIES}次将会直接退出！')
            if err > MAX_RETRIES:
                return
            continue
        except Exception as error:
            print(f'请求爬取出错，信息为：\n[{error}]')
            return
        err = 0  # a successful request resets the consecutive-failure count
        content.encoding = 'utf-8'
        tree = etree.HTML(content.text)
        ips = [s.strip() for s in tree.xpath(ip_xpath)]
        ports = [s.strip() for s in tree.xpath(port_xpath)]
        if 'reverse' in mode:
            # Reverse each list BEFORE zipping (pairs from the tail), exactly
            # as the site's layout requires for these sources.
            ips, ports = ips[::-1], ports[::-1]
        group = list(zip(ips, ports))
        if not group:
            # Empty page: either the layout broke or we ran past the end.
            print('爬取', url, '出错！')
            if page > max_page:
                return
            page += 1
            continue
        if 'pop' in mode:
            group = group[:-1]  # last row is a header/ad on some sources
        for pair in group:
            ip_list.add(':'.join(pair))
        print(f'[{url}]的第{page}页爬取完毕！')
        page += 1


#  Validation helpers used by the main program: collect IPs that pass check_ip.
avail_ip = []


def filter_ip(ip):
    """Thread-pool worker: append *ip* to avail_ip when check_ip accepts it."""
    if check_ip(ip):
        avail_ip.append(ip)


def dump(li, file_name='ip_avail'):
    """Pickle the list *li* of usable IPs to <file_name>.pk.

    When *li* is empty, only print a notice and leave any existing file
    untouched — an empty crawl must not wipe a previous dump.
    """
    if not li:
        print('可用IP列表为空！DUMP失败！')
        return
    # 'wb' already truncates on open, so the old seek(0)/truncate(0)
    # calls were redundant; pickle.dump also replaces dumps()+write().
    with open(file_name + '.pk', 'wb') as file_:
        pickle.dump(li, file_)
    print(f'存储{len(li)}个可用IP完毕!')


#  One-shot validation of the saved IPs: re-check every entry in
#  ip_avail.pk and rewrite the file with only the ones that still work.
def daily_check():
    """Re-validate every IP stored in ip_avail.pk, dropping dead ones."""
    can_be_used = []
    print('开始检查文件IP情况！')
    try:
        # open() is now inside the try: previously a missing ip_avail.pk
        # (e.g. on first run) crashed the whole program with an
        # uncaught FileNotFoundError.
        with open('ip_avail.pk', 'rb') as file__:
            wait_check = pickle.load(file__)
    except FileNotFoundError:
        print('未找到 ip_avail.pk，跳过检查！')
        return
    except Exception as error:
        print(f'加载出错！信息为：{error}')
        return

    def checking(IP):
        # Worker: record IPs that still respond.
        if check_ip(IP):
            can_be_used.append(IP)

    with tp(20) as my_pool:
        orders = [my_pool.submit(checking, IP=ip) for ip in wait_check]
    for order in as_completed(orders):
        order.result()  # surface any exception raised inside a worker

    if set(can_be_used) == set(wait_check):
        print('IP一切正常！')
        return
    if not can_be_used:
        # Opening in 'wb' truncates the file, which is all we need here.
        with open('ip_avail.pk', 'wb'):
            print('孩子，洗洗睡吧，你的IP全没了！')
        return
    print(f'''这些IP已弃用！\n{set(wait_check) - set(can_be_used)}''')
    with open('ip_avail.pk', 'wb') as file__:
        pickle.dump(can_be_used, file__)


#  Entry point: validate saved IPs, crawl fresh candidates, re-check everything.
if __name__ == '__main__':
    daily_check()
    with tp(3) as my_thread_pool:
        print('线程开启成功，开始爬取IP！')
        # Each source row: (url_template, ip_xpath, port_xpath, mode, start_page).
        # NOTE(review): assumes every non-comment row has >= 5 fields — verify
        # against ip_sources.info.
        tasks = [my_thread_pool.submit(get_ip_in_web, j[0], j[1], j[2], j[3], j[4])
                 for j in sources]
        print('任务分配完成！')
    for _ in as_completed(tasks):
        _.result()
    print('本次爬到的IP为：', ip_list)

    with tp(20) as my_thread_pool:
        print('线程开启成功，开始校验可用IP！')
        ip_list = set(ip_list)
        # Merge in previously saved IPs so they get re-validated too.
        # Guarded now: on a first run ip_avail.pk does not exist yet and the
        # old unconditional open() crashed with FileNotFoundError.
        try:
            if getsize('ip_avail.pk') > 0:
                with open('ip_avail.pk', 'rb') as file:
                    ip_list |= set(pickle.load(file))
        except FileNotFoundError:
            pass  # no previous dump — nothing to merge
        tasks = [my_thread_pool.submit(filter_ip, k) for k in ip_list]
    for _ in as_completed(tasks):
        _.result()
    dump(avail_ip)
    print('任务完成，线程池关闭！')
