import re

import requests

from day10.demo02__text import MovieExcelPipeLine, MovieItem
from day10.proxy_spider import ProxySpider
from xunter_utils.xunter_requests import ChromeClient

# Scrape the free-proxy listing from ip.ihuan.me, extract ip:port pairs,
# validate each one via ProxySpider.check_ip, and persist working proxies
# to a JSON file.
# client = ChromeClient(proxy_dir = 'proxy_ips')
client = ProxySpider()
response = requests.get('https://ip.ihuan.me/?page=eas7a436', headers={
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
    'Cookie': '76bacbfd97c799b432133d901bcf73ed=e6ca8082f6a910a19aa6354a6b262f2c; Hm_lvt_8ccd0ef22095c2eebfe4cd6187dea829=1733829339; HMACCOUNT=D2B61B492E415A52; Hm_lpvt_8ccd0ef22095c2eebfe4cd6187dea829=1733831131',
    'Referer': 'https://ip.ihuan.me/?page=came0299'
})

# Each match is one table-row fragment sitting between an <img src= tag and
# the next <a href= tag; it contains the IP cell followed by the port cell.
# NOTE: do not rebind the response object here -- the original code reused
# the name `res` for both the response and this list, which was confusing.
rows = re.findall(r'<img src=(.*?)<a href=', response.text)
for row in rows:
    print(row)

count = 0  # number of proxies that passed validation
for row in rows:
    # First alternative (lookbehind 'svg">') captures the dotted-quad IP;
    # second alternative (lookbehind '<td>') captures the numeric port.
    fields = re.findall(r'(?<=svg">)\d{1,3}(?:\.\d{1,3}){3}|(?<=<td>)\d+', row)
    if len(fields) < 2:
        # Row layout changed or the parse missed a cell: skip this row
        # instead of crashing with IndexError (original bug).
        continue
    ip = '%s:%s' % (fields[0], fields[1])
    print(ip)
    if client.check_ip(ip):
        # check_ip succeeded -- record the proxy with its source tag.
        client.proxies[ip] = ['www.xiaohuan.download']
        count += 1
        print(client.proxies[ip])

# `count` was previously accumulated but never reported (dead code).
print('validated proxies: %d' % count)
client.save_ip_json('text.json')