import time
from proxypool.schemas.proxy import Proxy
from proxypool.crawlers.base import BaseCrawler
from loguru import logger
from parsel import Selector

BASE_URL = 'https://www.zdaye.com/dayProxy/{page}.html'
MAX_PAGE = 5 * 2

class ZhanDayeCrawler(BaseCrawler):
    """Crawler for zdaye.com proxies.

    Works in two stages: first scrapes the catalog pages to collect
    detail-page URLs into ``urls``, then lets the base crawler fetch and
    ``parse`` each detail page into :class:`Proxy` objects.
    """

    # Catalog pages 1 .. MAX_PAGE-1 (range() excludes the upper bound).
    urls_catalog = [BASE_URL.format(page=page) for page in range(1, MAX_PAGE)]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
    }
    # NOTE(review): class-level mutable list — shared by every instance and
    # it accumulates across repeated crawls; kept as-is for compatibility.
    urls = []
    # Crawlers carrying this attribute are skipped by the scanner.
    ignore = True

    def crawle(self):
        """Entry point: fill ``self.urls`` from the catalog, then yield
        proxies by delegating to the base crawler.

        NOTE(review): the spelling ``crawle`` (and ``super().crawle()``)
        looks like a typo for ``BaseCrawler.crawl`` — confirm against the
        base class before renaming; left unchanged to preserve the
        interface.
        """
        self.crawl_catalog()
        yield from super().crawle()

    def crawl_catalog(self):
        """Fetch every catalog page and collect detail-page URLs."""
        for url in self.urls_catalog:
            logger.info(f'爬取 {url}')
            html = self.fetch(url, headers=self.headers)
            # A failed fetch yields a falsy result — skip instead of
            # handing None/'' to the parser.
            if not html:
                continue
            self.parse_catalog(html)

    def parse_catalog(self, html):
        """Parse detail-page URLs out of one catalog page and append
        them to ``self.urls``.
        """
        sel = Selector(html)
        for a in sel.css('#J_posts_list .thread_item div div p a'):
            url = f'https://www.zdaye.com{a.css("a::attr(href)").get()}'
            logger.info(f'获取详情页: {url}')
            self.urls.append(url)

    def parse(self, html):
        """Yield :class:`Proxy` objects parsed from a detail page.

        Table cells come in (ip, port) pairs; pairs where either part
        fails to match are skipped.
        """
        sel = Selector(html)
        tds = sel.css('#ipc td::text')
        # Step through (ip, port) pairs; the -1 bound means tds[i + 1]
        # can never raise IndexError when the cell count is odd.
        for i in range(0, len(tds) - 1, 2):
            ip = tds[i].re_first(r'[\d.]+')
            port = tds[i + 1].re_first(r'\d+')
            if not (ip and port):
                continue
            # Throttle between yielded proxies so the site is not hammered.
            time.sleep(3)
            yield Proxy(ip.strip(), int(port.strip()))