from abc import abstractmethod
from retrying import retry, RetryError
import requests as req
from loguru import logger
from proxypool.settings import GET_TIMEOUT
import time
from fake_headers import Headers


class BaseCrawler(object):
    """Base class for proxy-site crawlers.

    Subclasses set ``urls`` to the list of pages to scrape and implement
    ``parse`` to extract proxy objects from a page's HTML.
    """

    # Pages this crawler fetches; subclasses override with real URLs.
    urls = []

    @retry(stop_max_attempt_number=3, retry_on_result=lambda x: x is None, wait_fixed=2000)
    def fetch(self, url, **kwargs):
        """GET ``url`` with fake browser headers and return the page text.

        Returns None on a non-200 status or a connection/read-timeout
        error; a None result makes ``retrying`` retry up to 3 times
        (2 s apart) and finally raise RetryError, which ``crawle``
        handles.
        """
        try:
            # Generate randomized request headers to mimic a real browser.
            headers = Headers(headers=True).generate()
            kwargs.setdefault('timeout', GET_TIMEOUT)
            kwargs.setdefault('verify', False)
            kwargs.setdefault('headers', headers)
            resp = req.get(url, **kwargs)
            if resp.status_code == 200:
                resp.encoding = 'utf-8'
                return resp.text
            # Non-200 falls through, returning None -> triggers a retry.
        except (req.ConnectionError, req.ReadTimeout):
            # BUG FIX: original caught ``resp.ConnectionError`` -- ``resp``
            # is unbound when ``req.get`` itself raises, and a Response
            # object has no ConnectionError attribute anyway. The intended
            # exceptions live on the ``requests`` module (imported as req).
            return

    def process(self, html, url):
        """Yield every proxy parsed from ``html``, logging each one."""
        for proxy in self.parse(html):
            logger.info(f'从 {url} 中获取代理：{proxy.string()}')
            yield proxy

    def crawle(self):
        """Crawl all pages in ``urls`` and yield the proxies found.

        NOTE(review): the name looks like a typo for ``crawl`` but is
        kept unchanged -- it is the public entry point callers use.
        """
        try:
            for url in self.urls:
                logger.info(f'爬取url：{url}')
                html = self.fetch(url)
                if not html:
                    continue
                # Small delay between pages to avoid hammering the site.
                time.sleep(0.5)
                yield from self.process(html, url)
        except RetryError:
            # fetch() exhausted its retries for some URL; give up on
            # this crawler and report it.
            logger.error(
                f'爬虫 {self} 爬取代理失败, '
                '请检测目标网址是否正确运行')

    @abstractmethod
    def parse(self, html):
        """Parse page HTML and yield proxy objects; subclasses implement."""
        pass