import collections
import copy
import json
import re
import os
import time
import traceback
from collections import abc
from twisted.internet import reactor, task

from datetime import datetime
from squirrel_core.commons.utils.logger import Logging
from squirrel_core.commons.config.logconstant import SPIDER_INIT_J, SPIDER_INIT_ERR_J, CRAWL_STAT_S_J, LC_J, \
    SPIDER_IDLE_J, SPIDER_CLOSE_J
from squirrel_core.commons.database import ProviderType
from squirrel_core.commons.database.db_provider import get_provider
from squirrel_core.commons.scrapy_base import defaults
from squirrel_core.commons.scrapy_base.picklecompat import loads, dumps
from squirrel_core.commons.signals import request_scheduler_popped
from squirrel_core.commons.utils.tools import bytes_to_str
from squirrel_core.commons.utils.tools import patch_global_logger
from squirrel_core.commons.utils.tools import str_to_bool, str_to_int
# from squirrel_core.item import PageUnreachableItem
from squirrel_core.frame.progress_state import StateEnum, StateInterface
from scrapy.extensions.corestats import CoreStats
from scrapy import signals
from scrapy.exceptions import DontCloseSpider
from scrapy.http import Request
from scrapy.utils.reqser import request_to_dict, request_from_dict
from squirrel_core.commons.utils.get_config import get_config


class Spiders(StateInterface):
    """Base spider that pulls start requests from a db-backed queue (SSDB)
    and reports crawl status/lifecycle events back to the controlling
    framework via message queues and key logs.
    """
    name = 'Spiders'
    # NOTE(review): class-level mutable lists are shared by every instance
    # and subclass that does not redefine them — update_allow_domains and
    # init_start_url mutate these shared objects.
    allowed_domains = []
    start_urls = []

    # Seconds between framework-message-queue polls (check_queue).
    msg_check_period = 10
    # Seconds between crawl status reports (report_crawl_status).
    status_check_period = 30
    need_ssdbstore_dup = False
    # Config section selected by RUN_ENV, defaulting to "dev".
    section = os.environ.get("RUN_ENV", "dev")
    # Loaded once at class-creation time; shared by all instances.
    base_config_from_conf = get_config(sections="base")
    # config_from_conf = get_config(sections=section)
    def __init__(self, *args, **kwargs):
        """Initialise spider state and wire up logging.

        :param kwargs: may contain 'logger', a pre-built logger instance;
            when absent or falsy a fresh Logging() is created.
        """
        # LoopingCall handles (queue check / status report) plus a registry
        # of every looping call so closed() can stop them all.
        self.q_lc = None
        self.s_lc = None
        self.lc_list = []
        # Becomes True once init_start_url has pushed all seeds to the queue.
        self.job_started_in_spider = False
        self.force_to_close_spider = False

        # (inbound, outbound) framework message queues; assigned externally.
        self.queues = None
        self.name_first = None
        self.name_second = None

        self.stats = None
        self.settings = None
        self.close_after_idle = True
        self.server = None
        self.start_url_key = None
        self.seed_server = None
        self.db_encoding = None
        self.db_batch_size = None
        # Scratch dict reused for structured key-log messages.
        self._keymsg_t = {}
        # Single kwargs lookup (the original called kwargs.get twice).
        self.logger = kwargs.get('logger') or Logging()
        patch_global_logger(self.logger)
        # Timestamp of the last observed request activity (idle-timeout base).
        self._last_req_time = time.time()
        self.close_spider_after_idle_seconds = int(
            self.base_config_from_conf.get('scrapy_spider.close_spider_after_idle_seconds', 300))
        self.logger.debug('读取配置文件配置: {}'.format(self.base_config_from_conf))
        self.finalize_loop()

    def finalize_loop(self):
        """Create, install, and immediately close a fresh asyncio event loop.

        NOTE(review): this deliberately leaves a *closed* loop installed as
        the current event loop — presumably to stop later code from lazily
        creating/using one; confirm against callers before changing.
        """
        import asyncio
        loop = asyncio.new_event_loop()
        try:
            asyncio.set_event_loop(loop)
        finally:
            # Always release the loop's resources, even if installation fails.
            loop.close()

    def update_allow_domains(self, domains):
        """Append every plausible domain (non-empty and containing a dot) to
        the shared allowed_domains list, then log the final list."""
        for domain in domains:
            if not domain or '.' not in domain:
                continue
            try:
                self.allowed_domains.append(domain.strip())
            except Exception as e:
                self.logger.debug(f'Append domain:<{domain}> failed, error; {e}')
        self.logger.debug((SPIDER_INIT_J + 'allowed domains is: %s') % (self.job_id, self.allowed_domains))

    def setup_db(self, crawler=None):
        """Create the default db provider and connect spider signal handlers."""
        settings = crawler.settings
        self.server = self.get_db_provider(settings, "DEFAULT_DB_CONFIG")
        crawler.server = self.server
        # Any request/item activity counts as "busy" and refreshes the idle timer.
        handler_map = (
            (self.spider_idle, signals.spider_idle),
            (self.request_scheduler_popped, request_scheduler_popped),
            (self.busy, signals.item_scraped),
            (self.busy, signals.request_received),
            (self.busy, signals.request_scheduled),
            (self.busy, signals.request_dropped),
        )
        for receiver, sig in handler_map:
            crawler.signals.connect(receiver, signal=sig)

    def update_field(self, crawler=None):
        """Resolve db-related fields (start_url_key, db_batch_size,
        db_encoding) from crawler settings, validating each one.

        :param crawler: crawler to read settings from; falls back to
            ``self.crawler`` when omitted.
        :raises ValueError: when no crawler is available, the resolved
            start_url_key is blank, or db_batch_size is not an integer.
        """
        if crawler is None:
            crawler = getattr(self, 'crawler', None)
        if crawler is None:
            raise ValueError("crawler is required")
        settings = crawler.settings
        if self.start_url_key is None:
            self.start_url_key = settings.get(
                'DB_START_URLS_KEY', defaults.START_URLS_KEY,
            )
        # The key template may contain %(name)s — bind it to this spider.
        self.start_url_key = self.start_url_key % {'name': self.name}
        if not self.start_url_key.strip():
            raise ValueError("start_url_key must not be empty")
        if self.db_batch_size is None:
            self.db_batch_size = settings.getint('CONCURRENT_REQUESTS')
        try:
            self.db_batch_size = int(self.db_batch_size)
        except (TypeError, ValueError):
            raise ValueError("db_batch_size must be an integer")
        if self.db_encoding is None:
            self.db_encoding = settings.get('DB_ENCODING', defaults.DB_ENCODING)
        # Fix: the original message left the "(batch size: ..." parenthesis
        # unclosed.
        self.logger.info("Reading start URLs from db key '%(start_url_key)s' "
                         "(batch size: %(db_batch_size)s, encoding: %(db_encoding)s)",
                         self.__dict__)

    def get_db_provider(self, settings, db_config_key):
        """Build a db provider from the settings entry *db_config_key*.

        On failure the error is logged, a reactor shutdown is scheduled in
        10 seconds (the spider cannot run without its db), and the original
        exception is re-raised to the caller.
        """
        def _def_close():
            reactor.stop()

        init_params = dict(settings.get(db_config_key))
        try:
            return get_provider(init_params.pop("db_type", ProviderType.SSDB),
                                logger=self.logger, **init_params)
        except Exception:  # narrowed from a bare except; still re-raised below
            self.logger.error(f'Error when initial provider: {init_params} ,error : {traceback.format_exc()}')
            reactor.callLater(10, _def_close)
            raise

    def init_looping_calls(self):
        """Start the periodic queue-check and status-report tasks, then
        schedule the initial start-url push after a configured boot delay."""
        self.logger.info((SPIDER_INIT_J + 'Initial period tasks: (queue_check:%s), (status_check:%s)') %
                         (self.job_id, self.msg_check_period, self.status_check_period))
        self.q_lc = task.LoopingCall(self.check_queue)
        self.s_lc = task.LoopingCall(self.report_crawl_status)
        self.q_lc.start(self.msg_check_period, now=False)
        self.s_lc.start(self.status_check_period, now=False)
        self.lc_list.extend([self.q_lc, self.s_lc])

        # Give the rest of the system time to finish booting before seeding
        # start urls into the db queue.
        system_start_defer_time = int(self.base_config_from_conf.get('scrapy_settings.start_defer_time', 35))
        task.deferLater(reactor, system_start_defer_time, self.init_start_url)

    @staticmethod
    def _is_debug(conf_from_conf):
        """Return True when the config enables debug mode ('debug.enable' == 1)."""
        return int(conf_from_conf.get('debug.enable', 0)) == 1

    def add_looping_call(self, func, interval=30, now=False):
        """Register and start an extra LoopingCall for *func*.

        Only bound methods of this spider are accepted; anything else is
        skipped with a debug log. Best-effort: failures are logged, never
        raised.

        :param func: bound method of this spider to run periodically
        :param interval: seconds between invocations
        :param now: run immediately on start when True
        """
        import inspect
        try:
            if inspect.ismethod(func) and hasattr(self, func.__name__):
                lc = task.LoopingCall(func)
                lc.start(interval=interval, now=now)
                self.lc_list.append(lc)
            else:
                self.logger.debug('Add looping calls failed, func may be a not function')
        except Exception:
            # Was a silent bare `except: pass`; keep the best-effort contract
            # but record what actually went wrong.
            self.logger.debug('Add looping call for %r failed: %s' % (func, traceback.format_exc()))

    def report_crawl_status(self):
        """Log a periodic JSON snapshot of item/response counters for this job."""
        stats = self.stats
        payload = {
            'crawled': stats.get_value('item_scraped_count') or 0,
            'job_id': self.job_id,
            'total_response': stats.get_value('response_received_count') or 0,
        }
        self.logger.info((CRAWL_STAT_S_J + json.dumps(payload)) % self.job_id)

    def get_ext_requests_or_urls(self, data=None):
        """Hook for subclasses: return extra start Requests/urls (default: none)."""
        return []

    def clear_start_urls(self, crawler):
        """Drop all queued start urls unless incremental crawling is enabled.

        Best-effort: any failure is logged at debug level and swallowed so
        spider startup is never blocked by a stale queue.
        """
        try:
            crawl_increment = crawler.settings["CRAWL_INCREMENT"]
            if not crawl_increment:
                self.server.queue_clear(self.start_url_key)
                self.logger.info(
                    (SPIDER_INIT_J + 'Drop start url %s item for %s') % (self.job_id, self.start_url_key, self.name))
        except Exception:  # narrowed from a bare except
            err_msg = traceback.format_exc()
            self.logger.debug(
                (SPIDER_INIT_ERR_J + 'Failed to clear start request or url, error: \r\n %s') % (self.job_id, err_msg))

    def start_requests(self):
        """Yield db-backed requests once seeding has finished, else nothing."""
        if not self.job_started_in_spider:
            return []
        return self.next_requests()

    def make_requests_from_url(self, url):
        """Build a Request tagged with this job's id, marking that header as
        relevant for duplicate filtering."""
        return Request(url, headers={"job_id": self.job_id}, meta={"headers_4_dup": ['job_id']})

    def init_start_url(self):
        """Collect start urls/Requests (class attribute plus subclass-provided
        externals), serialize each as a Request and push it onto the db
        start-url queue, then flag the job as started in this spider.
        """
        patch_global_logger(self.logger)
        try:
            externals = self.get_ext_requests_or_urls()
            if externals:
                if isinstance(externals, (list, tuple)):
                    self.start_urls.extend(externals)
                elif isinstance(externals, str) or issubclass(type(externals), Request):
                    self.start_urls.append(externals)
                else:
                    self.logger.debug((SPIDER_INIT_J + 'Invalid external start request or url') % self.job_id)
            # if self.start_requests():
            #     self.start_urls.append(next(self.start_requests()))
        except Exception as err:
            self.logger.debug((SPIDER_INIT_ERR_J + 'Failed to get external start request or url, '
                                                   'error: \r\n %s') % (self.job_id, err),
                              level='info')
        # set() both de-duplicates seeds and discards their original order.
        _urls = set(self.start_urls)
        while len(_urls):
            item = _urls.pop()
            if not item:
                self.logger.info((SPIDER_INIT_J + 'Empty start url item for %s') % (self.job_id, self.name))
                continue
            if not issubclass(type(item), Request):
                # str url
                item = Request(url=item)

            # Mark the request so downstream components can tell seeds apart
            # from requests discovered during crawling.
            if item.meta is None:

                item.meta = {'_is_start_request': True}
            else:
                item.meta['_is_start_request'] = True

            self.convert_request_with_filter(item)
            url = item.url
            self.logger.info((SPIDER_INIT_J + 'Send request for url:%s to queue:%s') % (self.job_id, url, self.name))
            # Serialize: Request -> dict -> pickled bytes for the db queue.
            item = request_to_dict(item, self)
            item = dumps(item)

            try:
                self.server.queue_push(self.start_url_key, item)
                self._keymsg_t['content'] = (SPIDER_INIT_J + 'Send url:%s to queue:%s') % (self.job_id, url, self.name)
                self.logger.info(json.dumps(self._keymsg_t))
            except Exception as e:
                self._keymsg_t['content'] = (SPIDER_INIT_ERR_J + 'Send url:<%s> to ssdb failed, '
                                                                 'ssdb may be not connected!, error: \r\n %s') % (
                                                self.job_id, url, e)
                self.logger.error(json.dumps(self._keymsg_t))
        self._keymsg_t['content'] = (SPIDER_INIT_J + 'Set job started in spider True') % self.job_id
        self.logger.debug(json.dumps(self._keymsg_t))
        # From here on spider_idle / start_requests may start pulling seeds.
        self.job_started_in_spider = True

    def convert_request_with_filter(self, item):
        """Stamp the request with this job's id and mark that header as
        relevant for duplicate filtering."""
        if not item.headers:
            item.headers = {}
        item.headers["job_id"] = self.job_id
        if isinstance(item.meta, dict):
            item.meta["headers_4_dup"] = ['job_id']

    def check_queue(self):
        """Drain the framework command queue (queues[0]) and act on commands.

        Recognised commands: 'stop_crawler' (deliberately ignored here) and
        'stop_spider'/'stop_all'/'stop' (acknowledge on queues[1], then close
        the engine). Runs periodically via a LoopingCall; never raises.
        """
        try:
            if self.queues[0].qsize():
                self.logger.debug((LC_J + 'check message queues: found') % self.job_id)
                while self.queues[0].qsize():
                    try:
                        q = self.queues[0].get_nowait()
                        if q['message_code'] == 'stop_crawler':
                            # Crawler-level stop is handled elsewhere.
                            pass
                        elif q['message_code'] in ['stop_spider', 'stop_all', 'stop']:
                            try:
                                command = {
                                    'message_code': 'spider_closed',
                                    'spider_name': self.name,
                                    'reason': "shutdown",
                                    'time_stamp': time.time()
                                }
                                self.queues[1].put(command)
                                self.logger.info("Successful to send closed message to framework")
                            except Exception:  # narrowed from a bare except
                                self.logger.error("Failed to send closed message to framework")
                            self._keymsg_t['content'] = (LC_J + 'Check queue message: stop spider') % self.job_id
                            self.logger.info(json.dumps(self._keymsg_t))
                            self.crawler.engine.close()
                    except Exception as e:
                        self._keymsg_t['content'] = (LC_J + 'Check q error: \r\n %s') % (self.job_id, e)
                        self.logger.warning(json.dumps(self._keymsg_t))
            else:
                self.logger.debug((LC_J + 'Check message queues: not found') % self.job_id)
        except Exception as e:
            # Fix: was a bare print(e); route the failure through the logger.
            self.logger.warning('check_queue failed: %s' % e)

    def request_scheduler_popped(self):
        """Signal handler: a request was popped from the scheduler.

        Treat the pop as activity and refresh the idle timer, matching what
        busy() does for the other activity signals.
        """
        # Fix: the log claimed "Reset idle times" but _last_req_time was
        # never updated, so scheduler pops could not delay the idle shutdown.
        self._last_req_time = time.time()
        self.logger.debug("Reset idle times since request popped from scheduler")

    def spider_idle(self):
        """Scrapy idle-signal handler: schedule more db requests and decide
        whether the spider is allowed to close.

        :raises DontCloseSpider: while seeding has not finished, while the
            idle timeout has not elapsed, or when closing after idle is
            disabled entirely.
        """
        self.logger.debug((SPIDER_IDLE_J + 'timestamp %s') %
                          (self.job_id, time.time()))
        if not self.job_started_in_spider:
            # Seeds not pushed yet (init_start_url is deferred) — stay alive.
            self.logger.debug((SPIDER_IDLE_J + 'Job started don\'t close, system defer time... ') % self.job_id)
            raise DontCloseSpider
        self.schedule_next_requests()
        if self.close_after_idle:
            if not self.force_to_close_spider:
                if time.time() - self._last_req_time < self.close_spider_after_idle_seconds:
                    self.logger.debug((SPIDER_IDLE_J + 'Don\'t close spider since idle_time not exceeded.')
                                      % self.job_id)
                    raise DontCloseSpider
                else:
                    # Idle timeout exceeded: fall through without raising so
                    # scrapy proceeds to close the spider.
                    self.logger.info((SPIDER_IDLE_J + 'start idle time exceed %s in %s seconds, '
                                                      'now: %s, last_req: %s') % \
                                     (self.job_id, self.close_spider_after_idle_seconds,
                                      time.time() - self._last_req_time, time.time(), self._last_req_time))
        else:
            self.logger.debug((SPIDER_IDLE_J + 'Don\'t close spider. Spider may not started or '
                                               'spider not allowed to close after idle.') % self.job_id)
            raise DontCloseSpider

    def make_request_from_data(self, data):
        """Turn a raw db-queue item into a Request.

        Tries two decodings in order:
        1. a plain url: bytes beginning with http/phantom/chrome;
        2. a pickled request dict (picklecompat.loads + request_from_dict).
        Returns None when neither works; failures are logged at debug level.
        """
        try:
            if isinstance(data, bytes) and re.match(b'^http|phantom|chrome.*', data):
                url = bytes_to_str(data, self.db_encoding)
                return self.make_requests_from_url(url)
        except Exception:  # narrowed from bare except; fall through to pickle path
            err_msg = traceback.format_exc()
            self.logger.debug('Current item get from ssdb maybe a request, error:\r\n{err}'.format(err=err_msg))
        try:
            request_dict = loads(data)
            return request_from_dict(request_dict, self)
        except Exception:  # narrowed from bare except; caller handles None
            err_msg = traceback.format_exc()
            self.logger.debug(
                'Current item get from ssdb is neither a url nor request, error:\r\n{err}'.format(err=err_msg))

    def schedule_next_requests(self):
        """Pull the next batch of db requests and feed them into the engine."""
        # TODO: While there is capacity, schedule a batch of db requests.
        engine = self.crawler.engine
        for request in self.next_requests():
            # Every scheduled request counts as activity for the idle timer.
            self._last_req_time = time.time()
            engine.crawl(request, spider=self)

    def next_requests(self):
        """Generator: pop up to db_batch_size items from the start-url queue
        and yield the Requests built from them.

        Stops early when the queue runs dry; yields nothing at all while the
        spider is being force-closed.
        """
        self._keymsg_t['content'] = 'job_id:%s try to get url from ssdb:' % self.job_id
        self.logger.debug(json.dumps(self._keymsg_t))
        if self.force_to_close_spider:
            return
        pop = self.server.queue_pop
        produced = 0
        # TODO: Use ssdb pipeline execution.
        while produced < self.db_batch_size:
            raw = pop(self.start_url_key)
            if not raw:
                # Queue empty.
                break
            request = self.make_request_from_data(raw)
            if request is None:
                self._keymsg_t['content'] = "Request not made from data: %r" % raw
                self.logger.debug(json.dumps(self._keymsg_t))
                continue
            self._last_req_time = time.time()
            yield request
            produced += 1
        if produced:
            self._keymsg_t['content'] = "Read {found} requests from '{start_url_key}'".format(found=produced,
                                                                                              start_url_key=self.start_url_key)
            self.logger.debug(json.dumps(self._keymsg_t))

    def closed(self, reason):
        """Scrapy lifecycle hook invoked when the spider closes.

        Closes the MQ connection when present, stops every registered looping
        call, flags the spider as force-closed and emits the final key logs.

        :param reason: scrapy close reason (e.g. 'finished', 'shutdown')
        """
        try:
            if hasattr(self.crawler, "mq_server"):
                try:
                    mq = self.crawler.mq_server
                    mq.close_connection()
                except Exception as e:
                    self.logger.error(f"关闭MQ失败：{e}")
            self.logger.info("Successful to send closed message to framework")
        except Exception:  # narrowed from a bare except
            self.logger.error("Failed to send closed message to framework")
        for _lc in self.lc_list:
            try:
                _lc.stop()
            except Exception:  # best-effort: the call may not be running
                pass
        # Makes next_requests stop handing out seeds immediately.
        self.force_to_close_spider = True
        try:
            self._keymsg_t['content'] = (SPIDER_CLOSE_J + ' spider_class:<%s>; stop reason:<%s>') % (
                self.job_id, self.name, reason)
            self.logger.info(json.dumps(self._keymsg_t))
            self._log_from_extension()
        except Exception as e:
            self._keymsg_t['content'] = (SPIDER_CLOSE_J + 'Spider close put queue message failed, %s') % (
                self.job_id, e)
            self.logger.warning(json.dumps(self._keymsg_t))

    def _log_from_extension(self):
        """Emit the final stat key-logs (finish state, request/response
        counts) from CoreStats' internal counters, then the stop marker."""
        stats = next(
            (ext.stats._stats for ext in self.crawler.extensions.middlewares
             if isinstance(ext, CoreStats)),
            None,
        )
        if stats:
            scraped = stats.get('item_scraped_count', 0)
            finish_state = (StateEnum.I_FINISH_WITH_OUTPUT if scraped > 0
                            else StateEnum.I_FINISH_NO_OUTPUT)
            self.write_key_log(finish_state, data={'item_scraped_count': scraped})
            self.write_key_log(StateEnum.I_REQUEST_COUNT,
                               data={'request_count': stats.get('downloader/request_count', 0)})
            self.write_key_log(StateEnum.I_RESPONSE_COUNT,
                               data={'response_count': stats.get('downloader/response_count', 0)})
        self.write_key_log(StateEnum.I_STOP)

    def common_item_assembler(self, response, item):
        """Fill in the attributes that every scraped item shares."""
        self._keymsg_t['content'] = 'Start to assemble common attributes'
        self.logger.debug(json.dumps(self._keymsg_t))
        item['name_first'] = self.name_first
        item['name_second'] = self.name_second
        item['uptime'] = int(time.time())
        item['do_time'] = datetime.now().strftime('%Y-%m-%d')
        item['version'] = '1'
        item['url'] = response.request.url

    # def page_monitor_item_assembler(self, response):
    #     item = PageUnreachableItem()
    #     self.common_item_assembler(response, item)
    #     item["proxy"] = response.meta.get("proxy", "")
    #     item["proxy_source"] = response.meta.get("proxy_source", "")
    #     return item

    @classmethod
    def _update_settings_in_from_cralwer(cls, crawler, crawler_config):
        """Unfreeze crawler settings, layer in proxy/business/db/scrapy and
        spider-specific settings, then re-freeze them.

        NOTE(review): "cralwer" is a typo for "crawler" in the method name;
        kept as-is because external callers may reference this name.
        """
        crawler.settings.frozen = False
        crawler.settings['SCRAPE_COUNT_KEY_NAME'] = 'SCRAPE_COUNT_KEY_NAME'
        cls.update_proxy_setting(crawler.settings, crawler_config)
        cls.update_business_setting(crawler.settings, crawler_config)
        cls.update_db_setting(crawler.settings, crawler_config, cls.base_config_from_conf)
        cls.update_scrapy_settings(crawler.settings, crawler_config, cls.base_config_from_conf)
        # Spider-specific settings are applied last so they win over defaults.
        if hasattr(crawler.spidercls, 'specific_settings'):
            specific_settings = crawler.spidercls.specific_settings
            cls._update_specific_setting_from_spider(crawler.settings, specific_settings)
        cls.update_pipeline_limit_settings(crawler.settings)
        crawler.settings.freeze()

    @staticmethod
    def update_pipeline_limit_settings(setting):
        """Invert ITEM_PIPELINES_LIMITATION (item -> pipeline(s)) into
        PIPELINE_ITEMS_LIMITATION (pipeline -> set of items)."""
        inverted = {}
        limitation = setting.get("ITEM_PIPELINES_LIMITATION", {})
        if limitation and isinstance(limitation, abc.MutableMapping):
            for item, pipelines in limitation.items():
                if isinstance(pipelines, str):
                    # A single pipeline name given directly as a string.
                    inverted.setdefault(pipelines, set()).add(item)
                elif isinstance(pipelines, abc.MutableSequence):
                    for pipeline in pipelines:
                        inverted.setdefault(pipeline, set()).add(item)
        setting["PIPELINE_ITEMS_LIMITATION"] = inverted

    @staticmethod
    def update_proxy_setting(settings, crawler_config):
        """Derive proxy-related settings from the crawler config."""
        proxy_config = crawler_config.get('proxy_config', {})
        settings['PROXY_MAX_USE'] = str_to_int(proxy_config.get('proxy_max_use_count', 0))
        # proxy_config wins; fall back to a top-level 'use_proxy' flag.
        settings['USE_PROXY'] = str_to_bool(
            proxy_config.get('use_proxy', crawler_config.get('use_proxy', True)))
        settings['PROXY_Q_NAME'] = proxy_config.get('proxy_q_name') or 'spider_proxy'
        settings['PROXY_TYPE'] = proxy_config.get('type', [3, 2, 1])

    @staticmethod
    def update_business_setting(settings, crawler_config):
        """Copy business identity fields (names, job id) into settings."""
        # Removed an unused local (`_crawler_config = crawler_config.get('job_config', {})`).
        settings['NAME_FIRST'] = crawler_config.get('name_first')
        settings['NAME_SECOND'] = crawler_config.get('name_second')
        settings['JOB_ID'] = crawler_config.get('serialNumber')

    @staticmethod
    def update_db_setting(settings, crawler_config, base_cfg_mon):
        """Populate db/ssdb-related settings from crawler and base config.

        DEFAULT_DB_CONFIG and SEED_DB_CONFIG carry identical content but must
        be distinct objects (callers may mutate them independently), so they
        are built by a shared local factory instead of duplicated literals.
        """
        spider_config = crawler_config.get('spider_config', {})
        result_storage = spider_config.get('result_storage', {})

        db_reconnect_retry = base_cfg_mon.get('db_conf.db_reconnect_retry', 5)
        sleep_time_once_failed = base_cfg_mon.get('db_conf.sleep_time_once_failed', 1)
        settings['DB_RECONNECT_RETRY'] = str_to_int(db_reconnect_retry)
        settings['SLEEP_TIME_ONCE_FAILED'] = str_to_int(sleep_time_once_failed)

        settings['DATABASE_DATA_SETTING'] = result_storage
        settings['DATABASE_DATA_SETTING'].update({
            'seed_host': base_cfg_mon.get('ssdb_host'),
            'seed_port': base_cfg_mon.get('ssdb_port'),
        })

        def _ssdb_config():
            # Fresh dict (including the nested db_setting) on every call so
            # the two settings entries never alias each other.
            return {
                "sleep_time_once_failed": int(sleep_time_once_failed),
                "db_reconnect_retry": int(db_reconnect_retry),
                "db_setting": {
                    "db_type": ProviderType.SSDB,
                    "host": base_cfg_mon.get('ssdb_host'),
                    "port": int(base_cfg_mon.get('ssdb_port'))
                }
            }

        settings["DEFAULT_DB_CONFIG"] = _ssdb_config()
        settings["SEED_DB_CONFIG"] = _ssdb_config()

        settings['SSDB_PARAMS'] = {'ssdb_cls': 'pyssdb.Client'}
        settings['TABLE_NAME'] = base_cfg_mon.get('ssdb_table_name')

    @staticmethod
    def update_scrapy_settings(settings, crawler_config, base_cfg_mon):
        """Map concurrency/delay/cookie/dedup crawl options into scrapy settings."""
        settings['CONCURRENT_REQUESTS'] = str_to_int(crawler_config.get('concurrent', 16))
        settings['DOWNLOAD_DELAY'] = str_to_int(crawler_config.get('download_delay', 0))
        settings['COOKIES_ENABLED'] = str_to_bool(crawler_config.get('enable_cookie', False))

        settings['ENV'] = crawler_config.get("env", "")
        name_first = crawler_config.get('name_first')
        name_second = crawler_config.get('name_second')
        # Test environment keys get a 'test_' prefix so they never collide
        # with production dedup keys.
        prefix = 'test_' if settings['ENV'] == "test" else ''
        settings['DUPE_FILTER_KEY'] = "".join([prefix, name_first, '_', name_second])
        settings['PARSER_DUPE_FILTER_KEY'] = "".join([prefix, "Parser_", name_first, '_', name_second])
        settings["SCHEDULER_DUPEFILTER_KEY"] = defaults.SCHEDULER_DUPEFILTER_KEY
        settings['CRAWL_INCREMENT'] = str_to_bool(crawler_config.get('crawl_increment', True))
        single_process_debug = (int(base_cfg_mon.get('debug.enable', 0)) == 1
                                and crawler_config.get('process_num', 1) == 1)
        if single_process_debug:
            settings['SYS_DEFER_TIME'] = 2
            settings['DUPEFILTER_DEBUG'] = True
        else:
            settings['SYS_DEFER_TIME'] = str_to_int(base_cfg_mon.get('scrapy_settings.start_defer_time', 40))

        settings['ERROR_SEED_MAX_RETRY'] = str_to_int(base_cfg_mon.get('seed_config.error_max_retry', 5))
        settings['ERROR_REQUEST_MAX_RECYCLE'] = str_to_int(
            base_cfg_mon.get('request_config.request_failed_max_recycle', 3))

    @staticmethod
    def _update_specific_setting_from_spider(crawler_settings, _custom_settings):
        """Merge a spider's specific_settings into the crawler settings.

        Existing dict settings are merged (custom keys win), list settings are
        appended to, and any other type replaces the existing value. For new
        HTTPERROR_ALLOWED_CODES entries the same codes are also removed from
        RETRY_HTTP_CODES. Best-effort: a failure on one setting never blocks
        the rest.
        """
        for setting in _custom_settings:
            try:
                custom_value = _custom_settings[setting]
                if setting in crawler_settings:
                    if isinstance(custom_value, dict):
                        _s = dict(copy.deepcopy(crawler_settings[setting]))
                        _s.update(custom_value)
                    elif isinstance(custom_value, list):
                        _s = list(copy.deepcopy(crawler_settings[setting]))
                        _s += custom_value
                    else:
                        # Strings and all other types replace wholesale.
                        _s = custom_value
                    crawler_settings.set(setting, _s, 'spider')
                else:
                    if setting == "HTTPERROR_ALLOWED_CODES":
                        # Codes allowed through must not also be retried.
                        for code in custom_value:
                            if code in crawler_settings["RETRY_HTTP_CODES"]:
                                crawler_settings["RETRY_HTTP_CODES"].remove(code)
                    crawler_settings[setting] = custom_value
            except Exception:  # narrowed from a bare except; per-setting best effort
                pass

    def custom_init(self, *args, **kwargs):
        """Hook for subclasses to perform extra initialisation; default no-op."""
        self.logger.info('custom_init not overwritten')

    def busy(self):
        # Signal handler: any engine activity refreshes the idle timestamp
        # used by spider_idle's shutdown decision.
        self._last_req_time = time.time()
