import json
from scrapy.spiders import Spider
from scrapy.http import Request
from squirrel_core.commons.utils.tools import gen_rowkey, calc_str_md5, str_to_bool
from squirrel_core.item.base_item import BaseItem
from squirrel_core.middleware import USER_AGENT_MIDDLEWARE_CLASS_PATH, PROXY_MIDDLEWARE_CLASS_PATH, \
    REQUEST_HASH_MIDDLEWARE_CLASS_PATH, ITEM_CHECK_SPIDER_MIDDLEWARE_CLASS_PATH, \
    REDIRECT_MIDDLEWARE_CLASS_PATH, DEFAULT_REDIRECT_MIDDLEWARE_CLASS_PATH
from squirrel_core.frame.spiders import Spiders
from squirrel_core.commons.utils.logger import Logging

from squirrel_core.pipeline import BASE_PIPELINE_CLASS_PATH, PROCEDURE_PIPELINE_CLASS_PATH

__all__ = ["MakakaSpider"]


class MakakaSpider(Spiders, Spider):
    """Base spider that plugs squirrel_core pipelines/middlewares into Scrapy.

    Seed requests come from an external queue (``kwargs['queue'][0]``) rather
    than ``start_urls``; procedure results are pushed back through
    ``queues[1]``. Subclasses customise behaviour via ``custom_init``,
    ``custom_from_crawler``, ``init_custom_signals`` and
    ``get_ext_requests_or_urls`` (the latter provided by the ``Spiders`` base).
    """

    name = 'MakakaSpider'
    # shared storage handle; assigned onto the class in from_crawler
    server = None
    allowed_domains = []
    custom_settings = {
        'ITEM_PIPELINES': {
            PROCEDURE_PIPELINE_CLASS_PATH: 300,
            BASE_PIPELINE_CLASS_PATH: 400,
        },
        'DOWNLOADER_MIDDLEWARES': {
            USER_AGENT_MIDDLEWARE_CLASS_PATH: 998,
            PROXY_MIDDLEWARE_CLASS_PATH: 1000,
            REQUEST_HASH_MIDDLEWARE_CLASS_PATH: 900,
            # disable Scrapy's built-in redirect middleware in favour of ours
            DEFAULT_REDIRECT_MIDDLEWARE_CLASS_PATH: None,
            REDIRECT_MIDDLEWARE_CLASS_PATH: 600
        },
        'SPIDER_MIDDLEWARES': {
            ITEM_CHECK_SPIDER_MIDDLEWARE_CLASS_PATH: 920,
        },
        'SCRAPE_COUNT_KEY_NAME': 'SCRAPE_COUNT_KEY_NAME',
        'COOKIES_ENABLED': True,
        # 'SCHEDULER': 'scrapy.core.scheduler.Scheduler',
        # 'DUPEFILTER_CLASS': 'scrapy.dupefilters.RFPDupeFilter',
        'RETRY_TIMES': 3,
        'DOWNLOAD_DELAY': 0,
    }

    def __init__(self, *args, **kwargs):
        """Initialise the spider from the ``config`` dict passed via kwargs.

        Recognised kwargs: ``config`` (crawler configuration dict), ``queue``
        (pair of in/out queues), ``thread_index`` (worker slot, default -1).
        """
        crawler_config = kwargs.get('config', {})
        self.logger = Logging()
        kwargs['logger'] = self.logger
        super(MakakaSpider, self).__init__(*args, **kwargs)
        Spider.__init__(self)
        self.stats = None
        self.logger.info(f"初始化爬虫：{self.name}")
        self.job_started_in_spider = False

        # queues[0]: inbound task data, queues[1]: outbound results
        self.queues = kwargs.get('queue')

        self.thread_index = kwargs.get('thread_index', -1)
        self.env = crawler_config.get("env", "")
        # prefix the name in the test environment so test runs are separable
        if self.env == "test":
            self.name = f"{self.env}_{crawler_config.get('spider_name', 'Comm_crawler')}"
        else:
            self.name = crawler_config.get('spider_name', 'Comm_crawler')
        self.enable_source_item_check = False
        # fix: default to {} so a missing 'proxy_config' key no longer raises
        # AttributeError on the chained .get()
        self.use_proxy = str_to_bool(crawler_config.get('proxy_config', {}).get('use_proxy'))
        spider_config = crawler_config.get('spider_config', {})

        domains = spider_config.get('domains', [])
        self.update_allow_domains(domains)

        self.name_second = crawler_config.get('name_second')
        self.name_first = crawler_config.get('name_first')
        self._keymsg_t = {'name_second': self.name_second, 'name_first': self.name_first}
        # looping-call handles; populated by init_looping_calls later
        self.q_lc = None
        self.s_lc = None

        self.force_to_close_spider = False
        try:
            # subclass hook; a failure here must not abort construction
            self.custom_init(*args, **kwargs)
        except Exception as e:
            self.logger.debug(f'custom_init error, {e}')

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Scrapy factory: apply config to settings, wire storage and stats.

        Also materialises the seed requests up-front into
        ``obj.init_requests`` so ``next_requests`` can drain them later.
        """
        config = kwargs.get('config', {})
        # NOTE(review): helper name contains a typo ('cralwer'); it is defined
        # on the Spiders base, so any rename must start there.
        cls._update_settings_in_from_cralwer(crawler, config)
        obj = super(MakakaSpider, cls).from_crawler(crawler, *args, **kwargs)
        obj.update_field(crawler)
        obj.setup_db(crawler)
        cls.server = obj.server
        obj.stats = crawler.stats
        obj.crawler = crawler
        obj.custom_from_crawler(crawler)
        obj.init_requests = list(obj._get_request())
        return obj

    def custom_from_crawler(self, crawler):
        """Post-``from_crawler`` hook; default wiring, overridable."""
        # fix: removed dead 'pass' statement that preceded real code
        self.clear_start_urls(crawler)
        self.init_looping_calls()
        self.init_custom_signals()

    def init_custom_signals(self):
        """Subclass hook for registering extra Scrapy signals."""
        pass

    def next_requests(self):
        """Yield the pre-built seed requests, draining ``init_requests``."""
        self.close_after_idle = True
        for _ in range(len(self.init_requests)):
            try:
                request = self.init_requests.pop()
            except IndexError:
                # fix: 'raise StopIteration' inside a generator is a
                # RuntimeError under PEP 479; 'return' ends it cleanly.
                return
            if not isinstance(request, Request):
                self.logger.log_more('get_ext_requests must return Request object')
            yield request

    def parse(self, response):
        """Default callback: assemble and emit one result item per response."""
        yield self.result_item_assembler(response)

    def _get_request(self):
        """Generate seed Requests from one unit of external queue data."""
        self.job_started_in_spider = True
        _data = self.get_external_data()
        if _data:
            for i in self.get_ext_requests_or_urls(_data):
                # plain URL strings are promoted to Request objects
                if isinstance(i, str):
                    i = Request(url=i)
                yield i

    def get_external_data(self):
        """Poll the inbound queue up to three times; return data or None."""
        retry_time = 3
        while retry_time:
            try:
                data = self.queues[0].get(timeout=0.05)
                if data:
                    return data
            except Exception:
                # typically queue.Empty on timeout — retry until exhausted
                pass
            retry_time -= 1

    def result_item_assembler(self, response):
        """Build the result item for one response; subclasses fill the fields."""
        item = BaseItem()
        # self.common_item_assembler(response, item)
        # item['rowkey'] = gen_rowkey(item, keys=('do_time', 'name_second'))
        # item['_id'] = calc_str_md5(json.dumps(dict(item), ensure_ascii=False))
        return item

    def upload_procedure(self, data):
        """Push procedure data onto the outbound queue as a JSON envelope."""
        _data = {'_data': data, '_data_type': 1}
        self.queues[1].put(json.dumps(_data))

    def close_spider_when_idle(self):
        """Flag the spider for shutdown once idle.

        Must be called by the RT spider to stop this spider.
        """
        self.close_after_idle = True
        self.force_to_close_spider = True
