# -*- coding: utf-8 -*-

# Define here the extensions for your Scrapy project
import sys
import json
import redis
from scrapy import signals
from scrapy.exceptions import NotConfigured
from logging import getLogger
from RabbitmqConn import RabbitConn
from config import RABBITMQ_CONFIG

logger = getLogger(__name__)


class RedisSpiderSmartIdleClosedExensions(object):
    """Scrapy extension that force-closes a Redis-backed spider after it has
    been idle for a configured number of idle signals (Scrapy emits
    ``spider_idle`` roughly every 5 seconds).

    On close, if the spider's Redis request queue is still non-empty the crawl
    is considered unfinished and the task is re-published to RabbitMQ;
    otherwise the dupefilter key is deleted so the next run starts clean.

    NOTE(review): the class name keeps the original misspelling "Exensions"
    because extensions are referenced by dotted path in the project's
    EXTENSIONS setting — renaming would break that configuration.
    """

    def __init__(self, idle_number, crawler):
        # idle_number: consecutive idle signals tolerated before the spider
        # is closed (~5 seconds per signal).
        self.crawler = crawler
        self.idle_number = idle_number
        self.idle_list = []
        self.idle_count = 0
        # Dedicated RabbitMQ connection used to re-queue unfinished tasks.
        self.mq_task = RabbitConn(RABBITMQ_CONFIG, 'task')

    def __del__(self):
        # Best-effort cleanup only: __del__ can run on a partially
        # initialized instance (if __init__ raised before mq_task was set)
        # or during interpreter shutdown, so it must never propagate.
        mq = getattr(self, 'mq_task', None)
        if mq is not None:
            try:
                mq.close()
            except Exception:
                pass

    @classmethod
    def from_crawler(cls, crawler):
        """Standard Scrapy hook: validate settings, build the extension and
        connect it to the spider lifecycle signals.

        Raises NotConfigured when MYEXT_ENABLED is not set, so Scrapy
        silently skips the extension.
        """
        if not crawler.settings.getbool('MYEXT_ENABLED'):
            logger.info("settings文件未配置空跑时间, MYEXT_ENABLED")
            raise NotConfigured

        # Number of idle signals to tolerate before closing
        # (default 2, i.e. ~10 seconds of idleness).
        idle_number = crawler.settings.getint('IDLE_NUMBER', 2)

        ext = cls(idle_number, crawler)

        crawler.signals.connect(ext.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(ext.spider_closed, signal=signals.spider_closed)
        crawler.signals.connect(ext.spider_idle, signal=signals.spider_idle)

        return ext

    def spider_opened(self, spider):
        spider.logger.info("opened spider {}, Allow waiting time:{} second".format(spider.name, self.idle_number*5))

    def spider_closed(self, spider):
        """On close: if the spider's Redis request zset is non-empty the task
        did not finish — re-publish it to RabbitMQ (up to 3 attempts);
        otherwise delete the dupefilter key.
        """
        if spider.server.zcard(spider.name + ':requests'):
            rule_info = dict(
                task_id=spider.task_id,
                company_id=spider.company_id,
                clue_id=spider.clue_id,
                clue_name=spider.clue_name,
                url=spider.start_urls[0],
            )
            rule_json = json.dumps(rule_info)

            repush_count = 0
            while repush_count < 3:
                try:
                    self.mq_task.send_data(rule_json)
                    logger.info("重新生成任务, 传入队列 : {}".format(rule_json))
                    break
                except redis.exceptions.WatchError:
                    # NOTE(review): catching a *redis* WatchError around a
                    # RabbitMQ publish looks wrong — confirm send_data can
                    # actually raise it, or catch the MQ client's errors.
                    pass
                repush_count += 1
            else:
                # BUGFIX: this used to log unconditionally, reporting a task
                # failure even when the re-publish above succeeded; with
                # while/else it now fires only when all 3 attempts failed.
                logger.info("任务处理异常, 爬虫关闭")
        else:
            # Queue drained: the crawl finished, so clear the dupefilter
            # so the next run is not blocked by stale fingerprints.
            dupe_name = spider.name + ':dupefilter'
            spider.server.delete(dupe_name)
            logger.info('清除去重信息: {}'.format(dupe_name))
        spider.mq_conn.close()
        spider.logger.info("closed spider {}, Waiting time exceeded {} second".format(spider.name, self.idle_number*5))

    def spider_idle(self, spider):
        """Called once at startup and then every ~5 seconds while the spider
        is idle: count consecutive idle ticks and close the spider once the
        threshold is exceeded.

        NOTE(review): this checks "clue_id_<id>:requests" while spider_closed
        checks "<spider.name>:requests" — confirm the two different keys are
        intentional.
        """
        if not spider.server.exists("clue_id_" + spider.clue_id + ":requests"):
            self.idle_count += 1
        else:
            # Requests pending again — restart the idle countdown.
            self.idle_count = 0

        if self.idle_count > self.idle_number:
            # Idle for too long: force-close the spider with a reason string.
            self.crawler.engine.close_spider(spider, 'Waiting time exceeded')