# -*- coding: utf-8 -*-
import os
import json
import time
import redis
from scrapy.log import logger
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy import signals, Request
from RabbitmqConn import RabbitConn
from scrapy.utils.log import configure_logging
from tools.tools import get_web_title
from scrapy_redis.spiders import RedisCrawlSpider
from config import RABBITMQ_CONFIG, REDIS_SETTINGS, LOG_PATH, LOG_LEVEL, URL_REGULAR_FILTER


class TaskSpider(RedisCrawlSpider):
    """Redis-backed crawl spider for a single crawl task.

    Walks a site from its seed URL, following every extracted link except
    those matching the global URL_REGULAR_FILTER deny patterns, and publishes
    each crawled page URL together with the task metadata to a RabbitMQ queue.

    The spider name (and therefore its redis keys) is derived from the task's
    clue id, so several task spiders can share one redis instance.
    """
    # name = 'imgspider'  # real name is set per-task in __init__ as "clue_id_<id>"
    custom_settings = dict(
        REDIS_START_URLS_KEY='%(name)s:start_urls',
        REDIS_HOST=REDIS_SETTINGS['HOST'],
        REDIS_PORT=REDIS_SETTINGS['PORT'],
        # 'JOBDIR': '/data/auto_requests_info/'
        SCHEDULER="scrapy_redis.scheduler.Scheduler",
        DUPEFILTER_CLASS="scrapy_redis.dupefilter.RFPDupeFilter",
        # Keep the request queue and dupefilter in redis across restarts.
        SCHEDULER_PERSIST=True,
        REDIS_PARAMS={
            "db": REDIS_SETTINGS['DB'],
            'password': REDIS_SETTINGS['PASSWORD']
        },
        MYEXT_ENABLED=True,      # enable the smart-idle-close extension below
        # Allowed idle ticks; the extension counts one tick every 5 seconds
        # and closes the spider once IDLE_NUMBER is reached.
        IDLE_NUMBER=1,

        # Register the idle-close extension in EXTENSIONS to activate it.
        EXTENSIONS={
                    'shijue_auto_crawler.extensions.RedisSpiderSmartIdleClosedExensions': 500,
                },
    )

    def __init__(self, rule):
        """Configure the spider from a task ``rule`` descriptor.

        :param rule: task descriptor providing ``task_id``, ``company_id``,
            ``clue_id``, ``clue_name``, ``start_urls`` and ``allowed_domains``.
        """
        # NOTE(review): direct redis access is only needed by the disabled
        # persistence logic at the bottom of this class; re-enable together.
        # self.redis_conn = redis.StrictRedis(
        #     host=REDIS_SETTINGS['HOST'],
        #     port=REDIS_SETTINGS['PORT'],
        #     db=REDIS_SETTINGS['DB'],
        #     # password=REDIS_SETTINGS['PASSWORD']
        # )
        self.log_setting()
        self.task_id = rule.task_id
        self.company_id = rule.company_id
        self.clue_id = rule.clue_id
        self.start_urls = rule.start_urls
        logger.info("起始url: {}".format(rule.start_urls))
        # self.redis_conn.lpush("clue_id_"+self.clue_id+":start_urls", rule.start_urls[0])

        self.name = "clue_id_{}".format(rule.clue_id)
        # self.redis_key = '{}:start_urls'.format(self.name)
        self.clue_name = rule.clue_name
        self.mq_conn = RabbitConn(RABBITMQ_CONFIG, 'url')
        self.queue_data = {}  # last payload published to RabbitMQ (rebuilt per response in parse_item)
        self.allowed_domains = rule.allowed_domains
        logger.info('allowed_domains : {}'.format(rule.allowed_domains))

        # Follow every link (allow='') except those matching the global deny
        # list.  self.rules MUST be assigned before super().__init__(), which
        # compiles the rules.
        self.rules = (Rule(
            LinkExtractor(
               # allow=r'{}'.format(rule.allow_url)),
               # allow=r'.*'),
                deny=URL_REGULAR_FILTER,
                allow=''),
            callback='parse_item',
            follow=True),)
        super(TaskSpider, self).__init__()

    def log_setting(self):
        """Route scrapy logging to a per-day log file under LOG_PATH."""
        current_date = time.strftime("%Y%m%d", time.localtime(time.time()))
        log_path = os.path.join(LOG_PATH, "task_{}.log".format(current_date))
        configure_logging({'LOG_FILE': log_path, "LOG_LEVEL": LOG_LEVEL})

    def start_requests(self):
        """Seed the crawl with the first start URL.

        The site's <title> (fetched up front) becomes the clue name attached
        to every item this spider emits.
        """
        web_title = get_web_title(self.start_urls[0])
        self.clue_name = web_title
        # Request() with the default callback is exactly what the deprecated
        # make_requests_from_url() helper returned.
        yield Request(self.start_urls[0], dont_filter=True)

    def parse_item(self, response):
        """Wrap the crawled URL with the task metadata and push it to RabbitMQ."""
        # Build a fresh payload per response instead of mutating a shared
        # dict in place, so stale keys can never leak between items.
        self.queue_data = {
            'url': response.url,
            'clue_id': self.clue_id,
            'clue_name': self.clue_name,
            'company_id': self.company_id,
            'task_id': self.task_id,
        }
        logger.debug('crawl spider running......{}'.format(response.url))
        self.process_item()

    def process_item(self):
        """Serialize the current payload and publish it to the RabbitMQ queue.

        :return: None
        """
        body = json.dumps(dict(self.queue_data))
        self.mq_conn.send_data(body)
        logger.info('send data to rabbitmq: {}'.format(body))


    # NOTE(review): disabled graceful-close / persistence logic, kept below
    # for reference.  If re-enabled, note that it depends on self.redis_conn
    # (commented out in __init__) and references self.mq_task, which is never
    # initialised — presumably it should be self.mq_conn; verify before use.
    """
    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super(TaskSpider, cls).from_crawler(crawler,*args,  **kwargs)
        crawler.signals.connect(spider.spider_closed, signals.spider_closed)
        return spider


    def spider_closed(self, spider):
        # On spider close: shut the rabbit connection and re-serialize the
        # rule info so the next (persisted) run can pick it up.
        if self.redis_conn.zcard(self.name+':requests'):
            rule_info = dict(
                task_id=self.task_id,
                company_id=self.company_id,
                clue_id=self.clue_id,
                clue_name=self.clue_name,
                url=self.start_urls[0],
            )

            rule_json = json.dumps(rule_info)
            repush_count = 0
            while repush_count < 3:
                try:
                    self.mq_task.send_data(rule_json)
                    break
                except redis.exceptions.WatchError:
                    pass
                repush_count += 1
            logger.info("任务处理异常, 爬虫关闭")
            logger.info("重新生成任务, 传入队列 : {}".format(rule_json))
        else:
            dupe_name = self.name+':dupefilter'
            self.redis_conn.delete(dupe_name)
            logger.info('清除去重信息: {}'.format(dupe_name))
            # self.redis_conn.delete(persist_name)
            # logger.info('清除队列缓存信息 : {}'.format(persist_name))
        self.mq_conn.close()
        logger.info('Closing {} spider'.format(spider.name))
        """



