#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 2021/8/3 11:43
# @Author  : Samge
import time
from scrapy.crawler import CrawlerProcess


# 自定义爬虫命令与传递日志队列到爬虫
from itkz.resources.utils import class_util


def default_crawl(log_queue, spider_data):
    """Default crawler bootstrap: resolve a spider class by path and run it.

    Reflectively loads the spider class named in ``spider_data``, injects the
    proxy/agent/splash switches into its ``custom_settings``, and starts a
    blocking :class:`CrawlerProcess` run.

    :param log_queue: queue used to forward crawler log messages to the
        PyQt UI; must not be None/empty.
    :param spider_data: dict of crawler options. Must contain
        ``spider_path`` (dotted class path); may contain ``use_proxy``,
        ``use_agent``, ``use_splash`` flags.
    :raises ValueError: if ``log_queue`` is not provided.
    :return: None. Blocks until the crawl finishes (``process.start()``).
    """
    if not log_queue:
        raise ValueError('请配置日志消息队列：log_queue')
    spider_path = spider_data.get('spider_path')
    if not spider_path:
        log_queue.put('请先配置爬虫类路径: spider_data 中的 【spider_path】')
        # Bug fix: previously fell through and called get_class(None);
        # report the configuration error and stop instead.
        return
    use_proxy = spider_data.get('use_proxy')
    use_agent = spider_data.get('use_agent')
    use_splash = spider_data.get('use_splash')

    start_time = time.time()
    log_queue.put(f"正在进行反射实例化爬虫对象：{spider_path}")
    spider_obj = class_util.get_class(class_path=spider_path)
    # Bug fix: message previously duplicated the "共耗时：" phrase.
    log_queue.put(f"实例化完成，共耗时：{time.time() - start_time} 秒")

    # Copy before mutating: custom_settings is a class attribute, and
    # writing flags into it directly would leak state into every later
    # use of the spider class.
    custom_settings = dict(spider_obj.custom_settings or {})
    custom_settings['IS_USE_PROXY'] = use_proxy
    custom_settings['IS_USE_AGENT'] = use_agent
    custom_settings['IS_USE_SPLASH'] = use_splash
    process = CrawlerProcess(settings=custom_settings)
    process.crawl(spider_obj, log_queue=log_queue, spider_data=spider_data)
    process.start()
