#!/usr/bin/python
# encoding=utf-8
__title__ = "worker2"
__author__ = "Roy Xin"
__license__ = 'MIT'
__copyright__ = 'Copyright 2018, Roy Xin'
"""
#引入你的爬虫
from twisted.internet import reactor, defer
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
import time
import logging
from scrapy.utils.project import get_project_settings
from shijue_auto_crawler.spiders.worker import WorkerSpider
from shijue_auto_crawler.model import WorkerRule
from RabbitmqConn import RabbitConn
from config import RABBITMQ_CONFIG
from logging import getLogger
import json

logger = getLogger(__name__)

#在控制台打印日志
configure_logging()
#CrawlerRunner获取settings.py里的设置信息
runner = CrawlerRunner(get_project_settings())

@defer.inlineCallbacks
def crawl():
    settings = get_project_settings()
    runner = CrawlerRunner(settings)
    mq_conn = RabbitConn(RABBITMQ_CONFIG, 'url')
    crawl_count = 0
    while crawl_count < 1000:
        method_frame, header_frame, body = mq_conn.channel.basic_get(
            queue=mq_conn.queue)
        if not body:
            logger.info('MQ队列暂无数据, 进程退出')
            time.sleep(5)
            break

        else:
            mq_conn.channel.basic_ack(delivery_tag=method_frame.delivery_tag)
            data = json.loads(body)
            rule = WorkerRule(data)
            yield runner.crawl(WorkerSpider, rule)
        crawl_count += 1
    mq_conn.close()
    reactor.stop()

crawl()
reactor.run() # the script will block here until the last crawl call is finished
"""

import sys
import time
import json
from logging import getLogger
from config import RABBITMQ_CONFIG
from RabbitmqConn import RabbitConn
from scrapy.crawler import CrawlerProcess,CrawlerRunner
from shijue_auto_crawler.model import WorkerRule
from scrapy.utils.project import get_project_settings
from shijue_auto_crawler.spiders.worker import WorkerSpider
from twisted.internet import reactor
from multiprocessing import Process,Queue

# Module-level logger for this consumer script (handlers/level are expected
# to be configured by the environment; none are set up here).
logger = getLogger('worker')

def run_spider(rule):
    """Run WorkerSpider once for *rule* in a dedicated child process.

    Twisted's reactor cannot be restarted after it stops, so every crawl
    gets a fresh process that starts and stops its own reactor.

    :param rule: a WorkerRule describing what the spider should crawl.
    :raises Exception: re-raises in the parent any exception the child
        reported through the queue.
    """
    def f(q):
        try:
            # Build the runner inside the child instead of relying on the
            # module-level ``runner`` global: that global only exists when
            # the script is run directly, and would not be inherited at all
            # under the 'spawn' multiprocessing start method.
            child_runner = CrawlerRunner(get_project_settings())
            child_runner.crawl(WorkerSpider, rule)
            deferred = child_runner.join()
            # Stop the reactor once all crawls finish (success or failure).
            deferred.addBoth(lambda _: reactor.stop())
            reactor.run()
            q.put(None)  # signal clean completion to the parent
        except Exception as e:
            q.put(e)  # ship the failure to the parent for re-raising

    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    result = q.get()  # blocks until the child reports None or an exception
    p.join()
    if result is not None:
        raise result


if __name__ == '__main__':
    # Safety cap: consume at most this many MQ messages per run, so the
    # process recycles periodically instead of running forever.
    MAX_TASKS = 500

    settings = get_project_settings()
    # NOTE: kept for the forked child processes, which may pick up this
    # global when multiprocessing uses the 'fork' start method.
    runner = CrawlerRunner(settings)
    mq_conn = RabbitConn(RABBITMQ_CONFIG, 'url')

    consumed = 0
    while consumed < MAX_TASKS:
        consumed += 1
        # Pull one message; ``body`` is None/empty when the queue is drained.
        method_frame, header_frame, body = mq_conn.channel.basic_get(
            queue=mq_conn.queue)
        if not body:
            # Use the module logger instead of print() for consistency
            # with the rest of the module.
            logger.info('MQ队列暂无数据, 进程退出')
            time.sleep(5)
            break

        # Ack before crawling: a crashed crawl will NOT requeue the
        # message — presumably intentional; confirm if at-least-once
        # delivery is required.
        mq_conn.channel.basic_ack(delivery_tag=method_frame.delivery_tag)
        data = json.loads(body)
        rule = WorkerRule(data)
        run_spider(rule)

    mq_conn.close()
    sys.exit()

