import os
import time
import signal
import schedule
from multiprocessing import Process
from scrapy.settings import Settings
from scrapy.crawler import CrawlerProcess

# Public API of this module; start_crawl/start_monitor/term are internal
# helpers used by start_process and the SIGTERM handler.
__all__ = [
    'get_project_settings', 'get_crawler_process', 'start_process'
]


def get_project_settings():
    """Build and return the Scrapy Settings for this project.

    Loads ``crawler.settings.settings`` at 'project' priority so it
    overrides Scrapy's defaults.
    """
    project_settings = Settings()
    project_settings.setmodule('crawler.settings.settings', priority='project')
    return project_settings


def get_crawler_process():
    """Return a new CrawlerProcess configured with the project settings."""
    return CrawlerProcess(settings=get_project_settings())


def start_crawl(spider):
    """Scrapy process launcher: run the given spider in a blocking crawl.

    NOTE(review): ``start(False)`` passes ``stop_after_crawl=False``, so
    the Twisted reactor keeps running after the crawl finishes —
    presumably intentional for long-lived container processes; confirm.
    """
    crawler = get_crawler_process()
    crawler.crawl(spider)
    crawler.start(False)


def term(sig_num, addtion):
    """SIGTERM handler: forward SIGTERM to the entire process group.

    Ensures all child processes spawned by start_process die together
    with the parent (the parent is part of the same group, so it is
    killed as well).
    """
    pid = os.getpid()
    print("current pid is %s, group id is %s" % (pid, os.getpgrp()))
    os.killpg(os.getpgid(pid), signal.SIGTERM)


def start_monitor(spider_list):
    """Run the container-monitor loop, ticking the scheduler once a second.

    ``spider_list`` items are sequences whose first element is a spider
    with a ``name`` attribute; falsy entries are skipped.
    """
    from crawler.monitor import ContainerMonitor
    from crawler.logger import other_logger
    other_logger.info('Start Container Monitor')
    spider_names = [info[0].name for info in spider_list if info]
    container_monitor = ContainerMonitor(spider_names)
    # NOTE(review): the periodic monitor job below is disabled, so the
    # loop currently has no scheduled work — confirm whether this is
    # intentional before re-enabling.
    # schedule.every(2).hours.do(container_monitor.run_monitor)
    while True:
        schedule.run_pending()
        time.sleep(1)


def start_process(spider_list):
    """Spawn one monitor process plus N crawl processes per spider.

    ``spider_list`` items are sequences of at least ``(spider,
    process_num)``; ``process_num`` copies of each spider are launched
    in separate processes to conserve container resources.

    Installs ``term`` as the SIGTERM handler so a termination signal
    kills the whole process group (parent and all children together).
    Blocks on ``join`` — in practice the children run indefinitely.
    """
    signal.signal(signal.SIGTERM, term)
    processes = []

    # Container monitor process, started alongside the crawlers below.
    processes.append(Process(target=start_monitor, args=(spider_list,)))

    # One process per requested replica of each spider.
    for spider_info in spider_list:
        spider = spider_info[0]
        process_num = spider_info[1]
        # project_type = spider_info[2]
        for _ in range(process_num):
            processes.append(Process(target=start_crawl, args=(spider,)))

    # Plain loops, not list comprehensions: these calls are executed for
    # their side effects only (the old comprehensions built throwaway lists).
    for p in processes:
        p.start()
    for p in processes:
        p.join()  # waits for each child in turn; normally never returns
