import time, psutil, subprocess, multiprocessing
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from my_scrapy.spiders.qcwy import QcwySpider

class Op_scrapy():
    """Manage a Scrapy spider running in a separate child process.

    Owns a ``multiprocessing.Pipe`` whose child end is handed to the spider
    process so it can report back to the parent, and exposes helpers to
    start the spider, stop it, and query whether it is still running.
    """

    def __init__(self):
        # PID of the spider process; None until start() succeeds.
        # (Replaces the old dummy value 123456, which could collide with a
        # real, unrelated process via PID reuse.)
        self.spider_pid = None
        # Handle of the child process; None while no spider is running.
        self.the_scrapy = None
        # Pipe for parent <-> spider-process communication.
        self.parent_conn, self.child_conn = multiprocessing.Pipe()

    def start(self, spider_name, params):
        """Launch the named spider in a child process.

        Args:
            spider_name: currently only 'qcwy' is recognised.
            params: opaque parameters forwarded to the spider.

        Returns:
            True if the spider process was started, False for an
            unknown spider name.
        """
        if spider_name == 'qcwy':
            spider = QcwySpider
        else:
            return False
        # Hand the spider class, its parameters and the child pipe end to
        # the worker process that drives Scrapy.
        self.the_scrapy = multiprocessing.Process(
            target=start_crawl, args=(spider, params, self.child_conn))
        self.the_scrapy.start()
        self.spider_pid = self.the_scrapy.pid
        return True

    # ================ Kill the spider process if it is still running ================
    def stop_scrapy(self):
        """Terminate the spider process if it is still alive.

        Uses ``Process.terminate()`` with a ``kill()`` fallback instead of
        the previous Windows-only ``taskkill`` shell command: this is
        cross-platform, avoids building a shell command from a PID string,
        and — because it acts on our own Process handle — cannot hit an
        unrelated process through PID reuse.
        """
        proc = self.the_scrapy
        if proc is not None and proc.is_alive():
            print('scrapy还在运行！')
            proc.terminate()          # polite SIGTERM / TerminateProcess
            proc.join(timeout=5)
            if proc.is_alive():       # escalate if terminate() was ignored
                proc.kill()
                proc.join()

    def check_scrapying(self):
        """Return True while the spider child process is alive."""
        return self.the_scrapy is not None and self.the_scrapy.is_alive()


def start_crawl(spider, params, pipe):
    """Run *spider* inside a CrawlerProcess in this process (blocks until done).

    The spider parameters and the pipe endpoint are forwarded to the spider
    constructor so it can communicate back to the parent process.
    """
    crawler = CrawlerProcess(get_project_settings())
    crawler.crawl(spider, params=params, pipe=pipe)
    crawler.start()

