# from scrapy.cmdline import execute
# execute(['scrapy', 'crawl', 'sinaRollSpiderV2'])
# # execute(['scrapy', 'crawl', 'tencentSpider'])
# # execute(['scrapy', 'crawl', 'tencentRollSpider'])
# print("finish")


from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from twisted.internet import reactor 
from multiprocessing import Process
import os
# import sys


# Entry point executed inside each child process: run one spider to completion.
def run_proc(spider_name):
    """Run the spider named *spider_name* in the current (child) process.

    Blocks until the crawl finishes; a fresh process is required per crawl
    because Scrapy's Twisted reactor cannot be restarted within one process.
    """
    print('子进程运行中，name= %s ,pid=%d...' % (spider_name, os.getpid()))
    crawler = CrawlerProcess(get_project_settings())
    crawler.crawl(spider_name)
    crawler.start()  # blocks here until the crawl is finished
 
if __name__ == '__main__':
    # Parent process: run each spider sequentially, one child process per
    # spider, so every crawl gets its own (non-restartable) Twisted reactor.
    print('父进程 %d.' % os.getpid())
    print('子进程将要执行')
    # Fix: the original duplicated the create/start/join sequence for each
    # spider; a loop removes the duplication and makes adding spiders trivial.
    for spider_name in ("tencentRollSpider", "sinaRollSpiderV2"):
        p = Process(target=run_proc, args=(spider_name,))
        p.start()
        p.join()  # wait for this crawl to finish before starting the next




