#!/usr/bin/env python
# coding:utf-8
import scrapy

from scrapy.crawler import CrawlerProcess
from scrapy.conf import  settings
from CollectSpider.service.task import  init_engine

def main():
    """Initialize the task engine and run the ``baidu_spider`` crawl once.

    Blocks until the crawl completes: ``CrawlerProcess.start()`` runs the
    Twisted reactor and returns only when all spiders are finished.
    """
    # Task payload forwarded verbatim to the spider as keyword arguments.
    # The fields (ids, timestamps, thread counts, status flags) look like a
    # serialized row from the task store -- TODO confirm the schema against
    # CollectSpider.service.task.
    task = {
        u'spider_urls': [{u'baidu_spider': u'http://news.baidu.com/ns?word=\u5927\u6570\u636e&pn=0&cl=2&ct=1&tn=news&rn=20&ie=utf-8&bt=0&et=0&rsv_page=1'}],
        u'page_count': 40,
        u'pid': 79848,
        u'time_last': 1448338550,
        u'id': 38,
        u'spider_names': [u'baidu_spider'],
        u'task_type': 2,
        u'time_start': 1448338509,
        u'search_key': u'\u5927\u6570\u636e',
        u'count_thread_max': 2,
        u'search_id': u'10',
        u'time_interval': 0,
        u'task_name': u'\u65b0\u95fb\u91c7\u96c62',
        u'count_thread': 1,
        u'delete': 0,
        u'count_failed': 0,
        u'enabled': 1,
        u'schedule_type': 1,
        u'rule_match': u'',
        u'task_status': 1,
        u'count_success': 140,
        u'url_start': u'http://news.baidu.com/ns?word=\u5927\u6570\u636e&pn=0&cl=2&ct=1&tn=news&rn=20&ie=utf-8&bt=0&et=0&rsv_page=1',
    }

    init_engine()
    # NOTE(review): the `scrapy.conf.settings` singleton imported above is
    # deprecated (removed in modern Scrapy) -- prefer
    # scrapy.utils.project.get_project_settings() when upgrading.
    process = CrawlerProcess(settings)
    # Pass the mapping directly as keyword arguments; the original
    # `**dict({...})` round-trip was redundant.
    process.crawl("baidu_spider", **task)
    process.start()  # blocks until crawling is finished


# Guard the entry point so importing this module does not launch a crawl.
if __name__ == "__main__":
    main()

