from scrapy import cmdline
import sys
import os
#sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.interval import IntervalTrigger
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

import scrapy.spiderloader
import scrapy.statscollectors


import logging


def mainHandle():
    """Run the 'fang' spider once, in a child process.

    The previous implementation used ``scrapy.cmdline.execute()``, which
    calls ``sys.exit()`` when the crawl finishes (and Twisted's reactor
    cannot be restarted within the same process).  That terminated this
    process after the first crawl, so the 30-minute interval job added to
    the scheduler below never fired again.  Spawning ``scrapy crawl`` in a
    subprocess keeps this process — and the scheduler — alive between runs.
    """
    # Local import: the rest of the module's imports stay untouched.
    import subprocess

    # argv list with shell=False (the default): no shell-injection surface,
    # and the scheduler thread simply blocks until the crawl finishes.
    subprocess.run(["scrapy", "crawl", "fang"])


#logger = logging.getLogger(__name__)
#logger.debug('hhhhhhhhhhhhhhhhhhhhh')

from twisted.internet import defer, threads,reactor
from twisted.python.failure import Failure

def handleThread(iX):
    """Worker for deferToThread: triple even inputs, fail loudly on odd ones.

    Raises a plain Exception for odd values so the deferred's errback
    path gets exercised in the demo below.
    """
    if iX % 2:
        # Odd input: deliberate failure for the errback demo.
        raise Exception("Ffffff")
    return iX * 3

def errorHandle(failure):
    """Errback for the deferred demo: report the failure on stdout."""
    message = 'cerr1: %s' % str(failure)
    print(message)

def callBack(ix):
    """Success callback for the deferred demo: echo the result to stdout."""
    print('%s' % (ix,))

'''
fRet = threads.deferToThread(handleThread, 3)
fRet.addErrback (errorHandle)
fRet.addCallbacks(callBack)

reactor.run()
'''
# Bootstrap: run one crawl immediately, then re-run on a fixed interval.
mainHandle()
sched = BlockingScheduler()
# Fire mainHandle every 30 minutes (30*60 seconds).
sched.add_job(mainHandle, trigger=IntervalTrigger(seconds=30*60))
# BlockingScheduler.start() blocks the main thread forever; nothing after
# this line executes.
sched.start()