# encoding: utf-8
import datetime

from elasticsearch_dsl import connections
from scrapy.utils.log import configure_logging
from twisted.internet import task, reactor

from defclass import HotType, MBlogType, LSTM
from scrapy.crawler import CrawlerProcess, CrawlerRunner
from scrapy.utils.project import get_project_settings
from apscheduler.schedulers.twisted import TwistedScheduler


# 获取配置
from weibo_spyder.spiders.m_hot import HotSpider
from weibo_spyder.spiders.m_weibo import WeiboSpider

# Load the Scrapy project settings and read the ElasticSearch endpoint from them.
settings = get_project_settings()
eshost = settings.get('ELASTICSEARCH_SERVER')
esport = settings.get('ELASTICSEARCH_PORT')
# username = settings.get('ELASTICSEARCH_USERNAME')
# password = settings.get('ELASTICSEARCH_PASSWORD')

# Create the default elasticsearch-dsl connection; HotType/MBlogType use it below.
# NOTE(review): assumes ELASTICSEARCH_SERVER/PORT are set — if either is None the
# string concatenation here raises TypeError; confirm against settings.py.
connections.create_connection(hosts=['http://'+eshost+':'+str(esport)])
# connections.create_connection(hosts=['http://'+eshost+':'+str(esport)], http_auth=(username, password))

# Create the ElasticSearch indices (with mappings) for the two document types.
HotType.init(index="hot")
MBlogType.init(index='mblog')
print("ElasticSearch索引建立成功")

# 法一、串行运行爬虫
# from scrapy import cmdline
# cmdline.execute(['scrapy', 'crawl', 'm_hot'])
# cmdline.execute(['scrapy', 'crawl', 'm_weibo'])

# 法二、CrawlerProcess并行运行爬虫
# settings = get_project_settings()
# crawler = CrawlerProcess(settings)
# crawler.crawl('m_hot')
# crawler.crawl('m_weibo')
# crawler.start()

# 法三、定时运行爬虫

# crawler = CrawlerProcess(settings)
# scheduler = TwistedScheduler()
# scheduler.add_job(crawler.crawl, 'interval', args=['m_hot'], minutes=2)
# scheduler.add_job(crawler.crawl, 'interval', args=['m_weibo'], minutes=2)
# scheduler.start()
# crawler.start(False)

# Create the shared CrawlerRunner: it schedules crawls inside an existing
# Twisted reactor (unlike CrawlerProcess, it does not start the reactor itself).
runner = CrawlerRunner()

# Route Scrapy's log output through the logging configuration.
configure_logging()

# Spider classes to launch on every scheduled tick.
spiders = [HotSpider, WeiboSpider]

# Compute the next run time aligned to an interval-minute boundary.
def next_run_time(interval_minutes=2):
    """Return the next wall-clock time aligned to *interval_minutes*.

    Looks *interval_minutes* ahead of now, then floors the minute down to the
    nearest multiple of *interval_minutes* and zeroes seconds/microseconds.
    The result is therefore strictly in the future but at most
    *interval_minutes* away — a suitable first-fire time for a repeating
    scheduler with that period.

    Args:
        interval_minutes: scheduling interval in minutes (default 2, matching
            the original hard-coded behavior).

    Returns:
        datetime.datetime of the next aligned run time.
    """
    ahead = datetime.datetime.now() + datetime.timedelta(minutes=interval_minutes)
    # Floor the minute to the interval boundary; for interval_minutes=2 this
    # reproduces the original even/odd-minute logic exactly.
    aligned_minute = ahead.minute - (ahead.minute % interval_minutes)
    return ahead.replace(minute=aligned_minute, second=0, microsecond=0)

# Callback fired on every scheduler tick.
def run_spiders():
    """Schedule one crawl for every spider class in ``spiders`` on the shared runner."""
    for spider_cls in spiders:
        runner.crawl(spider_cls)

# LoopingCall repeatedly invokes run_spiders once started; the 120 s period
# is supplied to start() below.
scheduler = task.LoopingCall(run_spiders)

# Delay the first tick until the next even-minute boundary, then repeat
# every two minutes (LoopingCall.start defaults to now=True, so the first
# crawl fires immediately when callLater triggers at start_time).
start_time = next_run_time()
print("下次执行时间：", start_time)
# if not scheduler.running:
    # scheduler.start(120.0, now=False)
reactor.callLater((start_time - datetime.datetime.now()).total_seconds(), scheduler.start, 120.0)

# Start the Twisted reactor; this call blocks until the reactor is stopped.
reactor.run()


