# Crawler monitor: watches the Redis start-url task pools and chains the
# news crawl into the weibo crawl once the news queues drain.
import redis
import threading
import time
import sys
from scrapyd_api import ScrapydAPI
from news_spiders.news_analyse import get_keywords
from news_spiders.run import cmdrun as news_run
from weibo_spiders.run import cmdrun as weibo_run


# HTTP client for the local scrapyd daemon (default scrapyd port 6800);
# used to schedule the news and weibo spider jobs.
scrapyd = ScrapydAPI('http://localhost:6800')


def monitor_thread(r, interval=3):
    """Poll the Redis start-url queues until the news crawl drains.

    Blocks until the combined length of ``neteaseNews:start_urls`` and
    ``sinaNews:start_urls`` has been observed non-empty at least once
    (i.e. the crawl actually seeded the queues) and then empty for two
    consecutive polls, which is taken to mean the news spiders finished.

    :param r: Redis client (anything exposing ``llen(key) -> int``).
    :param interval: seconds to sleep between polls. Default 3 preserves
        the original behavior; exposed so tests/callers can tune it.
    :returns: None, once the drain condition is met.
    """
    started = False   # queue was non-empty at least once
    empty_polls = 0   # consecutive empty polls observed after the crawl started
    while True:
        netease_len = r.llen('neteaseNews:start_urls')
        sina_len = r.llen('sinaNews:start_urls')
        print('[%s] REDIS start_urls' %
              time.strftime("%H:%M:%S", time.localtime()))
        print('  neteaseNews : %d' % netease_len)
        print('  sinaNews : %d' % sina_len)

        if netease_len + sina_len == 0:
            # Only count emptiness after the crawl has visibly started,
            # otherwise we would return before any URL was ever queued.
            if started:
                empty_polls += 1
        else:
            empty_polls = 0
            started = True
        if empty_polls >= 2:
            # Two consecutive empty polls after activity: news crawl is
            # considered done; the caller may start the weibo spiders.
            return
        time.sleep(interval)


# Launch the weibo spiders via scrapyd.
def start_weibo_spiders(kw, stime, etime):
    """Schedule the three weibo crawl jobs on the local scrapyd daemon:
    the follower-graph spider, the user-profile spider, and an advanced
    keyword search restricted to the [stime, etime] window."""
    jobs = (
        ('findSon', {'kw': kw}),
        ('userSpider', {'kw': kw}),
        ('advSearch', {'kw': kw, 'stime': stime, 'etime': etime}),
    )
    for spider, kwargs in jobs:
        scrapyd.schedule('weibo_spiders', spider, **kwargs)


# Launch the news spiders via scrapyd.
def start_news_spider(kw):
    """Schedule the news crawl on the local scrapyd daemon: the two site
    spiders plus a Baidu search spider scoped to each news site for *kw*."""
    jobs = (
        ('neteaseNews', {}),
        ('sinaNews', {}),
        ('baiduSearch', {'kw': kw, 'site': 'news.163.com'}),
        ('baiduSearch', {'kw': kw, 'site': 'news.sina.com.cn'}),
    )
    for spider, kwargs in jobs:
        scrapyd.schedule('news_spiders', spider, **kwargs)


def main(kw):
    """Run the full pipeline through scrapyd: crawl news for *kw*, wait for
    the news queues to drain, extract keywords and a date range from the
    crawled news, then launch the weibo spiders on the derived terms.

    :param kw: initial keyword driving the news crawl.
    """
    # NOTE(review): localhost Redis with an empty password — assumed to be a
    # trusted local instance only; confirm before deploying elsewhere.
    redis_pool = redis.ConnectionPool(host="localhost", port=6379, password='')
    r = redis.Redis(connection_pool=redis_pool)

    # Schedule the news spiders on scrapyd.
    start_news_spider(kw)
    print("【新闻爬虫启动】")

    # Give the spiders time to seed the Redis queues; the monitor only
    # treats "empty" as "finished" after it has seen activity.
    time.sleep(10)
    print("【新闻爬虫监控启动】")
    news_mon_thread = threading.Thread(target=monitor_thread, args=(r,))
    news_mon_thread.start()
    news_mon_thread.join()  # blocks until the news crawl is judged finished

    # Extract search keywords and a date range from the crawled news.
    print("【新闻关键词提取启动】")
    keywords, daterange = get_keywords(kw)
    # Join up to the top three keywords. The original indexed [0], [1], [2]
    # and raised IndexError when fewer than three keywords were extracted;
    # for three or more string keywords the output is identical.
    searchwords = ' '.join(map(str, keywords[:3]))

    # Launch the weibo spiders on the derived terms and date range.
    start_weibo_spiders(searchwords, daterange[0], daterange[1])


def cmd_run(kw):
    """Run the full pipeline in-process (via the ``cmdrun`` helpers instead
    of scrapyd): crawl news for *kw*, wait for the news queues to drain,
    extract keywords and a date range, then run the weibo spiders.

    :param kw: initial keyword driving the news crawl.
    """
    # NOTE(review): localhost Redis with an empty password — assumed to be a
    # trusted local instance only; confirm before deploying elsewhere.
    redis_pool = redis.ConnectionPool(host="localhost", port=6379, password='')
    r = redis.Redis(connection_pool=redis_pool)

    # Launch the news spiders in-process.
    news_run(kw)
    print("【新闻爬虫启动】")

    # Give the spiders time to seed the Redis queues; the monitor only
    # treats "empty" as "finished" after it has seen activity.
    time.sleep(10)
    print("【新闻爬虫监控启动】")
    news_mon_thread = threading.Thread(target=monitor_thread, args=(r,))
    news_mon_thread.start()
    news_mon_thread.join()  # blocks until the news crawl is judged finished

    # Extract search keywords and a date range from the crawled news.
    print("【新闻关键词提取启动】")
    keywords, daterange = get_keywords(kw)
    # Join up to the top three keywords. The original indexed [0], [1], [2]
    # and raised IndexError when fewer than three keywords were extracted;
    # for three or more string keywords the output is identical.
    searchwords = ' '.join(map(str, keywords[:3]))

    print(searchwords)
    # Run the weibo spiders in-process on the derived terms and date range.
    weibo_run(searchwords, daterange[0], daterange[1])


if __name__ == "__main__":
    # Allow the topic keyword to be passed on the command line; keep the
    # original hard-coded topic as the default so a bare invocation behaves
    # exactly as before.
    kw = sys.argv[1] if len(sys.argv) > 1 else "双黄连"
    try:
        cmd_run(kw)
        print('started')
    except Exception as e:
        # Top-level boundary: report the failure instead of a raw traceback.
        print(e)
