from django.views.decorators.http import require_http_methods
from django.http import JsonResponse
from scrapyd_api import ScrapydAPI
import redis
from datetime import datetime
import sys
sys.path.append("..")
from analysis.news_analyse import get_keywords, get_topwords
from news_spiders.news_spiders import settings
from myapp.utils.connection import POOL

# Client for the local scrapyd daemon that hosts both spider projects.
scrapyd = ScrapydAPI('http://localhost:6800')
# Keyword submitted by the most recent successful start_news call.
last_key = ''
# Whether the 163/sina detail-page spiders have been started.
is_news_started = False
# All keywords submitted so far (consumed by get_topwords / get_final_words).
key_set = []
# Date range handed to the weibo spider (filled by get_cur_top_words/start_weibo).
daterange = []

# Shared redis connection; used to clear the spiders' pending start-url queues.
redis_pool = redis.ConnectionPool(
    host=settings.REDIS_HOST, port=settings.REDIS_PORT, password='')
r = redis.Redis(connection_pool=redis_pool)

# List the scrapyd job queues of both spider projects.
@require_http_methods(["GET"])
def schedulelist(request):
    """Return the scrapyd job listings for the news and weibo projects as JSON."""
    payload = {}
    try:
        payload['data'] = {}
        for project in ('news_spiders', 'weibo_spiders'):
            payload['data'][project] = scrapyd.list_jobs(project)
    except Exception as exc:
        payload['msg'] = str(exc)
    return JsonResponse(payload)


# Cancel every scheduled job and reset the keyword state.
@require_http_methods(["GET"])
def cancel_all(request):
    """Stop all spiders, clear the accumulated keywords, and report the outcome."""
    global last_key
    global key_set
    last_key = ''
    key_set = []
    result = {}
    try:
        stop_spiders()
        result['ok'] = 1
    except Exception as exc:
        result['msg'] = str(exc)
    return JsonResponse(result)

# Launch the news search spiders for the keyword given in ?key=.
@require_http_methods(["GET"])
def start_news(request):
    """Schedule baiduSearch crawls (163 and sina sites) for the request keyword.

    Re-submitting the same keyword as last time is a no-op; a different
    keyword cancels any running baiduSearch jobs, clears the redis queues,
    and schedules two fresh jobs (one per target site).
    """
    global last_key
    global key_set
    resp = {}
    key = request.GET.get('key')
    # The keyword is mandatory.
    if key is None:
        resp['msg'] = 'Empty keyword! '
        return JsonResponse(resp)
    try:
        # Same keyword as the previous call -> jobs already scheduled.
        if key == last_key:
            resp['msg'] = 'Jobs are already running'
            return JsonResponse(resp)
        key_set.append(key)
        resp['jobs'] = []
        # Cancel any baiduSearch job still running before rescheduling.
        for job in scrapyd.list_jobs('news_spiders')['running']:
            if job['spider'] == 'baiduSearch':
                scrapyd.cancel(job['project'], job['id'])
        clearRedisQueue()
        for site in ('163.com', 'news.sina.com.cn'):
            resp['jobs'].append(
                scrapyd.schedule('news_spiders', 'baiduSearch', kw=key, site=site))
        last_key = key
    except Exception as exc:
        resp['msg'] = str(exc)
    return JsonResponse(resp)


# Start the sina/163 detail-page spiders (idempotent).
@require_http_methods(["GET"])
def start_sina_163(request):
    """Schedule two instances each of the netease and sina detail spiders."""
    global is_news_started
    resp = {}
    if is_news_started:
        resp['msg'] = 'sina_163 is already started.'
        return JsonResponse(resp)
    # Two instances per spider so the redis queues drain faster.
    for _ in range(2):
        scrapyd.schedule('news_spiders', 'neteaseNews')
        scrapyd.schedule('news_spiders', 'sinaNews')
    is_news_started = True
    resp['ok'] = is_news_started
    return JsonResponse(resp)

# Stop every spider of both projects.
def stop_spiders():
    """Cancel all running and pending jobs, clear redis queues, reset the flag."""
    global is_news_started
    jobs = []
    for project in ('news_spiders', 'weibo_spiders'):
        listing = scrapyd.list_jobs(project)
        jobs.extend(listing['running'])
        jobs.extend(listing['pending'])
    for job in jobs:
        scrapyd.cancel(job['project'], job['id'])
    clearRedisQueue()
    is_news_started = False

# Drop the pending start-url queues of the detail spiders.
def clearRedisQueue():
    """Delete the redis start-url queues feeding the netease/sina spiders."""
    for queue in ('neteaseNews:start_urls', 'sinaNews:start_urls'):
        r.delete(queue)

# Return the current top words for the accumulated keywords.
@require_http_methods(["GET"])
def get_cur_top_words(request):
    """Compute top words via the analysis helper and remember its date range."""
    global daterange
    key = request.GET.get('key', '')
    result = get_topwords(key_set, key)
    # The analysis result carries the date range used later by start_weibo.
    daterange = result['daterange']
    return JsonResponse(result)

# Switch from news crawling to the weibo advanced-search spider.
@require_http_methods(["GET"])
def start_weibo(request):
    """Stop all spiders, then launch the weibo advSearch crawl for last_key.

    The crawl window comes from the analysed date range when available,
    otherwise it defaults to 2020-01-01 .. today.
    """
    global daterange
    stop_spiders()
    result = get_topwords(key_set)
    daterange = result['daterange']
    if len(daterange) == 2:
        stime, etime = daterange
    else:
        stime = '20200101'
        etime = datetime.now().strftime("%Y%m%d")
    scrapyd.schedule('weibo_spiders', 'advSearch', kw=last_key, stime=stime, etime=etime)
    return JsonResponse(get_final_words())


# Poll the freshly crawled, not-yet-shown news titles.
@require_http_methods(["GET"])
def get_cur_news(request):
    """Return the latest unshown news titles pulled from the database."""
    return JsonResponse({'data': select_news()})

# Poll the freshly crawled, not-yet-shown weibo posts.
@require_http_methods(["GET"])
def get_cur_weibo(request):
    """Return the latest unshown weibo texts pulled from the database."""
    return JsonResponse({'data': select_weibo()})


# --------------------------------
def _pop_unshown_titles(cursor, conn, suffix):
    """Fetch up to 3 unshown titles from the `<last_key>_<suffix>` table and mark them shown.

    Errors (e.g. the table does not exist yet because the spider has not
    written anything) are logged and swallowed so one failing source does
    not hide the other; an empty list is returned in that case.
    """
    # NOTE(review): the table name interpolates user-supplied last_key.
    # Identifiers cannot be bound as SQL parameters, so this remains an
    # injection surface — last_key should be validated/whitelisted upstream.
    table = last_key.replace(" ", "_") + suffix
    titles = []
    select_sql = ("SELECT newsID, title FROM `%s` WHERE has_show = 0 "
                  "ORDER BY crawl_time desc LIMIT 3" % table)
    try:
        cursor.execute(select_sql)
        ids = []
        for news_id, title in cursor.fetchall():
            titles.append(title)
            ids.append(news_id)
        # Mark the returned rows so they are not served again.
        update_sql = "UPDATE `{}` SET has_show=1 where newsID=%s".format(table)
        cursor.executemany(update_sql, ids)
        conn.commit()
    except Exception as e:
        # Log the actual error instead of discarding it.
        print('[%s 查询出错！]' % suffix, e)
    return titles


def select_news():
    """Return up to 3 unshown sina titles followed by up to 3 netease titles.

    Rows returned are flagged has_show=1 so the next poll yields new items.
    """
    conn = POOL.connection()
    cursor = conn.cursor()
    try:
        data = _pop_unshown_titles(cursor, conn, '_sinanews')
        data += _pop_unshown_titles(cursor, conn, '_neteasenews')
    finally:
        # Return the pooled connection instead of leaking it.
        cursor.close()
        conn.close()
    return data


def select_weibo():
    """Return up to 2 unshown weibo texts (truncated to 40 chars) and mark them shown.

    Errors (e.g. the table does not exist yet) are logged and swallowed;
    an empty list is returned in that case.
    """
    conn = POOL.connection()
    cursor = conn.cursor()
    data = []
    # NOTE(review): the table name interpolates user-supplied last_key.
    # Identifiers cannot be bound as SQL parameters, so this remains an
    # injection surface — last_key should be validated/whitelisted upstream.
    table = last_key.replace(" ", "_") + '_advsearch'
    sql = ("SELECT mid, text FROM `%s` where has_show = 0 "
           "ORDER BY crawl_time desc LIMIT 2" % table)
    try:
        try:
            cursor.execute(sql)
            ids = []
            for mid, text in cursor.fetchall():
                # Slicing is a no-op for texts shorter than 40 characters.
                data.append(text[:40])
                ids.append(mid)
            # Mark the returned rows so they are not served again.
            update_sql = "UPDATE `{}` SET has_show=1 where mid=%s".format(table)
            cursor.executemany(update_sql, ids)
            conn.commit()
        except Exception as e:
            # Log the actual error instead of discarding it.
            print('[_advsearch 查询出错！]', e)
    finally:
        # Return the pooled connection instead of leaking it.
        cursor.close()
        conn.close()
    return data


def get_final_words(keys=None):
    """Build an ECharts-style graph payload from the keyword words.

    Args:
        keys: optional iterable of space-separated keyword strings; when
            omitted, the module-level key_set accumulated by start_news
            is used (backward compatible with the old zero-arg call).

    Returns:
        dict with "nodes" (one per unique word, fixed size/value of 50,
        category index equal to the node index), "links" (always empty
        here), and "categories" (one per word, same order as nodes).
    """
    if keys is None:
        keys = key_set
    # Deduplicate, then sort: plain set iteration order depends on string
    # hashing and would make node ids nondeterministic across runs.
    words = sorted({word for key in keys for word in key.split()})
    response = {"nodes": [], "links": [], "categories": []}
    for idx, word in enumerate(words):
        response["nodes"].append({
            "id": str(idx),
            "name": word,
            "symbolSize": 50,
            "value": 50,
            "category": idx,
        })
        response["categories"].append({"name": word})
    return response
