# -*- coding: utf-8 -*-
"""
@author:xieyabin
@file: runspiderkeyword_new.py
@time: 2019/01/23
"""
#import sys
#import os
#from scrapy.cmdline import execute

# 获取当前文件所在目录
#current_dir = os.path.dirname(os.path.abspath(__file__))  # 当前文件的绝对路径，然后再找他的父级目录
#sys.path.append(current_dir)  # 将当前路径加入到path中

#execute(['scrapy', 'crawl', 'sogouweixinkeyword'])
import ast
import os
import subprocess

import pymysql
import redis

from KeywordSpider.settings import mysql_conf, redis_conf

# Redis client used to checkpoint crawl progress between runs.
r = redis.StrictRedis(
    host=redis_conf['host'],
    port=redis_conf['port'],
    db=redis_conf['db'],
    password=redis_conf['passwd'],
)

count = 5  # number of keywords fetched per crawl batch

# Before starting, restore the last checkpoint (the keyword batch that was
# being crawled when the previous run stopped), if any.
current_kw = r.get('sogouweixinkeyword')

# First run ever: start paging MySQL from id 0.
# Otherwise leave kid as None so the saved batch is replayed first.
kid = None if current_kw else 0

while True:
    if kid is not None:
        # Fetch the next batch of keywords from MySQL, resuming after the
        # last processed keyword id.  Id-based paging (id > kid) is used
        # instead of LIMIT/OFFSET so that rows deleted mid-crawl cannot
        # make us skip keywords.
        connect2 = pymysql.connect(
            host=mysql_conf.get('host'),
            port=mysql_conf.get('port'),
            database=mysql_conf.get('db'),
            user=mysql_conf.get('user'),
            password=mysql_conf.get('passwd'),
            charset=mysql_conf.get('charset'),
        )
        try:
            cursor2 = connect2.cursor()
            try:
                LEVEL_1 = 0
                LEVEL_2 = -1
                # Parameterized query instead of %-string interpolation to
                # avoid SQL injection / quoting issues.
                kw_sql = (
                    'SELECT keyword, level, id FROM bx_caiji_keywords '
                    'WHERE (level=%s OR level=%s) AND id>%s LIMIT %s'
                )
                cursor2.execute(kw_sql, (LEVEL_1, LEVEL_2, kid, count))
                rows = list(cursor2.fetchall())
            finally:
                cursor2.close()
        finally:
            connect2.close()
        if not rows:
            break  # no keywords left to crawl
        data = str(rows)
    else:
        # Resuming an interrupted run: replay the batch saved in Redis.
        data = current_kw.decode('utf-8')
    # Checkpoint the batch we are about to crawl so an interrupted run can
    # resume from exactly this point.
    r.set('sogouweixinkeyword', data)
    # Launch the spider with an argument list (no shell) so keyword text
    # cannot be interpreted by the shell (os.system was injectable).
    ret = subprocess.run(
        ['scrapy', 'crawl', 'sogouweixinkeyword', '-a', 'kws=%s' % data],
    ).returncode
    print(ret)
    # Advance past the last keyword id of this batch regardless of the
    # spider's exit status (the original if/else did the same thing in
    # both branches).  literal_eval replaces eval(): `data` round-trips
    # through Redis and must never be executed as arbitrary code.
    kid = ast.literal_eval(data)[-1][2]
