#!/usr/bin/env python
# coding:utf-8
'''
* * * * *   /opt/elasticsearch_test/run_sever.py
49 */2 * * *  cd /opt/scrapy/bid && /opt/scrapy/bid/run_all.py
*/4 * * * *  ps -eo pid,etime,cmd | grep "scrapy crawl" | awk '$2~/^([0-9]+-)?([0-9]{2}:){2}[0-9]{2}$/ && $2>="01:00:00" {print $1}' |xargs kill -9 
'''

import os,subprocess
from concurrent.futures import ThreadPoolExecutor
import time
from elasticsearch import Elasticsearch

es = Elasticsearch(hosts='http://127.0.0.1:9200')
# Best-effort purge of stale bid documents (time between 10 years and 1 year
# old).  A failed delete must not stop the crawlers from running, but the
# original bare `except: pass` hid *every* error (connection refused, typos,
# even KeyboardInterrupt); narrow it and make the failure visible.
try:
    es.delete_by_query(index='bid', body={'query': {
        "range": {
            "time": {
                "gte": "now-10y",  # lower bound: ten years ago
                "lt": "now-1y"     # upper bound (exclusive): one year ago
            }
        }
    }}, doc_type='_doc')
except Exception as exc:  # still best-effort, but logged instead of silent
    print(f'elasticsearch cleanup skipped: {exc}')
# Build one "scrapy crawl <spider> --nolog" command per spider module in
# ./bid/spiders/.  Use endswith('.py') rather than a substring test: the
# original `'.py' not in file` also let '.pyc' files through, and
# `replace(".py", "")` then produced bogus spider names (e.g. 'fooc').
# '__init__.py' is excluded explicitly instead of skipping any file whose
# name merely contains 'init'.
dirs = os.listdir('./bid/spiders/')
rules_list = []
for file in dirs:
    if not file.endswith('.py') or file.startswith('__'):
        continue
    rules_list.append(f'scrapy crawl {file[:-3]} --nolog')  # strip '.py' suffix only

def run_rule(cmd):
    """Run one crawl command in a shell and stream its output to stdout.

    cmd: a command line of the form 'scrapy crawl <name> --nolog'; the
    spider name is taken as the second-to-last whitespace-separated token.
    Returns None; blocks until the child process exits.
    """
    name = cmd.split(" ")[-2]
    print(f'{name}---开始执行')
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # readline() on the pipe yields bytes, so the EOF sentinel must be b''.
    # The original passed the str 'b', which never compares equal to bytes,
    # so the loop only ever terminated through its manual break.
    for raw in iter(p.stdout.readline, b''):
        print(raw.decode('utf-8'))
    p.wait()  # reap the child so no zombie process is left behind
    print(name, '结束')
# Fan the crawl commands out over at most three worker threads; leaving the
# `with` block waits for every submitted crawl to finish.
with ThreadPoolExecutor(3) as executor:
    for command in rules_list:
        executor.submit(run_rule, command)