# -*- coding: utf-8 -*-
# @Time    : 2024/03/06 16:11
# @Author  : Mr.su
# @FileName: start.py
# @FileDesc:
from CollectSpiders.settings import CRAWL_CONFIG
from CollectSpiders.toots.connects import RedisClient
import subprocess, json, os, requests, time, threading, hashlib, urllib.parse, datetime, copy


# noinspection PyMethodMayBeStatic
class SpiderScheduler:
    """Launch and supervise scrapy crawl processes per (domain, node) pair.

    Depends on the module-level globals ``path``, ``redis_client`` and
    ``queue_name`` that the ``__main__`` guard defines before ``run()``.
    """

    def __init__(self):
        # Synchronous first load; run() later starts get_config() in a
        # thread so the config file keeps being refreshed every 5 minutes.
        self.config = self.get_config(True)
        self.start_path = path + '/CollectSpiders'
        self.spider_files_path = path + '/CollectSpiders/spiders'
        # scrapy command template: working dir, spider name, URL-encoded
        # config payload, and a per-spider JOBDIR so crawls are resumable.
        self.command = 'cd {} && scrapy crawl {} -a data={} -s JOBDIR=disk_file/{} &'

    def get_config(self, isfirst=False):
        """Load the crawl config; when looping, refresh it every 5 minutes.

        With ``isfirst=True`` the parsed config is returned immediately;
        otherwise this never returns and is meant to run in a thread.
        """
        while True:
            # TODO read the local config file for testing; the remote fetch
            # below is the production path.
            # Fixed: use a context manager — the handle was never closed.
            with open('./config.json', 'r', encoding='utf-8') as f:
                config = json.loads(f.read())
            # config = requests.get(CRAWL_CONFIG).json()
            # Persist the (possibly remote) config locally so run() can
            # re-read it every 60 seconds.
            with open('./config.json', 'w', encoding='utf-8') as f:
                f.write(json.dumps(config, ensure_ascii=False))
            if isfirst:
                return config
            time.sleep(300)

    def start_spider(self, spider_name, config, node_id):
        """Spawn one backgrounded `scrapy crawl` process for (spider, node)."""
        ts = '{}_{}'.format(spider_name, node_id)
        # The config travels on the command line, so it must be URL-quoted.
        encode_data = urllib.parse.quote(json.dumps(config, ensure_ascii=False))
        command = self.command.format(self.start_path, spider_name, encode_data, ts)
        # NOTE(review): shell=True with interpolated data is injection-prone
        # if the config source is untrusted — confirm where config comes from.
        subprocess.run(command, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        print(command)

    def open_thread(self, domain, node_id):
        """Periodically (re)start the spider for one (domain, node) pair.

        Exits when the domain or node disappears from the current config;
        sleeps and retries while the spider file is missing or disabled.
        """
        while True:
            spider_file_name = '{}.py'.format(domain)
            if not os.path.exists('{}/{}'.format(self.spider_files_path, spider_file_name)):
                print('{} [info]: 爬虫不存在,等待300秒重新检测...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
                time.sleep(300)
                continue
            if domain not in self.config:
                break
            if self.config[domain]['status'] == 0:
                # Disabled in config; poll again in 5 minutes.
                time.sleep(300)
                continue
            if node_id not in self.config[domain]['childs'].keys():
                break
            # Deep-copy so the per-spider payload can be narrowed to a single
            # child node without mutating the shared config.
            config = copy.deepcopy(self.config[domain])
            config['childs'] = {node_id: self.config[domain]['childs'][node_id]}
            t = threading.Thread(target=self.start_spider, args=(domain, config, node_id))
            t.start()
            print('{} [info]: {}-{} 开启爬虫...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), domain, node_id))
            time.sleep(self.config[domain]['childs'][node_id]['wait_time'])

    def make_md5(self, data):
        """Return the hex MD5 digest of *data* (str is UTF-8 encoded first)."""
        if isinstance(data, str): data = data.encode("utf-8")
        m = hashlib.md5()
        m.update(data)
        return m.hexdigest()

    def create_collect_spider(self, domain, node_id):
        """Start one spider for a queue-delivered (domain, node) pair."""
        spider_file_name = '{}.py'.format(domain)
        if not os.path.exists('{}/{}'.format(self.spider_files_path, spider_file_name)):
            print('{} <消费队列> [info]: 爬虫文件不存在,终止线程...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
            return
        if domain not in self.config:
            print('{} <消费队列> [info]: 爬虫配置不存在,终止线程...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
            return
        if self.config[domain]['status'] == 0:
            print('{} <消费队列> [info]: 爬虫状态未开启,终止线程...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
            return
        if node_id not in self.config[domain]['childs'].keys():
            print('{} <消费队列> [info]: 爬虫配置未添加,终止线程...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
            # Fixed: missing return here fell through and raised KeyError below.
            return
        config = copy.deepcopy(self.config[domain])
        config['childs'] = {node_id: self.config[domain]['childs'][node_id]}
        t = threading.Thread(target=self.start_spider, args=(domain, config, node_id))
        t.start()
        print('{} <消费队列> [info]: {}-{} 开启爬虫...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), domain, node_id))

    def _parse_queue_item(self, config):
        """Return (domain, node_id) from a queue item.

        A queue item has the shape ``{domain: {..., 'childs': {node_id: {...}}}}``
        with a single domain and a single child node.
        """
        domain = next(iter(config))
        node_id = next(iter(config[domain]['childs']))
        return domain, node_id

    def collect_queue(self):
        """Consume (domain, node) configs pushed onto the Redis list."""
        while True:
            try:
                count = redis_client.conn.llen(queue_name)
                if count < 1:
                    time.sleep(60)
                    continue
            except Exception as e:
                print('{} [error]: {} redis链接失败, 等待1分钟后重试...'.format(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), e))
                # Fixed: the message promises a 1-minute wait, but the loop
                # retried immediately, busy-spinning while Redis was down.
                time.sleep(60)
                continue
            item = redis_client.conn.rpop(queue_name)
            if item is None:
                # Another consumer drained the list between llen and rpop.
                continue
            config = json.loads(item)
            # Fixed: dict views are not subscriptable/indexable — the original
            # config.values()["childs"].keys()[0] raised TypeError, and the
            # thread received a dict_keys view instead of the domain string.
            domain, node_id = self._parse_queue_item(config)
            t = threading.Thread(target=self.create_collect_spider, args=(domain, node_id))
            t.start()

    def run(self):
        """Entry point: start refresh + queue threads, then watch for new spiders."""
        # Config-refresh thread (rewrites ./config.json every 5 minutes).
        t_update = threading.Thread(target=self.get_config)
        t_update.start()

        # Queue-consumer thread: starts spiders pushed onto the Redis list.
        t_queue = threading.Thread(target=self.collect_queue)
        t_queue.start()

        # One supervisor thread per (domain, node) already in the config.
        for key, value in self.config.items():
            for node_id, node in value['childs'].items():
                t_spider = threading.Thread(target=self.open_thread, args=(key, node_id))
                t_spider.start()

        # Every 60s re-read the config file and start supervisors for any
        # newly added domains or child nodes.
        while True:
            time.sleep(60)
            with open('./config.json', 'r', encoding='utf-8') as f:
                config = json.loads(f.read())
            new_spider_lis = []
            if config.keys() == self.config.keys():
                # Same domains: look for new child nodes under each domain.
                for key, value in config.items():
                    if value['childs'].keys() != self.config[key]['childs'].keys():
                        node_ids = list(set(value['childs'].keys()) - set(self.config[key]['childs'].keys()))
                        for node_id in node_ids:
                            new_spider_lis.append([key, node_id])
            else:
                # Fixed: config.keys / self.config.keys were missing the call
                # parentheses, so set() raised TypeError on any new domain.
                new_spiders = list(set(config.keys()) - set(self.config.keys()))
                for new_spider in new_spiders:
                    for node_id, node in config[new_spider]['childs'].items():
                        new_spider_lis.append([new_spider, node_id])
            self.config = config
            for spider in new_spider_lis:
                t_spider = threading.Thread(target=self.open_thread, args=(spider[0], spider[1]))
                t_spider.start()


if __name__ == '__main__':
    # Module-level globals consumed by SpiderScheduler and its worker threads.
    queue_name = 'collect_site'
    redis_client = RedisClient()
    # Normalise Windows back-slashes so the shell `cd` in the scrapy command
    # template works on every platform.
    path = os.getcwd().replace('\\', '/')
    scheduler = SpiderScheduler()
    scheduler.run()
