from confluent_kafka import Consumer, KafkaException, KafkaError
import logging
import subprocess
import json
import time
from collections import defaultdict

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Kafka consumer configuration (confluent-kafka).
conf = {
    'bootstrap.servers': 'node1:9092',   # broker address to bootstrap from
    'group.id': 'bigdata-group-16',      # consumer group id
    'auto.offset.reset': 'earliest',     # start from the beginning when no committed offset exists
    'enable.auto.commit': False,         # offsets are committed manually after each dispatched batch
    'session.timeout.ms': 6000,
    'heartbeat.interval.ms': 1000
}

# Tracks which stock codes already have a crawler started, to avoid duplicate
# launches. Missing keys default to False (not started).
started_crawlers = defaultdict(bool)

def consume_messages():
    """Consume stock messages from the 'bigdata' topic and launch crawlers in batches.

    Each message is expected to be a JSON object with 'code' and 'name' keys.
    New codes are de-duplicated via the module-level ``started_crawlers`` map;
    once 10 new codes have accumulated, ``start_crawlers`` is invoked and the
    consumed offsets are committed. Any partial batch left at shutdown is
    flushed (best effort) before the consumer is closed, so already-consumed
    messages are not silently dropped.
    """
    consumer = Consumer(conf)
    consumer.subscribe(['bigdata'])
    stock_codes = []

    try:
        while True:
            msg = consumer.poll(timeout=1.0)

            if msg is None:
                # No message within the poll timeout. Log at debug level
                # instead of print() so output honors the logging config
                # and doesn't spam stdout once per second.
                logging.debug("Heartbeat")
                continue
            if msg.error():
                if msg.error().code() == KafkaError._PARTITION_EOF:
                    # Reached end of a partition -- not an error, keep polling.
                    continue
                raise KafkaException(msg.error())

            message_value = msg.value()
            if not message_value:
                continue

            if isinstance(message_value, bytes):
                message_value = message_value.decode('utf-8')

            try:
                # AttributeError is caught too: json.loads may return a
                # non-dict (list/str), on which .get does not exist.
                data = json.loads(message_value)
                code = data.get('code')
                name = data.get('name')
            except (json.JSONDecodeError, AttributeError) as e:
                logging.error(f"解析topic数据出错: {e}")
                continue

            if code and name and not started_crawlers[code]:
                stock_codes.append((code, name))
                started_crawlers[code] = True  # mark as started to avoid duplicates
                logging.info(f"已添加 {name} ({code}) 到待采集队列")

            # Launch crawlers once enough codes have accumulated
            # (tune the batch size to the workload).
            if len(stock_codes) >= 10:
                start_crawlers(stock_codes)
                stock_codes = []  # reset the batch
                consumer.commit(asynchronous=True)
    except KeyboardInterrupt:
        logging.info("Shutting down consumers...")
    finally:
        # Flush any partial batch so consumed-but-undispatched messages are
        # not lost; best effort only -- shutdown must still close the consumer.
        if stock_codes:
            try:
                start_crawlers(stock_codes)
                consumer.commit(asynchronous=False)
            except Exception:
                logging.exception("Failed to flush pending batch on shutdown")
        consumer.close()

def start_crawlers(stock_codes):
    """Launch one scrapy crawler per (code, name) pair and wait for all of them.

    All processes are started first so the crawls run concurrently, then each
    is awaited via ``communicate()`` (which also drains the PIPEs, avoiding
    deadlock on large output). When a crawl finishes -- success or failure --
    its entry in the module-level ``started_crawlers`` map is cleared so the
    code can be scheduled again; clearing only on success would permanently
    blacklist a code after a single transient failure.

    Args:
        stock_codes: iterable of (code, name) tuples to crawl.
    """
    processes = []
    for code, name in stock_codes:
        spider_process = subprocess.Popen(
            ["scrapy", "crawl", "stock_spider", "-a", f"code={code}"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        processes.append((spider_process, name, code))

    for p, name, code in processes:
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            # errors='replace' keeps undecodable bytes in scrapy's stderr
            # from raising UnicodeDecodeError while reporting the failure.
            logging.error(f"{name}({code}) 数据写入失败, 失败原因: {stderr.decode(errors='replace')}")
        else:
            logging.info(f"{name}({code}) 数据写入成功")
        # Allow this code to be scheduled again regardless of outcome.
        started_crawlers[code] = False

if __name__ == '__main__':
    # Run the consumer loop when executed as a script (blocks until Ctrl-C).
    consume_messages()