# # # pipelines.py
# import mysql.connector
# from eastmoney_scraper.items import StockItem
# import logging
# class StockPipeline:
#     def __init__(self):
#         self.logger = logging.getLogger(__name__)
#         self.items_buffer = []
#
#     def open_spider(self, spider):
#         self.connection = mysql.connector.connect(
#             host='192.168.220.131',
#             database='eastmoney',
#             user='root',
#             password='123456'
#         )
#         self.cursor = self.connection.cursor()
#         self.batch_size = 100  # 批量插入的数量
#
#     def close_spider(self, spider):
#         if self.items_buffer:
#             self._insert_items_batch(self.items_buffer)
#         self.cursor.close()
#         self.connection.close()
#
#     def process_item(self, item, spider):
#         self.items_buffer.append(tuple(item.values()))
#         if len(self.items_buffer) >= self.batch_size:
#             self._insert_items_batch(self.items_buffer)
#             self.items_buffer = []
#         return item
#
#     def _insert_items_batch(self, items):
#         insert_query = """
#             INSERT INTO stock_data (
#                 code, name, related_links, latest_price, price_change_percentage,
#                 price_change_amount, volume_hands, turnover_amount, amplitude,
#                 highest, lowest, opening_price, previous_close_price,
#                 turnover_rate, dynamic_pe_ratio, pb_ratio
#             ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
#         """
#         try:
#             self.cursor.executemany(insert_query, items)
#             self.connection.commit()
#             self.logger.info(f"Successfully inserted {len(items)} records.")
#         except Exception as e:
#             self.logger.error(f"Failed to insert batch of records: {e}")
#             self.connection.rollback()

# import redis
# from eastmoney_scraper.items import StockItem
# import logging
#
#
# class StockPipeline:
#     def __init__(self):
#         self.logger = logging.getLogger(__name__)
#         self.items_buffer = []
#         self.redis_client = redis.StrictRedis(host='localhost', port=6379, db=0)  # 初始化Redis客户端
#         self.id_key = 'stock:id'  # 用于存储自增ID的键名
#         self.batch_size = 100  # 批量插入的数量
#
#     def open_spider(self, spider):
#         # 确保自增ID键存在，如果不存在则初始化为0
#         if not self.redis_client.exists(self.id_key):
#             self.redis_client.set(self.id_key, 0)
#
#     def close_spider(self, spider):
#         if self.items_buffer:
#             self._insert_items_batch(self.items_buffer)
#         # Redis连接会自动管理，所以这里不需要显式的关闭操作
#
#     def process_item(self, item, spider):
#         self.items_buffer.append(item)
#         if len(self.items_buffer) >= self.batch_size:
#             self._insert_items_batch(self.items_buffer)
#             self.items_buffer = []
#         return item
#
#     def _insert_items_batch(self, items):
#         try:
#             pipeline = self.redis_client.pipeline()
#             for item in items:
#                 # 获取新的自增ID
#                 new_id = self.redis_client.incr(self.id_key)
#                 key = f"stock:{new_id}"  # 使用自增ID作为键名
#
#                 # 将item转换为字典并存入Redis哈希表
#                 pipeline.hmset(key, dict(item))
#
#                 # 如果您的redis版本是4.0以上，应该使用hset而不是hmset（hmset已被弃用）
#                 # pipeline.hset(key, mapping=dict(item))
#
#             pipeline.execute()
#             self.logger.info(f"Successfully inserted {len(items)} records into Redis.")
#         except Exception as e:
#             self.logger.error(f"Failed to insert batch of records into Redis: {e}")

import json
import logging

from confluent_kafka import Producer, KafkaError

from eastmoney_scraper.items import StockItem

class StockPipeline:
    """Scrapy item pipeline that publishes scraped stock items to Kafka.

    Items are buffered in memory and produced to the configured topic in
    batches of ``batch_size``; any remaining buffered items are flushed
    when the spider closes.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.items_buffer = []  # pending items awaiting a batch send
        self.kafka_producer = self._create_kafka_producer()  # Kafka producer instance
        self.kafka_topic = 'stockdatatest'  # destination Kafka topic
        self.batch_size = 100  # number of items to accumulate before producing

    def _create_kafka_producer(self):
        """Create and return a configured Kafka producer instance."""
        config = {
            # Kafka broker addresses (comma-separated bootstrap list)
            'bootstrap.servers': '192.168.220.131:9092,192.168.220.132:9092,192.168.220.133:9092,192.168.220.134:9092',
            'client.id': 'stock_pipeline_producer',
            'queue.buffering.max.messages': 10000000,  # max messages held in the client-side queue
            'queue.buffering.max.ms': 500,  # max linger time before a buffered batch is sent
            'compression.type': 'gzip',
            'acks': 'all'  # require acknowledgement from all in-sync replicas
        }
        return Producer(config)

    def open_spider(self, spider):
        """Called when the spider opens; no setup is needed for Kafka."""
        pass

    def close_spider(self, spider):
        """Send any still-buffered items, then wait for outstanding deliveries."""
        if self.items_buffer:
            self._insert_items_batch(self.items_buffer)
        # Block until every queued message has been delivered (or failed).
        self.kafka_producer.flush()

    def process_item(self, item, spider):
        """Buffer *item*; produce the whole buffer once it reaches batch_size.

        Returns the item unchanged so downstream pipelines keep working.
        """
        self.items_buffer.append(item)
        if len(self.items_buffer) >= self.batch_size:
            self._insert_items_batch(self.items_buffer)
            self.items_buffer = []
        return item

    def _insert_items_batch(self, items):
        """Serialize each item to JSON and produce it to the Kafka topic.

        Errors are logged rather than raised so a Kafka outage does not
        abort the crawl (best-effort delivery, matching the original design).
        """
        try:
            for item in items:
                # Serialize as JSON (language-neutral, parseable by consumers)
                # instead of Python's dict repr, which downstream systems
                # cannot reliably decode.
                message_value = json.dumps(dict(item), ensure_ascii=False)
                self.kafka_producer.produce(
                    self.kafka_topic,
                    key=None,
                    value=message_value.encode('utf-8'),
                    callback=self._delivery_report
                )

            # Serve delivery-report callbacks for any completed sends.
            self.kafka_producer.poll(0)

            self.logger.info(f"Successfully inserted {len(items)} records into Kafka.")
        except Exception as e:
            self.logger.error(f"Failed to insert batch of records into Kafka: {e}")

    @staticmethod
    def _delivery_report(err, msg):
        """Per-message delivery callback invoked by the producer."""
        if err is not None:
            logging.error(f'Message delivery failed: {err}')
        else:
            logging.info(f'Message delivered to {msg.topic()} [{msg.partition()}] at offset {msg.offset()}')

# 如果需要在脚本中直接运行此管道，可以取消注释以下行
# if __name__ == '__main__':
#     pipeline = StockPipeline()
#     # 模拟一些数据处理...