from pyflink.table import (
    EnvironmentSettings,
    TableEnvironment,
    DataTypes
)


class FlinkBatchPusher:
    """Batch push/fetch of (id, name) records to/from one Kafka topic via PyFlink SQL.

    NOTE(review): the 'kafka' connector is an unbounded source; reading it in
    batch mode may not terminate unless the deployed Flink/connector version
    supports bounded Kafka scans — confirm against the runtime in use.
    """

    def __init__(self, kafka_broker: str, kafka_topic: str):
        """Create a batch TableEnvironment and register the Kafka sink/source tables.

        Args:
            kafka_broker: Kafka bootstrap servers, e.g. "host:9092".
            kafka_topic: topic that is both written to and read from.
        """
        self.kafka_broker = kafka_broker
        self.kafka_topic = kafka_topic

        # Batch-mode TableEnvironment.
        settings = EnvironmentSettings.in_batch_mode()
        self.t_env = TableEnvironment.create(settings)

        # Kafka sink (DDL). Fix: the original DDL carried
        # 'scan.startup.mode' = 'latest-offset', which is a *source-only*
        # option of the Kafka connector and is invalid on a write-only table.
        self._create_kafka_table("kafka_sink")

        # Kafka source (DDL): read the topic from the beginning.
        self._create_kafka_table(
            "kafka_source",
            extra_option="'scan.startup.mode' = 'earliest-offset'",
        )

    def _create_kafka_table(self, table_name: str, extra_option: str = ""):
        """Register an (id INT, name STRING) Kafka table named *table_name*.

        Args:
            table_name: SQL identifier for the table to create.
            extra_option: optional extra entry for the WITH clause,
                already quoted as SQL (e.g. "'scan.startup.mode' = '...'").
        """
        # Append the optional WITH-clause entry after the mandatory options.
        extra = f",\n            {extra_option}" if extra_option else ""
        self.t_env.execute_sql(f"""
        CREATE TABLE {table_name} (
            id INT,
            name STRING
        ) WITH (
            'connector' = 'kafka',
            'topic' = '{self.kafka_topic}',
            'properties.bootstrap.servers' = '{self.kafka_broker}',
            'format' = 'json'{extra}
        )
        """)

    def push_from_list(self, data_list):
        """Push a Python list of rows to Kafka as one batch job.

        Args:
            data_list: rows shaped like [(1, "Alice"), (2, "Bob")].
        """
        schema = DataTypes.ROW([
            DataTypes.FIELD("id", DataTypes.INT()),
            DataTypes.FIELD("name", DataTypes.STRING()),
        ])

        table = self.t_env.from_elements(data_list, schema)
        # Block until the batch insert job completes.
        table.execute_insert("kafka_sink").wait()

    def fetch_from_kafka(self):
        """Read the whole topic (from the earliest offset) in one batch.

        Returns:
            list of (id, name) tuples.
        """
        result_table = self.t_env.from_path("kafka_source")
        # Fix: collect() returns a CloseableIterator — close it via the
        # context manager so the underlying job resources are released
        # instead of leaked.
        with result_table.execute().collect() as results:
            return [(row.id, row.name) for row in results]
