from pyflink.common import WatermarkStrategy, SimpleStringSchema
from pyflink.datastream import StreamExecutionEnvironment, RuntimeExecutionMode
from pyflink.datastream.connectors.kafka import KafkaSource, KafkaOffsetsInitializer

# 1. Create the Flink stream execution environment (the job's entry point).
env = StreamExecutionEnvironment.get_execution_environment()

# Use a parallelism of 1 for this demo job (single subtask for all operators).
env.set_parallelism(1)

# 安装kafka依赖
# flink-sql-connector-kafka-3.2.0-1.18.jar
# 将依赖包放到flink lib目录下
# C:\ProgramData\miniconda3\envs\flink_env\Lib\site-packages\pyflink\lib

# set_bootstrap_servers： kafka 集群列表
# set_topics：指定消费的topic
# set_group_id 消费者组，一条数据在一个组内只消费一次
# set_starting_offsets： 消费数据的位置。earliest: 从最早开始消费，latest: 从最新开始消费
# 从消费组提交的位点开始消费，不指定位点重置策略
    # .set_starting_offsets(KafkaOffsetsInitializer.committed_offsets()) \
    # # 从消费组提交的位点开始消费，如果提交位点不存在，使用最早位点
    # .set_starting_offsets(KafkaOffsetsInitializer.committed_offsets(KafkaOffsetResetStrategy.EARLIEST)) \
    # # 从时间戳大于等于指定时间戳（毫秒）的数据开始消费
    # .set_starting_offsets(KafkaOffsetsInitializer.timestamp(1657256176000)) \
    # # 从最早位点开始消费
    # .set_starting_offsets(KafkaOffsetsInitializer.earliest()) \
    # # 从最末尾位点开始消费
    # .set_starting_offsets(KafkaOffsetsInitializer.latest())

# Build the Kafka source for this job:
# - brokers and topic to read, plus the consumer group id
# - start from the earliest available offsets
# - deserialize record values only, as plain UTF-8 strings
source = (
    KafkaSource.builder()
    .set_bootstrap_servers("master:9092")
    .set_topics("lines")
    .set_group_id("my-group")
    .set_value_only_deserializer(SimpleStringSchema())
    .set_starting_offsets(KafkaOffsetsInitializer.earliest())
    .build()
)

# Turn the Kafka source into an unbounded DataStream. No watermarks are
# needed here: the job does not use event-time semantics.
lines_ds = env.from_source(source, WatermarkStrategy.no_watermarks(), "Kafka Source")

# Word count over the stream:
#   split each record on commas -> (word, 1) pairs -> group by word ->
#   keep a running sum per word, printing every update.
words = lines_ds.flat_map(lambda record: record.split(","))
pairs = words.map(lambda token: (token, 1))
counts = pairs.key_by(lambda pair: pair[0]).reduce(
    lambda acc, cur: (acc[0], acc[1] + cur[1])
)
counts.print()

# Submit the streaming job; nothing runs until execute() is called.
env.execute()
