from pyflink.common import SimpleStringSchema, WatermarkStrategy, Time, Types
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.datastream.connectors import DeliveryGuarantee
from pyflink.datastream.connectors.kafka import KafkaSource, KafkaOffsetsInitializer, KafkaSink, \
    KafkaRecordSerializationSchema
import json

from pyflink.datastream.window import SlidingProcessingTimeWindows

# 1. Create the Flink execution environment
env = StreamExecutionEnvironment.get_execution_environment()

# Parallelism configuration (three places, by priority):
# 1. In code, highest priority: env.set_parallelism(1)
# 2. As a submit-time argument: -p
# 3. In the config file, default 1, lowest priority

# Sizing guidelines
# Derive parallelism from data throughput:
# 1. For aggregation operators, target roughly 1,000-10,000 records/s per parallel subtask
# 2. For non-aggregating computation, target roughly 10,000-100,000 records/s per parallel subtask
# e.g. with a source producing 10,000 records/s and aggregation operators in the job,
# start with about 5 parallel subtasks and tune based on observed behavior

# 2. Read data: a Kafka source consuming the "parallelism" topic as plain strings,
# starting from the latest offsets.
kafka_builder = KafkaSource.builder()
kafka_builder.set_bootstrap_servers("master:9092")
kafka_builder.set_topics("parallelism")
kafka_builder.set_group_id("my-group")
kafka_builder.set_starting_offsets(KafkaOffsetsInitializer.latest())
kafka_builder.set_value_only_deserializer(SimpleStringSchema())
source = kafka_builder.build()

# Word-count pipeline: split comma-separated lines into words, pair each word
# with a count of 1, group by word, and keep a running sum per key.


def _split_line(line):
    """Yield the comma-separated tokens of one input line."""
    yield from line.split(",")


def _to_pair(word):
    """Wrap a word into a (word, 1) count pair."""
    return word, 1


def _add_counts(left, right):
    """Merge two (word, count) pairs that share the same key."""
    return left[0], left[1] + right[1]


lines_ds = env.from_source(source, WatermarkStrategy.no_watermarks(), "Kafka Source")

count_ds = (
    lines_ds
    .flat_map(_split_line)
    .map(_to_pair)
    .key_by(lambda pair: pair[0])
    .reduce(_add_counts)
)

count_ds.print()

env.execute()
