from pyflink.common import Encoder, Types
from pyflink.datastream import StreamExecutionEnvironment, RuntimeExecutionMode
from pyflink.datastream.connectors.file_system import FileSink, RollingPolicy

# 1. Create the Flink execution environment and run it in batch mode,
#    since the input is a bounded (finite) file.
exec_env = StreamExecutionEnvironment.get_execution_environment()
exec_env.set_runtime_mode(RuntimeExecutionMode.BATCH)

# 2. Read the input: a bounded stream of text lines from HDFS.
text_stream = exec_env.read_text_file("hdfs://master:9000/data/words.txt")

# 3. Word-count pipeline:
#    split each comma-separated line into words, pair each word with 1,
#    group by the word, and sum the counts (index 1 of the tuple).
counted = (
    text_stream
    .flat_map(lambda row: row.split(","))
    .map(lambda w: (w, 1))
    .key_by(lambda pair: pair[0])
    .sum(1)
)

# Format each (word, count) pair as a "word,count" string; the explicit
# output_type tells Flink the sink will receive plain strings.
formatted = counted.map(lambda pair: f"{pair[0]},{pair[1]}", output_type=Types.STRING())

# 4. Write the result back to HDFS as rolling text files.
#    for_row_format: output path and per-row encoder.
#    with_rolling_policy: start a new part file when the current one
#    reaches 1 GiB, every 15 minutes, or after 5 minutes of inactivity.
file_sink = (
    FileSink
    .for_row_format("hdfs://master:9000/data/word_count", Encoder.simple_string_encoder("UTF-8"))
    .with_rolling_policy(RollingPolicy.default_rolling_policy(
        part_size=1024 ** 3, rollover_interval=15 * 60 * 1000, inactivity_interval=5 * 60 * 1000))
    .build()
)

formatted.sink_to(file_sink)

# 5. Submit and run the Flink job.
exec_env.execute()