package org.example.day20241227.operators;

import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.file.src.FileSource;
import org.apache.flink.connector.file.src.FileSourceSplit;
import org.apache.flink.connector.file.src.impl.StreamFormatAdapter;
import org.apache.flink.connector.file.src.reader.BulkFormat;
import org.apache.flink.connector.file.src.reader.TextLineInputFormat;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * @className: ReduceDemo
 * @author: 孙磊
 * @date: 2024/12/29 22:49
 * @Version: 1.0
 * @description: Rolling reduce on a keyed stream: combines the current element with the last
 * reduced value for the same key and emits the new result.
 * 1.5. Reduce operator demo: reads words from a text file, groups words that share the same
 * first letter (case-insensitive), and concatenates each group's words, upper-casing the
 * previously reduced value before appending the next word.
 */
public class ReduceDemo {
    public static void main(String[] args) throws Exception {
        // Single-parallelism environment so the rolling-reduce output is easy to follow in order.
        final StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
        environment.setParallelism(1);
        environment.setRuntimeMode(RuntimeExecutionMode.AUTOMATIC);

        // Read the input file line by line through the bulk-format adapter.
        BulkFormat<String, FileSourceSplit> lineFormat =
                new StreamFormatAdapter<>(new TextLineInputFormat());
        FileSource<String> wordFile = FileSource.forBulkFileFormat(
                lineFormat,
                new Path("src/main/resources/words.txt")).build();

        // Split every line into words, then key words by their first letter
        // (compared case-insensitively; the constant 1 merely pads the key tuple).
        KeyedStream<String, Tuple2<Character, Integer>> wordsByInitial =
                environment.fromSource(wordFile, WatermarkStrategy.noWatermarks(), "fileSource")
                        .flatMap(new FlatMapFunction<String, String>() {
                            @Override
                            public void flatMap(String line, Collector<String> collector) throws Exception {
                                for (String word : line.split(" ")) {
                                    collector.collect(word);
                                }
                            }
                        })
                        .keyBy(new KeySelector<String, Tuple2<Character, Integer>>() {
                            @Override
                            public Tuple2<Character, Integer> getKey(String word) throws Exception {
                                // Words with the same (upper-cased) first letter land in the same group.
                                return Tuple2.of(word.toUpperCase().charAt(0), 1);
                            }
                        });

        // Rolling reduce per key: upper-case the value accumulated so far and append the next word,
        // so every element except the most recent one appears upper-cased in the emitted result.
        wordsByInitial.reduce(new ReduceFunction<String>() {
            @Override
            public String reduce(String accumulated, String next) throws Exception {
                return accumulated.toUpperCase().concat(next);
            }
        }).print();

        environment.execute();
    }

}
