package com.example.aggregation;

import com.example.bean.WaterSensor;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Created with IntelliJ IDEA.
 * ClassName: SumKeyByDemo
 * Package: com.example.aggregation
 * Description: Demonstrates keyBy partitioning and the simple sum aggregation.
 * User: fzykd
 *
 * @Author: LQH
 * Date: 2023-07-18
 * Time: 16:03
 */

public class SumKeyByDemo {

    /**
     * Entry point: builds a small in-memory stream of {@link WaterSensor}
     * records, partitions it by sensor id with {@code keyBy}, and prints the
     * running per-key sum of the {@code vc} field.
     */
    public static void main(String[] args) throws Exception {
        // 1. Create the streaming execution environment.
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // fromElements feeds the job with a fixed set of in-memory records.
        DataStreamSource<WaterSensor> source = env.fromElements(
                new WaterSensor("s1", 1L, 1),
                new WaterSensor("s1", 11L, 11),
                new WaterSensor("s2", 2L, 2),
                new WaterSensor("s3", 3L, 3)
        );

        // Group the stream by sensor id.
        // Notes on keyBy:
        //   1. It returns a KeyedStream, not a plain DataStream.
        //   2. keyBy is a (re)partitioning step rather than a transformation:
        //      records with the same key are routed deterministically to the
        //      same partition — the data is not shuffled randomly.
        //   3. Its parallelism cannot be set directly; conceptually it is
        //      similar to the partition/ReduceTask split in Hadoop.
        // Relationship between grouping and partitioning: keyBy groups the
        // data so that records sharing a key land in the same partition, and
        // a partition can be understood as one subtask.
        KeyedStream<WaterSensor, String> keyedStream =
                source.keyBy(WaterSensor::getId);

        // Simple aggregation operators (sum/min/max, ...) are only available
        // on a KeyedStream. sum takes either a tuple position index (for
        // Tuple types) or a field name (for bean/POJO types) — here the bean
        // field "vc".
        SingleOutputStreamOperator<WaterSensor> summed = keyedStream.sum("vc");

        // Emit the running sums to stdout unchanged.
        summed.print();

        env.execute();
    }

}
