package com.atguigu.flink.chapter05.Transform;

import com.atguigu.flink.bean.WaterSensor;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Demonstrates the ways to key a DataStream in Flink: by tuple position,
 * by POJO field name (both deprecated), by an explicit {@code KeySelector},
 * and by lambda/method reference.
 *
 * @author cjp
 * @version 1.0
 * @date 2021/8/9 10:30
 */
public class Flink05_Keyby {
    /**
     * Reads CSV lines ("id,timestamp,vc") from a local socket, parses them into
     * {@link WaterSensor} records, partitions the stream by sensor id via {@code keyBy},
     * and prints the keyed stream. Blocks until the Flink job terminates.
     *
     * @param args unused
     * @throws Exception if job submission or execution fails
     */
    public static void main(String[] args) throws Exception {
        // 0. Execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // 1. Read raw data; one record per line, e.g.:
        //    sensor_1,123123213,10
        DataStreamSource<String> socketDS = env.socketTextStream("localhost", 9999);

        // 2. Parse each CSV line into a WaterSensor POJO.
        //    NOTE(review): a malformed line (missing fields, non-numeric values) will
        //    throw and fail the job — acceptable for a demo, guard in production.
        SingleOutputStreamOperator<WaterSensor> sensorDS = socketDS.map((MapFunction<String, WaterSensor>) value -> {
            String[] datas = value.split(",");
            return new WaterSensor(
                    datas[0],
                    Long.parseLong(datas[1]),
                    Integer.parseInt(datas[2])
            );
        });

        // 3. keyBy — the three historical ways to specify the key:

        // 3.1 By tuple position — only usable when the data type is a Tuple (deprecated).
//        KeyedStream<WaterSensor, Tuple> resultDS = sensorDS.keyBy(0);
        // 3.2 By field name — only usable for POJO types (deprecated).
//        KeyedStream<WaterSensor, Tuple> resultDS = sensorDS.keyBy("id");
        // 3.3 With an explicit KeySelector (type-safe, preferred):
//        KeyedStream<WaterSensor, String> resultDS = sensorDS.keyBy(new KeySelector<WaterSensor, String>() {
//            @Override
//            public String getKey(WaterSensor value) throws Exception {
//                return value.getId();
//            }
//        });

        // Method reference is the idiomatic shorthand for the KeySelector above.
        KeyedStream<WaterSensor, String> resultDS = sensorDS.keyBy(WaterSensor::getId);

        resultDS.print();

        env.execute();
    }
}
/*
    keyGroupId * parallelism / maxParallelism
        parallelism    => keyby下游算子的并行度, 必须 <= 最大并行度
        maxParallelism => 最大并行度，默认 128 ， 最大不超过 Short的最大值
        keyGroupId     => 两次hash
                            第一次： key自身的 hashcode方法
                            第二次： murmurhash(keyhash) % 最大并行度(128)
                          => 取值范围 [0,最大并行度)
 */