package cn._51doit.flink.day01.sources;

import cn._51doit.flink.StreamWordCountV2;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSink;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.util.Collector;

import java.util.Properties;

/**
 * Demonstrates how operator parallelism is derived in Flink: by default, a
 * multi-parallel DataStream inherits the parallelism of the execution
 * environment. The job reads lines from a Kafka topic, splits them into
 * (word, 1) tuples, sums per key, and prints the parallelism of each stage.
 */
public class KafkaSource {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        System.out.println("执行环境的并行度：" + env.getParallelism());

        // Kafka consumer configuration
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "node-1.51doit.cn:9092,node-2.51doit.cn:9092,node-3.51doit.cn:9092");
        properties.setProperty("group.id", "test777");
        // Start from the earliest offset when the group has no committed offset yet
        properties.setProperty("auto.offset.reset", "earliest");

        FlinkKafkaConsumer<String> kafkaConsumer = new FlinkKafkaConsumer<>(
                "wordcount",
                new SimpleStringSchema(),
                properties
        );

        // Source parallelism defaults to the environment's parallelism
        DataStreamSource<String> lines = env.addSource(kafkaConsumer);

        System.out.println("Source的并行度：" + lines.getParallelism());

        SingleOutputStreamOperator<Tuple2<String, Integer>> wordAndOne = lines.flatMap(new LineSplitor2());

        System.out.println("wordAndOne的并行度：" + wordAndOne.getParallelism());

        KeyedStream<Tuple2<String, Integer>, Tuple> keyed = wordAndOne.keyBy(0);

        // Fixed: originally queried wordAndOne.getParallelism() here, so the
        // printed value never reflected the keyed stream itself.
        System.out.println("keyedStream的并行度：" + keyed.getParallelism());

        SingleOutputStreamOperator<Tuple2<String, Integer>> summed = keyed.sum(1);

        // Override the parallelism of this specific operator only
        summed.setParallelism(2);

        System.out.println("summed的并行度：" + summed.getParallelism());

        DataStreamSink<Tuple2<String, Integer>> sink = summed.print();

        System.out.println("sink的并行度：" + sink.getTransformation().getParallelism());

        // Submit the job and block until it finishes (streaming jobs run indefinitely)
        env.execute();
    }

    /**
     * Splits each input line on single spaces and emits a (word, 1) tuple
     * per token. Note: consecutive spaces produce empty-string tokens, which
     * are emitted as ("", 1) — kept as-is to preserve original behavior.
     */
    public static class LineSplitor2 implements FlatMapFunction<String, Tuple2<String, Integer>> {

        @Override
        public void flatMap(String in, Collector<Tuple2<String, Integer>> collector) throws Exception {
            String[] words = in.split(" ");
            for (String word : words) {
                collector.collect(Tuple2.of(word, 1));
            }
        }
    }

}
