package com.atguigu.flink.parallelism;

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * Demonstrates Flink operator parallelism and operator-chaining controls on a
 * socket-based word-count pipeline:
 * <ul>
 *   <li>environment default parallelism = 2 ({@code env.setParallelism(2)})</li>
 *   <li>{@code disableChaining()} isolates the flatMap operator from its neighbors</li>
 *   <li>{@code setParallelism(3)} + {@code startNewChain()} give the map operator its
 *       own parallelism and start a fresh chain at it</li>
 * </ul>
 * Run with a text source on hadoop102:8888 (e.g. {@code nc -lk 8888}); the local
 * Web UI is exposed on port 8888 so the resulting job graph can be inspected.
 */
public class StreamParallelismTest {

    public static void main(String[] args) throws Exception {

        // 1. Create a local stream environment with the Web UI enabled on port 8888.
        Configuration conf = new Configuration();
        conf.set(RestOptions.PORT, 8888);

        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(conf);

        // Default parallelism for all operators that do not override it.
        env.setParallelism(2);

        env
                // 2. Read lines from the socket source (source parallelism is always 1).
                .socketTextStream("hadoop102", 8888)
                // 3. Split each line into words.
                .flatMap(
                    (String line, Collector<String> collector) -> {
                        for (String word : line.split(" ")) {
                            collector.collect(word);
                        }
                    }
                )
                // Java erases the Collector's generic type from the lambda, so Flink
                // needs an explicit type hint or it throws InvalidTypesException.
                .returns(Types.STRING)
                // Keep this operator out of any chain so it shows up as its own task.
                .disableChaining()
                // 4. Map each word to a (word, 1) tuple.
                .map((String word) -> Tuple2.of(word, 1L))
                // Tuple2's generic parameters are also erased from the lambda.
                .returns(Types.TUPLE(Types.STRING, Types.LONG))
                // Override the default parallelism for the map operator only.
                .setParallelism(3)
                // Begin a new chain at this operator (breaks the chain to its input).
                .startNewChain()
                // 5. Group by the word (tuple field 0).
                .keyBy(data -> data.f0)
                // 6. Sum the counts (tuple field 1).
                .sum(1)
                // 7. Print running counts to stdout.
                .print();

        // 8. Submit the job; blocks until the (unbounded) job terminates.
        env.execute();
    }
}


