package com.yuhang.wordcount;

import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class StreamWordCount {

    /**
     * Streaming word count: reads lines from a socket text stream, splits them
     * into words via {@code WordCount.MyFlatMapFunction}, keys by the word, and
     * prints a running count per word.
     *
     * <p>Required program arguments: {@code --host <hostname> --port <port>}.
     *
     * @param args command-line arguments parsed by {@link ParameterTool}
     * @throws Exception if the Flink job fails to start or execute
     */
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Parallelism precedence: per-operator > global (set here) > web UI > config file.
        env.setParallelism(1);
        // Disabling operator chaining globally would prevent adjacent operators from
        // being fused into a single task, even where fusion would otherwise apply:
//        env.disableOperatorChaining();

        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        // getRequired fails fast with a clear message when --host is missing,
        // instead of returning null and failing later inside socketTextStream.
        String host = parameterTool.getRequired("host");
        int port = parameterTool.getInt("port");

        // Read from a socket text stream; this source's parallelism is always 1,
        // regardless of any parallelism setting.
        DataStream<String> dataStreamSource = env.socketTextStream(host, port);

        SingleOutputStreamOperator<Tuple2<String, Integer>> resultStream = dataStreamSource
                .flatMap(new WordCount.MyFlatMapFunction())
                .keyBy(0)                  // partition by the hash of the key (the word)
                .sum(1).setParallelism(2); // .slotSharingGroup("11").startNewChain();
                                           // slotSharingGroup assigns a slot-sharing group;
                                           // startNewChain stops chaining with upstream operators.

        resultStream.print().setParallelism(1);

        // Streaming jobs are lazy: nothing runs until execute() is called.
        env.execute("Stream Word Count");

        // The number prefixed to each printed result is the subtask (thread) index,
        // which can be read as the partition number.
    }
}
