package com.atguigu.bigdata.chapter04;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * @Author lzc
 * @Date 2022/8/31 9:51
 */
public class Flink02_WC_UnBounded {
    /**
     * Unbounded streaming word count: reads text lines from a socket, splits them into
     * words, and prints a running count per word. Operator chaining is globally disabled
     * to make each operator visible as its own task in the web UI (teaching aid).
     *
     * @param args optional overrides: args[0] = source host (default "hadoop162"),
     *             args[1] = source port (default 9999)
     */
    public static void main(String[] args) {
        System.out.println("Flink02_WC_UnBounded.main");

        // Allow the socket source to be overridden from the command line;
        // defaults keep the original behavior.
        String host = args.length > 0 ? args[0] : "hadoop162";
        int port = args.length > 1 ? Integer.parseInt(args[1]) : 9999;

        // 1. Create the stream execution environment. It adapts automatically to where
        //    the job runs (IDE vs. cluster); rest.port=2000 pins the local web UI port.
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 2000);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(2);
        env.disableOperatorChaining(); // disable chaining job-wide so each operator is a separate task

        // 2. Obtain a stream from the source (raw text lines over a socket).
        DataStreamSource<String> source = env.socketTextStream(host, port);

        // 3. Transformations. First: line -> individual words.
        SingleOutputStreamOperator<String> wordStream = source
            .flatMap(new FlatMapFunction<String, String>() {
                @Override
                public void flatMap(String line, Collector<String> out) throws Exception {
                    for (String word : line.split(" ")) {
                        out.collect(word);  // emit each word into the downstream stream
                    }
                }
            });

        // word -> (word, 1L) so counts can be summed per key.
        SingleOutputStreamOperator<Tuple2<String, Long>> wordOneStream = wordStream
            .map(new MapFunction<String, Tuple2<String, Long>>() {
                @Override
                public Tuple2<String, Long> map(String word) throws Exception {
                    return Tuple2.of(word, 1L);
                }
            })
            //.startNewChain()    // would start a new chain at this operator
            // .disableChaining() // would exclude this operator from any chain
            .filter(t -> true); // no-op filter, kept to illustrate a chaining boundary

        // Key the stream by the word itself (tuple field f0).
        KeyedStream<Tuple2<String, Long>, String> keyedStream = wordOneStream.keyBy(new KeySelector<Tuple2<String, Long>, String>() {
            // Returns the key of each element.
            @Override
            public String getKey(Tuple2<String, Long> t) throws Exception {
                return t.f0;
            }
        });
        // Running aggregation over tuple position 1 (the count).
        SingleOutputStreamOperator<Tuple2<String, Long>> resultStream = keyedStream.sum(1);

        // 4. Sink: print the result stream to stdout.
        resultStream.print();

        // 5. Submit and run the job. Fail fast with the original cause instead of
        //    printStackTrace-and-continue, which would silently hide a failed submission.
        try {
            env.execute();
        } catch (Exception e) {
            throw new RuntimeException("Flink job execution failed", e);
        }
    }
}
/*
操作链的优化是自动实现:
    算子与算子之间: one-to-one
    算子的并行度一样

.startNewChain()
    开启一个新链: 当前算子不会和前面的优化在一起
    
    如果后面的算子满足优化条件, 也会与当前算子优化到一起
    
.disableChaining()
    当前算子不参与任何链的优化
    
 env.disableOperatorChaining()
    当前应用所有算子都不进行优化
    
 实际生产环境下, 尽量不要禁止优化: 优化只有好处没有坏处
----


在flink中, 有4种办法设置并行度

1. 在flink-conf.yaml 配置文件中设置算子的默认并行度

        parallelism.default: 1
        
2. 提交job的时候通过参数设置
    bin/flink run -d -p 2 ...

3. 在代码中通过环境设置
    env.setParallelism(1);
    
    注意: 在flink中socket这个source它的并行度只能是1.
    
4. 单独给每个算子设置并行度
    .setParallelism(3)
 
    

 */