package com.atguigu.flink.chapter03;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * Unbounded word-count demo built with lambda-based DataStream transformations.
 *
 * <p>Reads lines from a socket, splits them into words, and emits a running
 * count per word. Operator chaining is disabled job-wide so the web UI
 * (REST port 10000) shows every operator as its own task.
 *
 * @Author lzc
 * @Date 2022/7/3 10:16
 */
public class Flink02_WC_UnBounded_Lambda {
    public static void main(String[] args) throws Exception {
        System.out.println("main.....");

        // 1. Build the stream execution environment; expose the Flink web UI on port 10000.
        Configuration restConf = new Configuration();
        restConf.setInteger("rest.port", 10000);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(restConf);
        env.setParallelism(2);
        env.disableOperatorChaining(); // keep each operator as a separate task for the demo

        // 2. Source: socket text stream (a socket source always runs with parallelism 1).
        DataStreamSource<String> lines = env.socketTextStream("hadoop162", 9999);

        // 3a. Split each incoming line into individual words.
        //     The explicit returns(...) is required because the lambda's
        //     generic Collector type is erased at runtime.
        SingleOutputStreamOperator<String> words = lines
            .flatMap((String line, Collector<String> out) -> {
                String[] tokens = line.split(" ");
                for (int i = 0; i < tokens.length; i++) {
                    out.collect(tokens[i]);
                }
            })
            .returns(Types.STRING);

        // 3b. Pair every word with an initial count of 1.
        //     The trailing identity map (an anonymous MapFunction) is kept as a
        //     distinct operator purely to illustrate operator chaining behavior.
        SingleOutputStreamOperator<Tuple2<String, Long>> wordAndOne = words
            .map(word -> Tuple2.of(word, 1L))
            .returns(Types.TUPLE(Types.STRING, Types.LONG))
            .map(new MapFunction<Tuple2<String, Long>, Tuple2<String, Long>>() {
                @Override
                public Tuple2<String, Long> map(Tuple2<String, Long> value) throws Exception {
                    return value; // no-op pass-through
                }
            });

        // 3c. Key the stream by the word itself, then keep a running sum of the counts
        //     (field position 1 of the tuple).
        KeyedStream<Tuple2<String, Long>, String> byWord = wordAndOne.keyBy(pair -> pair.f0);
        SingleOutputStreamOperator<Tuple2<String, Long>> counts = byWord.sum(1);

        // 4. Sink: print the running counts to stdout.
        counts.print();

        // 5. Launch the streaming job.
        env.execute();
    }
}
/*
如何设置并行度:
    4种方式
    
    1. 在配置文件中
        parallelism.default: 2
    2. 提交应用的时候
        bin/flink run -p 2  ...
        
    3. 在代码中, 通过env设置
        env.setParallelism(2);
        
    4. 单独给算子设置并行度
        .map(word -> Tuple2.of(word, 1L))
            .setParallelism(3)

----------
操作链

  .startNewChain()
    当前算子不和全面的算子合并
  .disableChaining()
    当前算子禁止合并
  env.disableOperatorChaining();
    整个job禁止合并
  

 */