package com.wudl.flink.functions;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSink;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.util.Collector;
import scala.collection.immutable.Stream;


import java.util.Random;
import java.util.concurrent.TimeUnit;

/**
 * Examples of core Flink DataStream transformation functions
 * (map, flatMap, filter, keyBy/sum, reduce).
 *
 * @version v1.0
 * @ProjectName lflink
 * @ClassName FunctionsDemo
 * @Author wudl
 * @Date 2020/7/30 9:28
 */

public class FunctionsDemo {

    /**
     * Entry point. Runs exactly one demo at a time (streaming jobs block on
     * {@code env.execute}), so the other demos are left commented out.
     */
    public static void main(String[] args) throws Exception {
        FunctionsDemo fd = new FunctionsDemo();
//        fd.FunctionMap();
//        fd.FunctionFlatMap();
//          fd.functionFilter();
//        fd.functionBykeySum();
        fd.functionReduce();
    }

    /**
     * Builds a demo source that emits one {@code (word, 1)} tuple per second,
     * choosing the word at random from {@code words}, until cancelled.
     *
     * <p>Static so the anonymous class captures no enclosing instance; shared
     * by {@link #functionReduce()} and {@link #functionBykeySum()} which
     * previously duplicated it verbatim.
     *
     * @param words the pool of words to emit
     * @return an infinite, cancellable source of {@code (word, 1)} tuples
     */
    private static SourceFunction<Tuple2<String, Integer>> randomWordSource(final String[] words) {
        return new SourceFunction<Tuple2<String, Integer>>() {
            // volatile: cancel() is invoked from a different thread than run()
            private volatile boolean isRunning = true;
            private final Random random = new Random();

            @Override
            public void run(SourceContext<Tuple2<String, Integer>> ctx) throws Exception {
                while (isRunning) {
                    TimeUnit.SECONDS.sleep(1);
                    ctx.collect(Tuple2.of(words[random.nextInt(words.length)], 1));
                }
            }

            @Override
            public void cancel() {
                isRunning = false;
            }
        };
    }

    /**
     * Demonstrates {@code reduce}: counts word occurrences per key by folding
     * each incoming {@code (word, 1)} tuple into a running sum.
     *
     * @throws Exception if the Flink job fails
     */
    public void functionReduce() throws Exception {
        final String[] arr = {"hdfs", "hive", "yarn", "hbase"};
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        DataStreamSource<Tuple2<String, Integer>> tuple2DataStreamSource =
                env.addSource(randomWordSource(arr), "order-info");

        tuple2DataStreamSource.keyBy(0).reduce(new ReduceFunction<Tuple2<String, Integer>>() {
            @Override
            public Tuple2<String, Integer> reduce(Tuple2<String, Integer> value1,
                                                  Tuple2<String, Integer> value2) throws Exception {
                // Same key on both sides; accumulate the counts.
                return Tuple2.of(value1.f0, value1.f1 + value2.f1);
            }
        }).print();

        env.execute("qq");
    }

    /**
     * Demonstrates {@code keyBy(...).sum(...)}: one tuple is emitted per
     * second and Flink keeps a running per-word count.
     *
     * @throws Exception if the Flink job fails
     */
    public void functionBykeySum() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        final String[] arr = {"hdfs", "hive", "yarn", "hbase"};
        DataStreamSource<Tuple2<String, Integer>> orderSource =
                env.addSource(randomWordSource(arr), "order-info");

        // Key by the word (field 0), sum the count (field 1).
        orderSource.keyBy(0).sum(1).print();

        env.execute("1");
    }

    /**
     * Demonstrates {@code filter}: each word is emitted in its original and
     * upper-cased form, then only strings longer than 5 characters are
     * printed by the sink.
     *
     * @throws Exception if the Flink job fails
     */
    public void functionFilter() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        String[] world = {"hdfs", "hive", "kafka", "hbasegggggggbbbbbb"};
        DataStreamSource<String> dataStreamSource = env.fromElements(world);
        SingleOutputStreamOperator<String> doubledStream =
                dataStreamSource.flatMap(new FlatMapFunction<String, String>() {
                    @Override
                    public void flatMap(String s, Collector<String> collector) throws Exception {
                        // Emit both the original and the upper-cased word.
                        collector.collect(s);
                        collector.collect(s.toUpperCase());
                    }
                });
        SingleOutputStreamOperator<String> filter = doubledStream.filter((value) -> value.length() > 5);
        filter.addSink(new SinkFunction<String>() {
            @Override
            public void invoke(String value) throws Exception {
                System.out.println("长度大于5 的" + value);
            }
        });
        env.execute("filter");
    }

    /**
     * Demonstrates {@code map}: applies a user-defined MapFunction to every
     * element of the stream (here: add 3 to each integer), producing a new
     * stream of the transformed values. Typical use cases are per-record
     * cleaning and conversion.
     *
     * @throws Exception if the Flink job fails
     */
    public void FunctionMap() throws Exception {
        // Obtain the execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        Integer[] world = {1, 3, 5, 8};
        // Operator parallelism.
        env.setParallelism(1);
        DataStreamSource<Integer> ds = env.fromElements(world);
        SingleOutputStreamOperator<Integer> map = ds.map(new MapFunction<Integer, Integer>() {
            @Override
            public Integer map(Integer s) throws Exception {
                return s + 3;
            }
        });
        // Route the result into a sink.
        map.addSink(new SinkFunction<Integer>() {
            @Override
            public void invoke(Integer value) throws Exception {
                System.out.println("value------------" + value);
            }
        });
        System.out.println("**************************************");
        // Also print the result stream.
        map.print();
        env.execute("a");

    }

    /**
     * Demonstrates {@code flatMap}: one input element may produce multiple
     * output elements (a one-to-many transformation). Here each word is
     * emitted twice — unchanged and upper-cased.
     *
     * @throws Exception if the Flink job fails
     */
    public void FunctionFlatMap() throws Exception {

        // Initialize the Flink execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        String[] world = {"hdfs", "hive", "hbase", "kafka", "flink"};
        DataStreamSource<String> dataStreamSource = env.fromElements(world);
        // Parameterized type added: the original used a raw SingleOutputStreamOperator.
        SingleOutputStreamOperator<String> streamFlatMap =
                dataStreamSource.flatMap(new FlatMapFunction<String, String>() {
                    @Override
                    public void flatMap(String s, Collector<String> out) throws Exception {
                        // Emit the original value unchanged...
                        out.collect(s);
                        // ...and its upper-cased counterpart.
                        out.collect(s.toUpperCase());

                    }
                });


        streamFlatMap.addSink(new SinkFunction<String>() {

            @Override
            public void invoke(String value) throws Exception {
                System.out.println(value);
            }
        });

        env.execute("qq");
    }
}
