package com.zsy.flink.transformation;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * @Description:
 * @ClassName: TransformationApp
 * @Author: Zhou ShiYang
 * @Date: 2021/8/25 17:05
 */
public class TransformationApp {

    public static void main(String[] args) throws Exception {
        // Create the streaming execution context.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        reduce(env);

        // Job name now matches this class (was "SourceApp", a copy-paste from another demo).
        env.execute("TransformationApp");
    }

    /**
     * Parses one line of data/access.log ("time,domain,traffic", comma separated)
     * into an {@code Access} object. Shared by the map/filter/keyBy demos below,
     * which previously duplicated this parsing lambda four times.
     *
     * @param line one raw CSV input line
     * @return the parsed Access object
     * @throws NumberFormatException if the time or traffic field is not numeric
     */
    private static Access parseAccess(String line) {
        String[] split = line.split(",");
        Long time = Long.parseLong(split[0].trim());
        String domain = split[1].trim();
        Double traffic = Double.parseDouble(split[2].trim());
        return new Access(time, domain, traffic);
    }

    /**
     * map:
     * applies the given function to every element of the DataStream,
     * producing a new DataStream with one output element per input element.
     *
     * @param env the streaming execution environment
     */
    public static void map(StreamExecutionEnvironment env) {
        // The source yields one String per line of the file.
        DataStreamSource<String> source = env.readTextFile("data/access.log");
        // Convert each line into an Access object.
        SingleOutputStreamOperator<Access> accessStream = source.map(TransformationApp::parseAccess);

        accessStream.print();
    }

    /**
     * filter:
     * evaluates a boolean predicate for each element and keeps only the
     * elements for which the predicate returns true.
     *
     * @param env the streaming execution environment
     */
    public static void filter(StreamExecutionEnvironment env) {
        // The source yields one String per line of the file.
        DataStreamSource<String> source = env.readTextFile("data/access.log");
        // Parse each line, then keep only records with traffic greater than 4000.
        SingleOutputStreamOperator<Access> highTraffic = source
                .map(TransformationApp::parseAccess)
                .filter(value -> value.getTraffic() > 4000);

        highTraffic.print();
    }

    /**
     * flatMap:
     * takes one element and produces zero, one, or more output elements.
     * Input arrives line by line, e.g.:
     * pk,pk,flink
     * pk,spark,spark
     *
     * @param env the streaming execution environment
     */
    public static void flatMap(StreamExecutionEnvironment env) {
        // The source yields one String per line of the file.
        DataStreamSource<String> source = env.readTextFile("data/wc.data");
        // Split each line on commas and emit every word separately.
        source.flatMap(new FlatMapFunction<String, String>() {
                    @Override
                    public void flatMap(String value, Collector<String> out) throws Exception {
                        for (String word : value.split(",")) {
                            out.collect(word);
                        }
                    }
                })
                .filter(value -> !value.equals("pk")) // drop the word "pk"
                .print();
    }

    /**
     * keyBy (grouping):
     * logically partitions the stream into disjoint partitions; all records
     * with the same key are assigned to the same partition.
     * Two equivalent variants are shown: field-name keyBy (deprecated in newer
     * Flink versions) and key-selector keyBy.
     *
     * @param env the streaming execution environment
     */
    public static void keyBy(StreamExecutionEnvironment env) {
        // The source yields one String per line of the file.
        DataStreamSource<String> source = env.readTextFile("data/access.log");

        // Variant 1: key by POJO field name (deprecated API, kept for comparison).
        source.map(TransformationApp::parseAccess)
                .keyBy("domain") // group by domain
                .sum("traffic") // rolling sum of traffic per domain
                .print();

        // Variant 2: key by an explicit key selector (preferred API).
        source.map(TransformationApp::parseAccess)
                .keyBy(Access::getDomain) // group by domain
                .sum("traffic") // rolling sum of traffic per domain
                .print();
    }

    /**
     * reduce:
     * a rolling reduce on a keyed stream — combines the current element with
     * the last reduced value and emits the new value (a streaming word count).
     *
     * @param env the streaming execution environment
     */
    public static void reduce(StreamExecutionEnvironment env) {
        // The source yields one String per line of the file.
        DataStreamSource<String> source = env.readTextFile("data/wc.data");
        // Split each line on commas and emit every word separately.
        source.flatMap(new FlatMapFunction<String, String>() {
                    @Override
                    public void flatMap(String value, Collector<String> out) throws Exception {
                        for (String word : value.split(",")) {
                            out.collect(word);
                        }
                    }
                })
                // Pair each word with an initial count of 1.
                // NOTE: an anonymous class (not a lambda) is required here — a lambda
                // producing Tuple2 loses its generic type to erasure and Flink would
                // need an explicit .returns(...) hint.
                .map(new MapFunction<String, Tuple2<String, Integer>>() {
                    @Override
                    public Tuple2<String, Integer> map(String value) throws Exception {
                        return Tuple2.of(value, 1);
                    }
                })
                // Group by the word (tuple field 0).
                .keyBy(value -> value.f0)
                // Rolling sum of the counts within each group.
                .reduce(new ReduceFunction<Tuple2<String, Integer>>() {
                    @Override
                    public Tuple2<String, Integer> reduce(Tuple2<String, Integer> value1, Tuple2<String, Integer> value2) throws Exception {
                        return Tuple2.of(value1.f0, value1.f1 + value2.f1);
                    }
                })
                .print();
    }
}
