package cn.mesmile.flink.sink;

import cn.mesmile.flink.jdkstream.VideoOrder;
import cn.mesmile.flink.source.VideoOrderSource;
import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.redis.RedisSink;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig;
import org.apache.flink.util.Collector;

import java.util.Date;

/**
 * @author zb
 * @date 2021/8/22 11:39
 * @Description
 */
public class FlinkRedisSinkApp {

    /**
     * Flink streaming job: consumes {@link VideoOrder} events from a custom
     * source, counts orders per video title, prints the running counts, and
     * writes them to Redis via {@code MyRedisSink}.
     *
     * @param args optional overrides: {@code args[0]} = Redis host,
     *             {@code args[1]} = Redis port. Defaults preserve the
     *             original hard-coded connection ({@code 81.69.43.78:6379}).
     * @throws Exception if job construction or execution fails
     */
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(2);
        env.setRuntimeMode(RuntimeExecutionMode.AUTOMATIC);

        DataStreamSource<VideoOrder> ds = env.addSource(new VideoOrderSource());

        // map: exactly one output record per input record. (flatMap is only
        // needed for one-to-many transforms, e.g. splitting "123,456" into
        // multiple records — not the case here.)
        DataStream<Tuple2<String, Integer>> mapDS = ds.map(new MapFunction<VideoOrder, Tuple2<String, Integer>>() {
            @Override
            public Tuple2<String, Integer> map(VideoOrder value) throws Exception {
                return new Tuple2<>(value.getTitle(), 1);
            }
        });

        // Group by video title. The DataStream API uses keyBy (the batch
        // DataSet API's equivalent is groupBy).
        KeyedStream<Tuple2<String, Integer>, String> keyByDS = mapDS.keyBy(new KeySelector<Tuple2<String, Integer>, String>() {
            @Override
            public String getKey(Tuple2<String, Integer> value) throws Exception {
                return value.f0;
            }
        });

        // Sum tuple field 1 (the per-record count of 1) within each key group
        // to produce a running count per title. Field indices are 0-based.
        DataStream<Tuple2<String, Integer>> sumDS = keyByDS.sum(1);
        sumDS.print();

        // Redis connection settings; overridable via program arguments so the
        // job is not tied to one environment.
        // NOTE(review): the default is an unauthenticated public IP — move the
        // endpoint (and a password) to external configuration for production.
        String redisHost = args.length > 0 ? args[0] : "81.69.43.78";
        int redisPort = args.length > 1 ? Integer.parseInt(args[1]) : 6379;
        FlinkJedisPoolConfig conf = new FlinkJedisPoolConfig.Builder()
                .setHost(redisHost)
                .setPort(redisPort)
                .setDatabase(0)
                .setTimeout(3000)
                .build();

        sumDS.addSink(new RedisSink<>(conf, new MyRedisSink()));

        // DataStream programs are lazy: execute() actually submits the job.
        // The name appears in the Flink web UI.
        env.execute("custom sink job");
    }
}
