package com.liutong;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer09;
import org.apache.flink.streaming.connectors.redis.RedisSink;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommand;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommandDescription;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisMapper;
import org.apache.flink.util.Collector;

import java.util.Properties;

public class Kafka2FlinkTestPlus {
    /**
     * Reads lines from Kafka topic "test2", computes a running word count,
     * prints it, and writes each (word, count) pair into a Redis hash.
     *
     * @param args unused
     * @throws Exception if the Flink job fails to start or execute; propagated
     *                   instead of being swallowed so the JVM exits non-zero
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // NOTE(review): EventTime is declared but no timestamp/watermark assigner is
        // installed, and the keyBy/sum pipeline below does not use event time, so this
        // setting is currently inert — confirm whether event-time semantics are intended.
        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
        env.enableCheckpointing(10000); // checkpoint every 10 seconds

        // Kafka 0.9 consumer configuration.
        Properties properties = new Properties();
        properties.setProperty("zookeeper.connect", "node01:2181,node02:2181,node03:2181");
        properties.setProperty("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
        properties.setProperty("group.id", "flink-group2");
        properties.setProperty("enable.auto.commit", "true");
        properties.setProperty("auto.commit.interval.ms", "1000");
        properties.setProperty("auto.offset.reset", "earliest");
        properties.setProperty("session.timeout.ms", "30000");
        properties.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // (The former "topic" entry was removed: it is not a Kafka consumer property;
        //  the topic is passed to the consumer constructor below.)

        FlinkKafkaConsumer09<String> myConsumer =
                new FlinkKafkaConsumer09<>("test2", new SimpleStringSchema(), properties);
        DataStreamSource<String> source = env.addSource(myConsumer);

        // Tokenize lines into (word, 1) pairs, key by the word (field 0) and
        // keep a running sum of the counts (field 1). The stream is fully
        // parameterized — the original used a raw SingleOutputStreamOperator.
        SingleOutputStreamOperator<Tuple2<String, Integer>> count =
                source.flatMap(new Tokenizerd()).keyBy(0).sum(1);
        count.print();

        // Sink: write every (word, count) pair into the Redis hash "flink".
        FlinkJedisPoolConfig conf =
                new FlinkJedisPoolConfig.Builder().setHost("127.0.0.1").setPort(6379).build();
        RedisSink<Tuple2<String, Integer>> redisSink = new RedisSink<>(conf, new redisExample());
        count.addSink(redisSink);

        env.execute();
    }

    /**
     * Splits each incoming line on single spaces and emits one (token, 1)
     * pair per token. Consecutive spaces yield empty-string tokens, which are
     * emitted as well.
     */
    private static class Tokenizerd implements FlatMapFunction<String, Tuple2<String, Integer>> {
        @Override
        public void flatMap(String line, Collector<Tuple2<String, Integer>> out) throws Exception {
            final String[] tokens = line.split(" ");
            for (int i = 0; i < tokens.length; i++) {
                out.collect(new Tuple2<String, Integer>(tokens[i], 1));
            }
        }
    }
/*
指定redis的key 并将flink的数据类型映射到redis数据类型
 */
    private static class redisExample implements org.apache.flink.streaming.connectors.redis.common.mapper.RedisMapper<Tuple2<String,Integer>> {
        //设置数据使用的数据结构 hashset 并设置key的名称
        @Override
        public RedisCommandDescription getCommandDescription() {
            return new RedisCommandDescription(RedisCommand.HSET,"flink");
        }
/*
*   获取value值 value的数据是键值对
 */
//指定key
        @Override
        public String getKeyFromData(Tuple2<String,Integer> data) {
            return data.f0.toString();
        }
//指定value
        @Override
        public String getValueFromData(Tuple2<String,Integer> data) {
            return data.f1.toString();
        }
    }
}
