package server_timu;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.redis.RedisSink;
import org.apache.flink.streaming.connectors.redis.common.config.FlinkJedisPoolConfig;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommand;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisCommandDescription;
import org.apache.flink.streaming.connectors.redis.common.mapper.RedisMapper;
import org.apache.flink.util.Collector;
import util.Kafka_util;

/*
       Task 4: Consume the DWD-layer data from Kafka with Flink, compute the real-time
       order count for the mall, and store it in Redis under the key "totalcount".
       Verify via redis-cli with: GET totalcount
 */
public class T4 {

    // Jackson's ObjectMapper is thread-safe and expensive to construct; holding it
    // as a static field reuses one instance and keeps it out of the serialized
    // flatMap closure that Flink ships to the task managers.
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Connection-pool configuration for the Flink Redis sink (builder pattern).
        FlinkJedisPoolConfig jedisPoolConfig = new FlinkJedisPoolConfig.Builder()
                .setHost("192.168.40.110")
                .build();

        // Consume the DWD-layer order facts from the Kafka topic.
        KafkaSource<String> kafka_source = Kafka_util.getKafkaSource("fact_order_master");
        DataStreamSource<String> data = env.fromSource(
                kafka_source,
                WatermarkStrategy.noWatermarks(),
                "kafka_source"
        );

        // Real-time order count for the mall, excluding refunded orders ("已退款").
        SingleOutputStreamOperator<Tuple2<String, Integer>> result = data.flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
                    @Override
                    public void flatMap(String value, Collector<Tuple2<String, Integer>> collector) throws Exception {
                        // Parse the raw record; skip malformed JSON instead of letting
                        // one bad Kafka message fail the whole streaming job.
                        JsonNode jsonNode;
                        try {
                            jsonNode = OBJECT_MAPPER.readTree(value);
                        } catch (Exception ignored) {
                            return; // not valid JSON — drop the record
                        }
                        // get() returns null when the field is absent; guard against
                        // the NPE the original code would throw on such records.
                        JsonNode statusNode = jsonNode == null ? null : jsonNode.get("order_status");
                        if (statusNode == null) {
                            return; // no order_status field — drop the record
                        }
                        // Emit (status, 1) so downstream operators can filter and count.
                        collector.collect(Tuple2.of(statusNode.asText(), 1));
                    }
                })
                // Drop orders whose status marks them as refunded.
                .filter(stringTuple2 -> !stringTuple2.f0.contains("已退款"))
                // Key every element by a constant so the running sum spans the whole stream
                // (all data lands in a single key group).
                .keyBy(stringTuple2 -> true)
                // Running total of the order count (sums tuple field f1).
                .sum(1);

        result.printToErr("商场实时订单量");

        // Write the running total to Redis: first argument is the connection-pool
        // config, the second defines how each record maps to a Redis command.
        result.addSink(new RedisSink<>(jedisPoolConfig, new to_redis()));

        env.execute();
    }

    // Custom Redis sink mapper. RedisMapper is the core interface of the
    // Flink-Redis connector: it tells the sink which command to execute and how
    // to derive the key and value from each stream record.
    public static class to_redis implements RedisMapper<Tuple2<String, Integer>> {

        // Use the Redis SET command (overwrite-style write) so the key always
        // holds the latest running total.
        @Override
        public RedisCommandDescription getCommandDescription() {
            return new RedisCommandDescription(RedisCommand.SET);
        }

        // Every record is written to the single key "totalcount", as the task requires.
        @Override
        public String getKeyFromData(Tuple2<String, Integer> stringIntegerTuple2) {
            return "totalcount";
        }

        // The stored value is the running count (second tuple element) as a string.
        @Override
        public String getValueFromData(Tuple2<String, Integer> stringIntegerTuple2) {
            return stringIntegerTuple2.f1.toString();
        }
    }
}
