package cn.jly.flink.state;

import cn.jly.flink.entity.MetricEvent;
import com.alibaba.fastjson.JSON;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.SlidingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.Properties;

/**
 * @PackageName cn.jly.flink.state
 * @ClassName ValueStateDemo
 * @Description 状态实现计数
 * @Author 姬岚洋
 * @Date 2021/1/19 下午 8:55
 */
public class ValueStateDemo {

    public static final String TOPIC_NAME = "test_count";

    public static final String BOOTSTRAP_SERVERS_VALUE = "node01:9092,node02:9092,node03:9092";

    public static final String DEFAULT_GROUP_ID_VALUE = "test";

    /**
     * Reads JSON-encoded {@link MetricEvent}s from Kafka, keys them by event tag,
     * windows them with a 5s/4s sliding event-time window, and emits
     * {@code (key, countInWindow, cumulativeCount)} triples. The cumulative count
     * is carried across windows per key via keyed {@link ValueState}.
     */
    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        final Properties properties = new Properties();
        properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS_VALUE);
        properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, DEFAULT_GROUP_ID_VALUE);
        properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");

        env.addSource(new FlinkKafkaConsumer<>(TOPIC_NAME, new SimpleStringSchema(), properties))
                .flatMap(new FlatMapFunction<String, MetricEvent>() {
                    @Override
                    public void flatMap(String s, Collector<MetricEvent> collector) throws Exception {
                        // Drop empty messages; malformed JSON still fails fast in parseObject.
                        if (StringUtils.isNotEmpty(s)) {
                            collector.collect(JSON.parseObject(s, MetricEvent.class));
                        }
                    }
                })
                // Bounded-out-of-orderness watermarks: tolerate data up to 2 seconds late.
                // NOTE(review): no withTimestampAssigner() is configured, so the
                // timestamps attached by the Kafka source (Kafka record timestamps)
                // are used as event time — confirm this is intended rather than a
                // timestamp field inside MetricEvent.
                .assignTimestampsAndWatermarks(WatermarkStrategy.forBoundedOutOfOrderness(Duration.of(2, ChronoUnit.SECONDS)))
                .keyBy(new KeySelector<MetricEvent, String>() {
                    @Override
                    public String getKey(MetricEvent metricEvent) throws Exception {
                        return metricEvent.getEventTag();
                    }
                })
                // Sliding window: every 4 seconds, aggregate the previous 5 seconds of data.
                .window(SlidingEventTimeWindows.of(Time.seconds(5), Time.seconds(4)))
                .process(new ProcessWindowFunction<MetricEvent, Tuple3<String, Long, Long>, String, TimeWindow>() {
                    /**
                     * Per-key state: f0 = key, f1 = count of the latest window, f2 = cumulative count.
                     */
                    private transient ValueState<Tuple3<String, Long, Long>> typeCountValueState;

                    @Override
                    public void open(Configuration parameters) throws Exception {
                        super.open(parameters);

                        /*
                            State TTL notes (see https://gitbook.cn/gitchat/column/5dad4a20669f843a1a37cb4f/topic/5db6a754f6a6211cb9616526):
                            A TTL can be assigned to any keyed state; once the TTL expires,
                            the previously stored value is cleared. All collection state
                            types support per-entry TTL, i.e. List elements and Map entries
                            expire independently. To enable it, build a StateTtlConfig and
                            pass it to the state descriptor.

                            Parameters:
                            1. The first argument of newBuilder is required: the time-to-live.
                            2. UpdateType — when the TTL timer is refreshed (default OnCreateAndWrite):
                                - StateTtlConfig.UpdateType.OnCreateAndWrite: refreshed only on create and write access
                                - StateTtlConfig.UpdateType.OnReadAndWrite: additionally refreshed on read access
                            3. StateVisibility — whether an expired-but-not-yet-cleaned value is
                               returned on read (default NeverReturnExpired):
                                - StateTtlConfig.StateVisibility.NeverReturnExpired: never return expired values
                                - StateTtlConfig.StateVisibility.ReturnExpiredIfNotCleanedUp: return if still available
                         */
//                        final StateTtlConfig stateTtlConfig = StateTtlConfig
//                                .newBuilder(org.apache.flink.api.common.time.Time.seconds(5))
//                                .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite)
//                                .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
//                                .build();

                        final ValueStateDescriptor<Tuple3<String, Long, Long>> descriptor = new ValueStateDescriptor<>(
                                "typeCountValueState",
                                TypeInformation.of(new TypeHint<Tuple3<String, Long, Long>>() {
                                })
                        );
                        // Enable state TTL (kept disabled for this demo).
//                        descriptor.enableTimeToLive(stateTtlConfig);

                        typeCountValueState = getRuntimeContext().getState(descriptor);
                    }

                    @Override
                    public void process(String key, Context context, Iterable<MetricEvent> iterable, Collector<Tuple3<String, Long, Long>> collector) throws Exception {
                        // Count the window's elements with a plain loop. The Iterable
                        // handed to ProcessWindowFunction is not guaranteed to implement
                        // Collection, and commons-collections 3.x CollectionUtils.size(Object)
                        // throws IllegalArgumentException for a plain Iterable, so the
                        // previous CollectionUtils.size(iterable) call could crash at runtime.
                        long count = 0L;
                        for (MetricEvent ignored : iterable) {
                            count++;
                        }

                        Tuple3<String, Long, Long> value = typeCountValueState.value();
                        if (value == null) {
                            // First window for this key: window count == cumulative count.
                            value = Tuple3.of(key, count, count);
                        } else {
                            // f1: size of the current window's contribution.
                            value.f1 = count;
                            // f2: running total accumulated across all windows.
                            value.f2 += count;
                        }
                        typeCountValueState.update(value);

                        // Emit the updated (key, windowCount, cumulativeCount) triple.
                        collector.collect(value);
                    }
                })
                .print();

        env.execute("ValueStateDemo");
    }
}
