package com.xinqing.bigdata.flink.datastream.window;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.xinqing.bigdata.flink.datastream.sink.Kafka2KuduWithWaterMark;
import com.xinqing.bigdata.flink.datastream.function.AvgAggreFunction;
import com.xinqing.bigdata.flink.datastream.function.AvgAggreProcessFunction;
import com.xinqing.bigdata.flink.datastream.model.UserCostInfo;
import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.util.Properties;

/**
 * @Author:CHQ
 * @Date:2021/5/24 14:24
 * @Description
 */
/**
 * Flink job: reads user-cost JSON events from the Kafka topic {@code cost},
 * assigns event-time timestamps with a bounded-out-of-orderness watermark,
 * and computes a per-user average over 10-second tumbling event-time windows
 * via {@link AvgAggreFunction} (incremental aggregate) combined with
 * {@link AvgAggreProcessFunction} (window metadata enrichment).
 */
public class AggregateAndProcessWindowFunction {

    // Logger bound to THIS class (the original mistakenly used
    // Kafka2KuduWithWaterMark.class, misattributing log output).
    private static final Logger LOG = LoggerFactory.getLogger(AggregateAndProcessWindowFunction.class);

    public static void main(String[] args) throws Exception {

        // Local environment with a fixed Web-UI port for debugging.
        // StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        // executionConfig.setAutoWatermarkInterval(200); // periodic watermark emission interval;
        // avoids emitting a watermark per record, which would hurt throughput
        env.setParallelism(1);

        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        checkpointConfig.setCheckpointInterval(1000);    // checkpoint every second
        checkpointConfig.setCheckpointTimeout(60000);    // discard a checkpoint that takes longer than 60s
        checkpointConfig.setMaxConcurrentCheckpoints(1); // at most one in-flight checkpoint

        // Kafka consumer configuration
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "10.201.7.175:9092");
        properties.setProperty("group.id", "chq11");
        properties.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // properties.setProperty("auto.offset.reset", "earliest");
        properties.setProperty("auto.offset.reset", "latest");

        LOG.info("Starting cost-averaging job, consuming topic 'cost'");

        env.addSource(new FlinkKafkaConsumer<>("cost", new SimpleStringSchema(), properties))
                // Parse each JSON record {"sid": ..., "money": ..., "timestamp": ...} into a POJO.
                .map((MapFunction<String, UserCostInfo>) value -> {
                    JSONObject jsonObject = JSON.parseObject(value);
                    String sid = jsonObject.getString("sid");
                    Double money = jsonObject.getDouble("money");
                    Long timestamp = jsonObject.getLong("timestamp");
                    return new UserCostInfo(sid, money, timestamp);
                })
                // Event time from the record's own timestamp; tolerate up to 2s of
                // out-of-order data; mark partitions idle after 10s so watermarks advance.
                .assignTimestampsAndWatermarks(WatermarkStrategy.<UserCostInfo>forBoundedOutOfOrderness(Duration.ofSeconds(2))
                        .withIdleness(Duration.ofSeconds(10))
                        .withTimestampAssigner((SerializableTimestampAssigner<UserCostInfo>) (element, recordTimestamp) -> element.getTimestamp())
                )
                .keyBy((KeySelector<UserCostInfo, String>) value -> value.getSid())
                .window(TumblingEventTimeWindows.of(Time.seconds(10)))
                // Incremental aggregation + process function for window metadata.
                .aggregate(new AvgAggreFunction(), new AvgAggreProcessFunction());

        env.execute();
    }
}
