package com.edata.bigdata.viewmain;

import com.edata.bigdata.basic.Manager;
import com.edata.bigdata.entity.Model;
import com.edata.bigdata.flink.StreamSink;
import com.edata.bigdata.flink.StreamSource;
import org.apache.flink.configuration.CheckpointingOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.streaming.api.datastream.DataStream;

import java.time.Duration;
import java.util.Properties;

/*
 * This example covers:
 * 1. A Flink job that consumes messages from Kafka.
 * 2. Checkpointing, with an HDFS directory as the checkpoint storage; the
 *    Kafka auto-managed-offset settings are removed, since keeping them
 *    alongside checkpointing could conflict.
 * 3. Writing the consumed data to HDFS.
 * */
public class FlinkK2S_2 {
    public static void main(String[] args) throws Exception {

        // Checkpointing configuration. Once checkpoints are enabled they take
        // over offset management from the Kafka client's auto.commit settings,
        // so those client-side options are deliberately not set below.
        Configuration cpConf = new Configuration();
        cpConf.set(CheckpointingOptions.CHECKPOINTING_INTERVAL, Duration.ofSeconds(10));
        cpConf.set(CheckpointingOptions.CHECKPOINT_STORAGE, "filesystem");
        cpConf.set(CheckpointingOptions.CHECKPOINTS_DIRECTORY, "hdfs://172.16.11.97:8082/testing/flinkK2S_2_checkpoint");

        // Bootstrap the shared Flink environment with the checkpoint settings.
        Manager mgr = new Manager();
        mgr.flinkConf = cpConf;
        mgr.createFlinkEnvironment();

        // Kafka consumer / source settings.
        Properties consumerProps = new Properties();
        consumerProps.setProperty("kafka.bootstrap.servers", "172.16.11.97:9092");
        consumerProps.setProperty("subscribe", "test");
        consumerProps.setProperty("group.id", "test");
        consumerProps.setProperty("startingOffsets", "earliest");
        consumerProps.setProperty("kafka.session.timeout.ms", "30000");

        // Watermark settings; when these are absent or watermarks.enabled=false
        // the source presumably falls back to WatermarkStrategy.noWatermarks()
        // (per the original author's note — confirm against StreamSource).
        consumerProps.setProperty("watermarks.enabled", "true");
        consumerProps.setProperty("watermarks.maxOutOfOrder.sec", "2");
        consumerProps.setProperty("watermarks.idleness.sec", "60");

        // NOTE(review): the Kafka client-side offset options (enable.auto.commit,
        // auto.commit.interval.ms, session.timeout.ms, heartbeat.interval.ms)
        // were intentionally dropped here — with checkpointing enabled above,
        // client auto-commit of offsets would not take effect anyway.

        // Build the Kafka-backed source stream of Model records; the second
        // constructor argument is the timestamp pattern used by the source.
        StreamSource source = new StreamSource(mgr.flinkEnv, "yyyy-MM-dd HH:mm:ss:SSS");
        DataStream<Model> records = source.createDataStream(consumerProps, null, Model.class);

        // Sliding window over the records; the reduce keeps only the first
        // element of each window. The numeric arguments (10, 10, 5) are the
        // window parameters expected by applySlidingWindows — TODO confirm
        // their exact meaning (size/slide/lateness?) against StreamSource.
        DataStream<Model> windowed = source
                .applySlidingWindows(records, 10, 10, 5, Model.class)
                .reduce((first, second) -> first);

        // CSV file sink writing the windowed records to HDFS.
        Properties writerProps = new Properties();
        writerProps.setProperty("sink.file.prefix", "model");
        writerProps.setProperty("sink.file.suffix", ".csv");
        StreamSink sinkFactory = new StreamSink();
        FileSink<Model> fileSink = sinkFactory.createCSVSink("hdfs://172.16.11.97:8082/testing/flinkK2S_2", writerProps, Model.class);
        windowed.sinkTo(fileSink);

        // Submit the job under this name.
        source.start("Kafka2HDFSFlink");

    }
}
