package com.diver.flinkdemo;

import com.diver.flinkdemo.entity.DalData;
import com.diver.flinkdemo.entity.DevUploadData;
import com.diver.flinkdemo.sink.DevUploadDataSink;
import com.diver.flinkdemo.utils.DynamicDataSourceUtil;
import org.apache.commons.beanutils.BeanUtils;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import java.io.IOException;

/**
 * Persists every raw message uploaded by student-card ("XSZ") devices.
 *
 * @author lujw
 * @date 2023/4/28 15:38
 */

public class OriUploadDataJob {

    /** Default Kafka bootstrap servers, used when no CLI override is given. */
    private static final String DEFAULT_BOOTSTRAP_SERVERS = "172.16.100.67:9092";
    /** Default Kafka topic carrying raw device messages. */
    private static final String DEFAULT_TOPIC = "aiot-msg";
    /** Checkpoint interval in milliseconds (60 s). */
    private static final long CHECKPOINT_INTERVAL_MS = 60_000L;

    /**
     * Entry point: consumes raw device-upload messages from Kafka, keeps only
     * records of type {@code "XSZ_UP_MSG"}, maps them to {@link DevUploadData}
     * and writes them out through {@link DevUploadDataSink}.
     *
     * @param args optional overrides: {@code args[0]} = Kafka bootstrap servers,
     *             {@code args[1]} = topic; defaults are used when absent
     * @throws Exception if data-source initialization or job execution fails —
     *                   failures are propagated (not swallowed) so the launcher
     *                   and any restart policy can observe them
     */
    public static void main(String[] args) throws Exception {
        String bootstrapServers = args.length > 0 ? args[0] : DEFAULT_BOOTSTRAP_SERVERS;
        String topic = args.length > 1 ? args[1] : DEFAULT_TOPIC;

        // Fail fast: the sink depends on the dynamic data source, so there is
        // no point starting the Flink job if initialization fails.
        DynamicDataSourceUtil sourceUtil = new DynamicDataSourceUtil();
        sourceUtil.init();

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Exactly-once semantics give end-to-end consistency within the application.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Checkpoint every 60 s (enableCheckpointing takes milliseconds).
        env.enableCheckpointing(CHECKPOINT_INTERVAL_MS);

        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers(bootstrapServers)
                .setTopics(topic)
                .setGroupId("flinkdev")
                // Start from the latest offset; switch to
                // OffsetsInitializer.committedOffsets(OffsetResetStrategy.LATEST)
                // to resume from committed offsets instead.
                .setStartingOffsets(OffsetsInitializer.latest())
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        DataStream<String> stream =
                env.fromSource(source, WatermarkStrategy.noWatermarks(), "Kafka Source")
                        .setParallelism(1);

        // 1. Persist raw device-upload records: keep "XSZ_UP_MSG" messages only.
        stream.filter(new FilterFunction<String>() {
            @Override
            public boolean filter(String s) throws Exception {
                return "XSZ_UP_MSG".equals(DalData.convert(s).getType());
            }
        }).map(new MapFunction<String, DevUploadData>() {
            @Override
            public DevUploadData map(String s) throws Exception {
                DalData dalData = DalData.convert(s);
                DevUploadData uploadData = new DevUploadData();
                // Apache Commons BeanUtils: argument order is (destination, source).
                BeanUtils.copyProperties(uploadData, dalData);
                return uploadData;
            }
        }).addSink(new DevUploadDataSink());

        // Propagate execution failures instead of printing and exiting normally.
        env.execute("OriUploadDataJob");
    }
}
