package com.diver.flinkdemo;

import com.diver.flinkdemo.service.PushDataService;
import com.diver.flinkdemo.entity.DalData;
import com.diver.flinkdemo.utils.DynamicDataSourceUtil;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import java.io.IOException;

/**
 * @author lujw
 * @Date 2023/4/28 10:56
 * @desc "消息推送需要的数据推送kafka"
 */

/**
 * Flink streaming job that reads raw messages from the intake Kafka topic,
 * keeps only the types needed for push notifications, enriches them via
 * {@link PushDataService}, and forwards the results to the push-data topic.
 */
public class MessageDataPushJob {

    /** Source Kafka cluster / topic (message intake). */
    private static final String SOURCE_BROKERS = "172.16.100.67:9092";
    private static final String SOURCE_TOPIC = "aiot-msg";
    private static final String CONSUMER_GROUP = "flinkdev";

    /** Sink Kafka cluster / topic (push-data output). */
    private static final String SINK_BROKERS = "172.16.100.72:9092";
    private static final String SINK_TOPIC = "push-data";

    /** Checkpoint interval in milliseconds (60 seconds). */
    private static final long CHECKPOINT_INTERVAL_MS = 60_000L;

    public static void main(String[] args) throws Exception {
        DynamicDataSourceUtil sourceUtil = new DynamicDataSourceUtil();
        try {
            sourceUtil.init();
        } catch (IOException e) {
            // Fail fast: the map stage depends on the datasource, so running
            // on after a failed init would only produce per-record errors.
            throw new IllegalStateException("Failed to initialize dynamic datasource", e);
        }

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Exactly-once checkpointing for end-to-end consistency within the job.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Checkpoint every 60 s. (An earlier comment claimed 5 s; the code has
        // always used 60000 ms — the comment was stale, not the code.)
        env.enableCheckpointing(CHECKPOINT_INTERVAL_MS);

        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers(SOURCE_BROKERS)
                .setTopics(SOURCE_TOPIC)
                .setGroupId(CONSUMER_GROUP)
                // Start from the latest offsets — no replay of historical messages.
                .setStartingOffsets(OffsetsInitializer.latest())
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        DataStream<String> stream = env
                .fromSource(source, WatermarkStrategy.noWatermarks(), "Kafka Source")
                .setParallelism(1);

        // Sink that forwards processed records to the downstream Kafka cluster.
        KafkaSink<String> kafkaSink = KafkaSink.<String>builder()
                .setBootstrapServers(SINK_BROKERS)
                .setRecordSerializer(KafkaRecordSerializationSchema.builder()
                        .setTopic(SINK_TOPIC)
                        .setValueSerializationSchema(new SimpleStringSchema())
                        .build())
                .setDeliverGuarantee(DeliveryGuarantee.AT_LEAST_ONCE)
                .build();

        // Anonymous classes (not lambdas) are deliberate: Flink can extract
        // generic types from them without explicit returns(...) hints.
        stream.filter(new FilterFunction<String>() {
            @Override
            public boolean filter(String s) throws Exception {
                // Keep only the message types relevant to push notifications.
                DalData dalData = (s != null) ? DalData.convert(s) : new DalData();
                return "XSZ_UP_MSG".equals(dalData.getType())
                        || "PARSED_LOCATION".equals(dalData.getType());
            }
        }).map(new MapFunction<String, String>() {
            @Override
            public String map(String s) throws Exception {
                // Task managers deserialize this function without running main(),
                // so the datasource may still need lazy initialization here.
                if (DynamicDataSourceUtil.mysqlSessionFactory == null) {
                    new DynamicDataSourceUtil().init();
                }
                PushDataService service = new PushDataService();
                return service.handDalData(s);
            }
        }).filter(new FilterFunction<String>() {
            @Override
            public boolean filter(String s) throws Exception {
                // Drop records the service chose not to emit (null/blank results).
                return StringUtils.isNotBlank(s);
            }
        }).sinkTo(kafkaSink);

        // Let failures propagate so the process exits non-zero / the cluster
        // restarts the job, instead of swallowing the exception and exiting 0.
        env.execute("MessageDataPushJob");
    }
}
