package com.asap.demo.connectors;

import com.asap.demo.sink.ClickhouseSink;
import com.asap.demo.rete.ReteDemo4;
import com.asap.rule.StandardEvent;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Properties;

/**
 * Entry point: reads raw events from Kafka, parses them into {@code StandardEvent}s,
 * and writes the results to ClickHouse.
 *
 * @author wangbh
 * @date 2021/11/25 10:05
 */
public class FlinkConnectorsCK {
    // Bug fix: logger was initialized with ReteDemo4.class (copy-paste error),
    // which mis-attributed every log line from this job to the wrong class.
    private static final Logger logger = LoggerFactory.getLogger(FlinkConnectorsCK.class);

    /*
     * Throughput notes (parallelism read/map/sink -> volume -> wall time):
     *   1 2 6 -> 1,000,000 records -> 1 min
     *   1 2 4 -> 1,000,000 records -> 1.3 min
     */

    /**
     * Builds and submits the Flink pipeline: Kafka source -> parse map -> ClickHouse sink.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {
        try {
            final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

            env.setParallelism(1);
            env.enableCheckpointing(5000);  // checkpoint every 5000 ms
            env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

            Properties browseProperties = new Properties();
            browseProperties.put("bootstrap.servers", "10.28.184.25:9093");
            browseProperties.put("group.id", "temporal");
            browseProperties.put("auto.offset.reset", "latest");

            // 1. Read raw messages from Kafka.
            DataStream<String> dataStream = env
                    .addSource(new FlinkKafkaConsumer<>(
                            "flink_pressure_test7",
                            new SimpleStringSchema(),
                            browseProperties
                    )).name("Read Kafka").setParallelism(1);

            // 2. Parse each raw message into a StandardEvent, keeping the raw
            //    payload available under the "RawMsg" field for downstream use.
            DataStream<StandardEvent> kafkaData = dataStream
                    .map(new MapFunction<String, StandardEvent>() {
                        @Override
                        public StandardEvent map(String value) throws Exception {
                            StandardEvent standardEvent = StandardEvent.parse(value);
                            standardEvent.getAllFields().put("RawMsg", value);
                            return standardEvent;
                        }
                    }).setParallelism(2).name("cala");

            // 3. Write parsed events to ClickHouse (the original comment said
            //    "es" but the sink is ClickhouseSink).
            kafkaData.addSink(new ClickhouseSink()).setParallelism(4).name("Insert ck Job");

            env.execute(".....flink deal job.....");
        } catch (Exception e) {
            // Bug fix: previously e.printStackTrace() swallowed the failure
            // silently; log with the cause and exit non-zero so schedulers
            // can detect the failed submission.
            logger.error("Flink job failed", e);
            System.exit(1);
        }
    }
}
