package com.atlocal.utils;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.util.List;
import java.util.Properties;
import java.util.regex.Pattern;

/**
 * @ClassName MyKafkaUtil
 * @Description Kafka source/sink helpers for Flink DataStream jobs, plus Flink SQL DDL fragments (kafka, upsert-kafka, JDBC lookup)
 * @Author kongjiangjiang
 * @Date 2022/10/10 15:53
 * @Version 1.0
 **/
public class MyKafkaUtil {

    /** Single-broker bootstrap address used by the SQL DDL fragments. */
    private static final String BOOTSTRAP_SERVERS = "sssc-kafka1:9092";
    /** Full broker list used by the DataStream sources and the sink. */
    private static final String KAFKA_SERVER = "sssc-kafka1:9092,sssc-kafka2:9092,sssc-kafka3:9092";

    /** Topic pattern for the dynamic source; compiled once instead of per call. */
    private static final Pattern PRO_TOPIC_PATTERN = Pattern.compile("pro_([A-Za-z0-9_]*)$");

    /**
     * Retained only for backward compatibility (the field is package-visible).
     * The methods below no longer share it: a single mutable {@link Properties}
     * shared by every call is not thread-safe and leaks settings between sources
     * (e.g. the sink used to inherit whatever consumer config an earlier call set).
     */
    static Properties prop = new Properties();

    /** SimpleStringSchema is stateless, so one shared instance is safe. */
    final static SimpleStringSchema simpleStringSchema = new SimpleStringSchema();

    /** Utility class — no instances. */
    private MyKafkaUtil() {
    }

    /**
     * Builds a fresh consumer configuration per call so concurrent callers
     * never mutate a shared Properties instance.
     *
     * @param bootstrapServers Kafka broker list
     * @param groupId          consumer group id
     * @return a new Properties populated with the common consumer settings
     */
    private static Properties consumerProps(String bootstrapServers, String groupId) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", bootstrapServers);
        // Re-scan for new partitions/topics every 30s (needed for pattern subscription).
        props.setProperty("flink.partition-discovery.interval-millis", "30000");
        // NOTE(review): "fetch.message.max.bytes" is the legacy 0.8-era property name;
        // modern consumers use "max.partition.fetch.bytes" — confirm client version.
        props.setProperty("fetch.message.max.bytes", "10000");
        // NOTE(review): FlinkKafkaConsumer deserializes via the DeserializationSchema,
        // so these two entries are presumably ignored; kept for parity with the original.
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("group.id", groupId);
        return props;
    }

    /**
     * Applies the start-position policy shared by all sources:
     * {@code true} = replay history from the earliest offset,
     * {@code false} = consume from the latest offset only.
     */
    private static void applyStartPosition(FlinkKafkaConsumer<String> consumer, boolean synchronizationPolicy) {
        if (synchronizationPolicy) {
            consumer.setStartFromEarliest();
        } else {
            consumer.setStartFromLatest();
        }
    }

    /**
     * Creates a source that dynamically subscribes to every topic matching
     * {@code pro_([A-Za-z0-9_]*)$} (new matching topics are picked up via
     * partition discovery).
     *
     * @param env                   Flink execution environment
     * @param groupId               consumer group id
     * @param synchronizationPolicy true = sync historical data (earliest), false = latest only
     * @return a String DataStream over all matching topics
     */
    public static DataStreamSource<String> getKafkaSourceDy(StreamExecutionEnvironment env, String groupId, boolean synchronizationPolicy) {
        FlinkKafkaConsumer<String> kafkaConsumer = new FlinkKafkaConsumer<>(
                PRO_TOPIC_PATTERN,
                simpleStringSchema,
                consumerProps(KAFKA_SERVER, groupId));
        applyStartPosition(kafkaConsumer, synchronizationPolicy);
        return env.addSource(kafkaConsumer);
    }

    /**
     * Creates a source over an explicit list of topics against a caller-supplied
     * broker list.
     *
     * @param env                   Flink execution environment
     * @param groupId               consumer group id
     * @param synchronizationPolicy true = sync historical data (earliest), false = latest only
     * @param topicList             topics to subscribe to
     * @param kafka_server          broker list to connect to
     * @return a String DataStream over the given topics
     */
    public static DataStreamSource<String> getKafkaSourceDyGroup(StreamExecutionEnvironment env, String groupId, boolean synchronizationPolicy, List<String> topicList, String kafka_server) {
        FlinkKafkaConsumer<String> kafkaConsumer = new FlinkKafkaConsumer<>(
                topicList,
                simpleStringSchema,
                consumerProps(kafka_server, groupId));
        applyStartPosition(kafkaConsumer, synchronizationPolicy);
        return env.addSource(kafkaConsumer);
    }

    /**
     * Creates a source for a single topic on the default cluster.
     *
     * @param env                   Flink execution environment
     * @param topic                 topic to subscribe to
     * @param groupId               consumer group id
     * @param synchronizationPolicy true = sync historical data (earliest), false = latest only
     * @return a String DataStream over the topic
     */
    public static DataStreamSource<String> getKafkaSource(StreamExecutionEnvironment env, String topic, String groupId, boolean synchronizationPolicy) {
        FlinkKafkaConsumer<String> kafkaConsumer = new FlinkKafkaConsumer<>(topic, simpleStringSchema, consumerProps(KAFKA_SERVER, groupId));
        applyStartPosition(kafkaConsumer, synchronizationPolicy);
        return env.addSource(kafkaConsumer);
    }

    /**
     * Creates a source for a single topic that always starts from the EARLIEST
     * offset.
     *
     * <p>NOTE(review): despite the name "Latest", this method starts from the
     * earliest offset (the setStartFromLatest call was commented out in the
     * original). Behavior is preserved for existing callers; confirm intent
     * before renaming.
     *
     * @param env     Flink execution environment
     * @param topic   topic to subscribe to
     * @param groupId consumer group id
     * @return a String DataStream over the topic, replayed from the beginning
     */
    public static DataStreamSource<String> getKafkaSourceLatest(StreamExecutionEnvironment env, String topic, String groupId) {
        FlinkKafkaConsumer<String> kafkaConsumer = new FlinkKafkaConsumer<>(topic, simpleStringSchema, consumerProps(KAFKA_SERVER, groupId));
        kafkaConsumer.setStartFromEarliest();
        return env.addSource(kafkaConsumer);
    }

    /**
     * Attaches a Kafka producer sink writing the stream's String records to
     * {@code sinkTopic}.
     *
     * <p>Uses a fresh Properties holding only the broker list: the original
     * shared the static {@code prop} field, so the producer config depended on
     * whichever consumer method happened to run first.
     *
     * @param sinkStream stream to sink
     * @param sinkTopic  target topic
     */
    public static void getKafkaSink(SingleOutputStreamOperator<String> sinkStream, String sinkTopic) {
        Properties producerProps = new Properties();
        producerProps.put("bootstrap.servers", KAFKA_SERVER);
        System.out.println(sinkTopic); // NOTE(review): debug leftover — consider a logger
        sinkStream.addSink(new FlinkKafkaProducer<String>(sinkTopic, simpleStringSchema, producerProps));
    }

    /**
     * Kafka-Source DDL fragment (WITH clause) for Flink SQL.
     *
     * <p>NOTE(review): includes 'sink.parallelism' even though this fragment is
     * described as a source — confirm whether that option is intentional.
     *
     * @param topic   source topic
     * @param groupId consumer group id
     * @return the assembled Kafka connector WITH clause
     */
    public static String getKafkaDDL(String topic, String groupId) {
        return " with ('connector' = 'kafka', " +
                " 'topic' = '" + topic + "'," +
                " 'properties.bootstrap.servers' = '" + BOOTSTRAP_SERVERS + "', " +
                " 'properties.group.id' = '" + groupId + "', " +
                " 'format' = 'json', " +
                " 'sink.parallelism' = '5', " +
                // Switch to 'earliest-offset' to replay history.
                " 'scan.startup.mode' = 'latest-offset')";
    }

    /**
     * Kafka-Sink (upsert-kafka) DDL fragment for Flink SQL.
     *
     * @param topic target topic
     * @return the assembled upsert-kafka WITH clause
     */
    public static String getUpsertKafkaDDL(String topic) {
        return "WITH ( " +
                "  'connector' = 'upsert-kafka', " +
                "  'topic' = '" + topic + "', " +
                "  'properties.bootstrap.servers' = '" + BOOTSTRAP_SERVERS + "', " +
                "  'key.format' = 'json', " +
                "  'value.format' = 'json' " +
                ")";
    }

    /**
     * JDBC lookup-table DDL tail for the default dimension table.
     *
     * @return the closing ") WITH (...)" fragment pointing at
     *         'dim_master_data_code_table_orc'
     */
    public static String getDimInfoLookUp() {
        // Delegate to the parameterized overload; output is byte-identical.
        return getDimInfoLookUp("dim_master_data_code_table_orc");
    }

    /**
     * JDBC lookup-table DDL tail for an arbitrary MySQL table.
     *
     * <p>SECURITY NOTE(review): credentials and the JDBC URL are hard-coded in
     * source — move them to external configuration / a secrets store.
     *
     * @param tableName MySQL table to look up
     * @return the closing ") WITH (...)" fragment
     */
    public static String getDimInfoLookUp(String tableName) {
        return ") WITH ( " +
                "  'connector' = 'jdbc', " +
                "  'url' = 'jdbc:mysql://10.30.64.207:3306/dim', " +
                "  'username' = 'root', " +
                "  'password' = '123456', " +
                "  'lookup.cache.max-rows' = '10', " +
                "  'lookup.cache.ttl' = '1 hour', " +
                "  'driver' = 'com.mysql.jdbc.Driver', " +
                "  'table-name' = '" + tableName + "' " +
                ")";
    }

    /**
     * Full CREATE TEMPORARY TABLE statement for the default lookup table.
     *
     * <p>NOTE(review): method name breaks lowerCamelCase but is kept for
     * backward compatibility with existing callers.
     *
     * @return the assembled DDL for dim_info_all
     */
    public static String DimInfoAll() {
        String dimSql = "" +
                "CREATE TEMPORARY TABLE dim_info_all ( " +
                "master_data_sub_coding STRING, " +
                "md_level STRING, " +
                "sub_meaning_of_master_data STRING, " +
                "_c7 STRING, " +
                "_c8 STRING, " +
                "original_level_coding STRING, " +
                "_c11 STRING, " +
                "_c0 STRING, " +
                "master_data_coding STRING, " +
                "crt_date STRING, " +
                "_c2 STRING, " +
                "md_eff_date STRING "
                + getDimInfoLookUp();
        return dimSql;
    }

    /**
     * Full CREATE TEMPORARY TABLE statement for a three-column lookup table.
     *
     * @param tableName table to create (also used as the JDBC table-name)
     * @param field1    first STRING column name
     * @param field2    second STRING column name
     * @param field3    third STRING column name
     * @return the assembled DDL
     */
    public static String DimInfoAll(String tableName, String field1, String field2, String field3) {
        StringBuilder builder = new StringBuilder();
        builder
                .append("CREATE TEMPORARY TABLE ").append(tableName).append(" (")
                .append(field1).append(" STRING,").append(field2).append(" STRING,").append(field3).append(" STRING ")
                .append(getDimInfoLookUp(tableName));
        return builder.toString();
    }

    /**
     * Ad-hoc smoke test: creates the lookup table and dumps its rows.
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Build and register the lookup table, then print its content.
        String dimInfo = DimInfoAll("dim_h_fin", "ods_sub_cde", "md_parent_cde", "md_parent_nme");
        System.out.println(dimInfo);
        tableEnv.executeSql(dimInfo);
        tableEnv.executeSql("select * from dim_h_fin").print();

        env.execute();
    }
}
