package com.hb.resource.utils;


import java.io.IOException;
import java.io.InputStream;
import java.util.Collections;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutionException;

import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.ConsumerConfig;


/**
 * 卡夫卡工具类
 *
 * @author hb
 * @date 2023/04/23
 */

@Slf4j
public class KafkaUtil {


    /**
     * kafka的broker地址
     */
    private static String KAFKA_SERVER = null;
    private static AdminClient adminClient;
    static {
        Properties properties = new Properties();

        InputStream in = KafkaUtil.class.getClassLoader().getResourceAsStream("application.properties");

        try {
            properties.load(in);
        } catch (IOException e) {
            log.error("加载kafka配置文件失败,{}", e);
        }

        //获取key配置对应的value
        KAFKA_SERVER = properties.getProperty("kafka.servers");
        // 创建管理者
        adminClient = AdminClient.create(properties);
    }

    /**
     * 自动创建话题
     *
     * @param topicName     主题名称
     * @param numPartitions 分区数量
     * @param replicationNum 副本
     * @throws ExecutionException   执行异常
     * @throws InterruptedException 中断异常
     */
    public static void autoCreateTopic(String topicName, Integer numPartitions, short replicationNum) throws ExecutionException, InterruptedException {
        try {
            // 创建topic前，可以先检查topic是否存在，如果已经存在，则不用再创建了
            Set<String> topics = adminClient.listTopics().names().get();
            if (topics.contains(topicName)) {
                return ;
            }

            // 创建topic 3 1 主题，分区数，副本
            NewTopic newTopic = new NewTopic(topicName, numPartitions, replicationNum);
            CreateTopicsResult result = adminClient.createTopics(Collections.singletonList(newTopic));
            result.all().get();
        } finally {
            adminClient.close();
        }
        return ;
    }

    /**
     * 获取flink的kafka消费者
     *
     * @param topic
     * @param groupId
     * @return
     */
    public static FlinkKafkaConsumer<String> getKafkaConsumer(String topic, String groupId) {
        Properties props = new Properties();
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_SERVER);
        return new FlinkKafkaConsumer<String>(topic, new SimpleStringSchema(), props);

    }


    /**
     * 获取flink的kafka生产者
     *
     * @param topic
     * @return
     */
    public static FlinkKafkaProducer<String> getKafkaProducer(String topic) {
        return new FlinkKafkaProducer<String>(KAFKA_SERVER, topic, new SimpleStringSchema());
    }

}
