package cn.smileyan.demos;

import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.java.utils.MultipleParameterTool;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.producer.ProducerConfig;

import java.util.Collections;
import java.util.Objects;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

/**
 * 将 kafka 数据进行序列化，转换为实体类
 * @author smileyan
 */
@Slf4j
public class FlinkKafkaEntitySinkToExample {
    /**
     * Job entry point. Command-line options:
     *  -bs  Kafka broker address
     *  -kcg Kafka consumer group
     *  -it  input topic
     *  -ot  output topic
     *  -ct  whether to auto-create the input topic
     *  -pt  partition count for the created topic
     *  -rf  replication factor for the created topic
     *  -tt  producer transaction timeout in milliseconds
     */
    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
        final MultipleParameterTool params = MultipleParameterTool.fromArgs(args);

        final String brokers = params.get("bs", "localhost:9092");
        final String consumerGroup = params.get("kcg", "flink-consumer");
        final String sourceTopic = params.get("it", "quickstart-events");
        final String sinkTopic = params.get("ot", "quickstart-results");
        final boolean autoCreateTopic = params.getBoolean("ct", true);
        final Long txTimeoutMs = params.getLong("tt", 300000L);

        log.info("broker is {} and topic is {}", brokers, sourceTopic);

        // Optionally create the input topic up front. In most deployments this is left
        // disabled and operators size partitions / replication per topic themselves.
        if (autoCreateTopic) {
            createTopic(brokers,
                        sourceTopic,
                        params.getInt("pt", 1),
                        params.getShort("rf", (short) 1));
        }

        // Source: consume from the latest offsets and deserialize values into Student.
        final KafkaSource<Student> source = KafkaSource.<Student>builder()
                .setBootstrapServers(brokers)
                .setTopics(sourceTopic)
                .setGroupId(consumerGroup)
                .setStartingOffsets(OffsetsInitializer.latest())
                .setValueOnlyDeserializer(new CommonEntitySchema<>(Student.class))
                .build();

        final Properties producerConfig = new Properties();
        producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
        producerConfig.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, String.valueOf(txTimeoutMs));

        // Sink: serialize Student back to the output topic with at-least-once delivery.
        final KafkaSink<Student> sink = KafkaSink.<Student>builder()
                .setKafkaProducerConfig(producerConfig)
                .setDeliverGuarantee(DeliveryGuarantee.AT_LEAST_ONCE)
                .setRecordSerializer(KafkaRecordSerializationSchema.builder()
                        .setTopic(sinkTopic)
                        .setValueSerializationSchema(new CommonEntitySchema<>(Student.class))
                        .build())
                .build();

        final DataStreamSource<Student> students =
                environment.fromSource(source, WatermarkStrategy.noWatermarks(), "Kafka Source");

        // Drop records whose deserialization failed (null elements); log the survivors.
        final SingleOutputStreamOperator<Student> deserialized = students
                .filter(Objects::nonNull)
                .map(student -> {
                    log.info("filter none objects is {}", student);
                    return student;
                });

        // Keep only students whose age is present and below 10.
        final SingleOutputStreamOperator<Student> youngStudents = deserialized
                .filter(s -> s.getAge() != null && s.getAge() < 10)
                .map(s -> {
                    log.info("filter age < 10: {}", s);
                    return s;
                });

        youngStudents.sinkTo(sink);

        environment.execute("Flink Kafka Example");
    }

    /**
     * Creates the given topic on the cluster if it does not already exist.
     *
     * @param bootstrapServer Kafka broker address
     * @param topic topic name to create
     * @param partitions number of partitions for the new topic
     * @param replicationFactor replication factor for the new topic
     * @throws ExecutionException if the admin request fails on the broker side
     * @throws InterruptedException if the calling thread is interrupted while waiting
     */
    public static void createTopic(String bootstrapServer,
                                   String topic,
                                   int partitions,
                                   int replicationFactor) throws ExecutionException, InterruptedException {
        Properties adminConfig = new Properties();
        adminConfig.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer);
        // AdminClient is AutoCloseable; scope it to this call.
        try (AdminClient admin = AdminClient.create(adminConfig)) {
            boolean topicExists = admin.listTopics().names().get().contains(topic);
            if (!topicExists) {
                NewTopic request = new NewTopic(topic, partitions, (short) replicationFactor);
                // Block until the broker acknowledges the creation.
                admin.createTopics(Collections.singletonList(request)).all().get();
                log.info("created topic: {}", topic);
            }
        }
    }

    /**
     * Payload entity deserialized from and serialized back to Kafka.
     * Lombok {@code @Data} generates getters, setters, equals/hashCode and toString.
     */
    @Data
    static class Student {
        private String name;
        // Nullable: upstream filtering checks for null before comparing against 10.
        private Integer age;
    }

}
