package com.lsx143.realtime.util;

import com.alibaba.fastjson.JSONObject;
import com.lsx143.realtime.bean.TableProcess;
import com.lsx143.realtime.common.Constants;
import lombok.val;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.nio.charset.StandardCharsets;
import java.util.Properties;

/**
 * Kafka utility class: factory methods for Flink Kafka sources and sinks.
 *
 * <p>All sinks use {@link FlinkKafkaProducer.Semantic#EXACTLY_ONCE}. The producer
 * transaction timeout is set to 15 minutes so it stays within the broker's default
 * {@code transaction.max.timeout.ms}; exceeding it would make the producer fail at
 * startup.
 */
public class KafkaUtil {

    /** Producer transaction timeout in ms; must not exceed the broker's transaction.max.timeout.ms. */
    private static final String TRANSACTION_TIMEOUT_MS = String.valueOf(15 * 60 * 1000);

    /** Utility class — not instantiable. */
    private KafkaUtil() {
    }

    /** Builds the producer configuration shared by all exactly-once sinks. */
    private static Properties producerProps() {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", Constants.KAFKA_BROKERS);
        props.setProperty("transaction.timeout.ms", TRANSACTION_TIMEOUT_MS);
        return props;
    }

    /**
     * Creates a Kafka source that consumes String records from the given topic.
     *
     * @param groupId consumer group id
     * @param topic   topic to consume
     * @return a FlinkKafkaConsumer that reads only committed records
     */
    public static FlinkKafkaConsumer<String> getKafkaSource(String groupId, String topic) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", Constants.KAFKA_BROKERS);
        props.setProperty("group.id", groupId);
        props.setProperty("auto.offset.reset", "latest");
        // Read only records from committed transactions (end-to-end exactly-once).
        props.setProperty("isolation.level", "read_committed");
        return new FlinkKafkaConsumer<>(topic, new SimpleStringSchema(), props);
    }

    /**
     * Creates an exactly-once Kafka sink that writes String records to a fixed topic.
     *
     * @param topic target topic
     * @return a FlinkKafkaProducer with EXACTLY_ONCE semantics
     */
    @SuppressWarnings("all")
    public static FlinkKafkaProducer<String> getKafkaSink(final String topic) {
        return new FlinkKafkaProducer<>(
                topic,
                new KafkaSerializationSchema<String>() {
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(String ele, @Nullable Long timeStamp) {
                        // Key-less record: partition is chosen by Kafka's default partitioner.
                        return new ProducerRecord<>(topic, ele.getBytes(StandardCharsets.UTF_8));
                    }
                },
                producerProps(),
                FlinkKafkaProducer.Semantic.EXACTLY_ONCE
        );
    }

    /**
     * Creates an exactly-once Kafka sink whose target topic is resolved per record
     * from {@code TableProcess.getSinkTable()}, writing the JSON payload as UTF-8.
     *
     * @return a FlinkKafkaProducer routing each element to its configured topic
     */
    @SuppressWarnings("all")
    public static FlinkKafkaProducer<Tuple2<JSONObject, TableProcess>> getKafkaSink() {
        return new FlinkKafkaProducer<Tuple2<JSONObject, TableProcess>>(
                // Fallback topic only (fixed typo: was "defalut_topic");
                // serialize() always supplies an explicit per-record topic.
                "default_topic",
                new KafkaSerializationSchema<Tuple2<JSONObject, TableProcess>>() {
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(Tuple2<JSONObject, TableProcess> tuple, @Nullable Long timeStamp) {
                        String topic = tuple.f1.getSinkTable();
                        String data = tuple.f0.toJSONString();
                        return new ProducerRecord<>(topic, data.getBytes(StandardCharsets.UTF_8));
                    }
                },
                producerProps(),
                FlinkKafkaProducer.Semantic.EXACTLY_ONCE
        );
    }
}
