package com.atguigu.realtime.util;

import com.alibaba.fastjson.JSONObject;
import com.atguigu.realtime.bean.TableProcess;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.nio.charset.StandardCharsets;
import java.util.Properties;

/**
 * @ClassName: MyKafkaUtil
 * @Description:
 * @Author: kele
 * @Date: 2021/4/17 9:30
 **/
public class MyKafkaUtil {

    /** Kafka broker list shared by every source/sink built here. */
    private static final String BOOTSTRAP_SERVERS =
            "hadoop162:9092,hadoop163:9092,hadoop164:9092";

    /**
     * Transaction timeout for exactly-once sinks. Flink's producer default (1h)
     * exceeds the broker-side transaction.max.timeout.ms default (15 min), which
     * would make transactional init fail — so cap it at 15 minutes.
     */
    private static final String TRANSACTION_TIMEOUT_MS = String.valueOf(1000 * 60 * 15);

    /** Utility class — not meant to be instantiated. */
    private MyKafkaUtil() {
    }

    /**
     * Builds a Kafka source that consumes {@code topic} as plain strings.
     *
     * @param topic   topic to consume
     * @param groupId consumer group id
     * @return a FlinkKafkaConsumer reading committed records only
     */
    public static FlinkKafkaConsumer<String> getKafkaSource(String topic, String groupId) {

        Properties props = new Properties();
        props.setProperty("bootstrap.servers", BOOTSTRAP_SERVERS);
        props.setProperty("group.id", groupId);
        props.setProperty("auto.offset.reset", "earliest");
        // Exactly-once via Kafka transactions: the upstream producer two-phase
        // commits, and with read_committed we only see records whose transaction
        // completed the second (commit) phase; aborted data is never consumed.
        props.setProperty("isolation.level", "read_committed");  // read_committed or read_uncommitted

        return new FlinkKafkaConsumer<>(topic, new SimpleStringSchema(), props);

    }

    /**
     * Builds an exactly-once Kafka sink that writes string records to a fixed topic.
     *
     * @param topic destination topic
     * @return a transactional (EXACTLY_ONCE) FlinkKafkaProducer
     */
    public static FlinkKafkaProducer<String> getKafkaSink(String topic) {

        Properties props = new Properties();
        props.setProperty("bootstrap.servers", BOOTSTRAP_SERVERS);
        // Required when using EXACTLY_ONCE: keep the producer's transaction
        // timeout within the broker's 15-minute maximum (see constant above).
        props.setProperty("transaction.timeout.ms", TRANSACTION_TIMEOUT_MS);

        /*
         * Each Flink subtask should spread its output evenly over the Kafka
         * partitions, so we supply a KafkaSerializationSchema ourselves instead
         * of letting Flink pin one partition per subtask.
         */
        return new FlinkKafkaProducer<String>(topic,
                new KafkaSerializationSchema<String>() {
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(String element,
                                                                    @Nullable Long timestamp) {
                        // Null key: the default partitioner distributes records
                        // across partitions (round-robin style).
                        return new ProducerRecord<>(topic, null,
                                element.getBytes(StandardCharsets.UTF_8));
                    }
                },
                props,
                FlinkKafkaProducer.Semantic.EXACTLY_ONCE // exactly-once semantics
                );

    }

    /**
     * Builds an exactly-once Kafka sink that routes each record to a per-table
     * topic: the destination is taken from {@code TableProcess.getSinkTable()}
     * of the record itself, so different tables land in different topics.
     *
     * @return a SinkFunction writing each (data, config) pair to its own topic
     */
    public static SinkFunction<Tuple2<JSONObject, TableProcess>> getKafkaSink() {

        Properties props = new Properties();
        props.setProperty("bootstrap.servers", BOOTSTRAP_SERVERS);
        // Required when using EXACTLY_ONCE: keep the producer's transaction
        // timeout within the broker's 15-minute maximum (see constant above).
        props.setProperty("transaction.timeout.ms", TRANSACTION_TIMEOUT_MS);

        return new FlinkKafkaProducer<Tuple2<JSONObject, TableProcess>>(
                "default", // fallback topic; the real topic is resolved per record below
                new KafkaSerializationSchema<Tuple2<JSONObject, TableProcess>>() {
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(
                            Tuple2<JSONObject, TableProcess> element,
                            @Nullable Long timestamp) {
                        // f1 carries the sink configuration, f0 the JSON payload.
                        String topic = element.f1.getSinkTable();
                        return new ProducerRecord<>(topic,
                                element.f0.toJSONString().getBytes(StandardCharsets.UTF_8));
                    }
                },
                props,
                FlinkKafkaProducer.Semantic.EXACTLY_ONCE
        );

    }
}
