package util;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.flink.types.Row;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;

import java.nio.charset.StandardCharsets;
import java.util.Date;
import java.util.Properties;
import java.util.Set;


/**
 * Utility factory for Flink Kafka sources and sinks used by the DWD layer.
 *
 * <p>All methods are static; this class is not meant to be instantiated.
 */
public class KafkaUtil {

    // Kafka bootstrap server list shared by all sources/sinks built here.
    private static final String brokers = "node101:9092";
    // Fallback topic name for the DWD layer (currently unreferenced in this
    // file — presumably used by callers elsewhere; verify before removing).
    private static final String default_topic = "DWD_DEFAULT_TOPIC";

    /** Utility class — prevent instantiation. */
    private KafkaUtil() {
    }

    /**
     * Builds an exactly-once Kafka producer sink for the given topic.
     *
     * <p>Serialization rules (value only, no key):
     * <ul>
     *   <li>{@code String} elements are written as UTF-8 bytes as-is;</li>
     *   <li>{@link Row} elements are converted field-by-field into a JSON object;</li>
     *   <li>anything else is serialized with fastjson's {@code JSON.toJSONString}.</li>
     * </ul>
     *
     * @param topic target Kafka topic
     * @return an exactly-once {@link FlinkKafkaProducer} writing to {@code topic}
     */
    public static FlinkKafkaProducer getKafkaSink(String topic) {
        Properties properties = new Properties();
        properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
        // EXACTLY_ONCE uses Kafka transactions; the producer-side transaction
        // timeout must not exceed the broker's transaction.max.timeout.ms
        // (15 min here). Stored as a String so the Properties object stays
        // string-valued, as java.util.Properties documents.
        properties.setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG,
                String.valueOf(15 * 60 * 1000));
        return new FlinkKafkaProducer<Object>(
                topic,
                new KafkaSerializationSchema<Object>() {
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(Object element, @Nullable Long timestamp) {
                        // Always encode with UTF-8 explicitly; the no-arg
                        // getBytes() would use the platform charset and can
                        // corrupt non-ASCII JSON payloads.
                        final byte[] value;
                        if (element instanceof String) {
                            value = ((String) element).getBytes(StandardCharsets.UTF_8);
                        } else if (element instanceof Row) {
                            Row row = (Row) element;
                            // true -> include nested/projected field names as well.
                            Set<String> fieldNames = row.getFieldNames(true);
                            JSONObject all = new JSONObject();
                            for (String fieldName : fieldNames) {
                                all.put(fieldName, row.getField(fieldName));
                            }
                            value = all.toJSONString().getBytes(StandardCharsets.UTF_8);
                        } else {
                            value = JSON.toJSONString(element).getBytes(StandardCharsets.UTF_8);
                        }
                        return new ProducerRecord<>(topic, value);
                    }
                }, properties,
                FlinkKafkaProducer.Semantic.EXACTLY_ONCE);
    }

    /**
     * Builds a Kafka string source reading {@code topic} from the earliest offset.
     *
     * <p>NOTE(review): the group id embeds the current epoch second, so every
     * run gets a fresh consumer group and re-reads the topic from the
     * beginning — looks intentional for development/replay; confirm before
     * using in production.
     *
     * @param env   the Flink streaming environment to attach the source to
     * @param topic Kafka topic to consume
     * @return a {@link DataStreamSource} of raw record values as strings
     */
    public static DataStreamSource<String> getKafkaSource(StreamExecutionEnvironment env, String topic) {
        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers(brokers)
                .setTopics(topic)
                // System.currentTimeMillis() replaces the legacy
                // new Date().getTime() — identical value, no java.util.Date.
                .setGroupId("2302a_group_" + System.currentTimeMillis() / 1000)
                .setStartingOffsets(OffsetsInitializer.earliest())
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();
        return env.fromSource(source, WatermarkStrategy.noWatermarks(), "Kafka Source");
    }

    // Kafka connector options for Flink SQL DDL (kept for reference).
//    public static String getKafkaDDL(String topic, String groupId) {
//        return  " 'connector' = 'kafka', " +
//                " 'topic' = '" + topic + "'," +
//                " 'properties.bootstrap.servers' = '" + brokers + "', " +
//                " 'properties.group.id' = '" + groupId + "', " +
//                " 'format' = 'json', " +
//                " 'scan.startup.mode' = 'latest-offset'  ";
//    }

}
