package com.danan.realtime.util;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.danan.realtime.annotation.NotSink;
import com.danan.realtime.common.Constant;
import com.danan.realtime.pojo.KeywordBean;
import com.danan.realtime.pojo.TableProcess;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.jdbc.JdbcConnectionOptions;
import org.apache.flink.connector.jdbc.JdbcExecutionOptions;
import org.apache.flink.connector.jdbc.JdbcSink;
import org.apache.flink.connector.jdbc.JdbcStatementBuilder;
import org.apache.flink.shaded.guava18.com.google.common.base.CaseFormat;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.lang.reflect.Field;
import java.nio.charset.StandardCharsets;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.Properties;
import java.util.stream.Collectors;

/**
 * @author NanHuang
 * @Date 2023/1/24
 */
/**
 * Factory methods for the sinks used by the real-time pipeline: a Phoenix sink,
 * exactly-once Kafka producers, Flink SQL Kafka WITH-clause fragments, and a
 * reflection-driven ClickHouse JDBC sink.
 *
 * <p>All members are static; this class is not meant to be instantiated.
 */
public class SinkUtil {

    /** Utility class — prevent instantiation. */
    private SinkUtil() {
    }

    /**
     * @return a sink that writes {@code (row, tableConfig)} tuples to Phoenix.
     */
    public static SinkFunction<Tuple2<JSONObject, TableProcess>> getPhoenixSink() {
        return new MyPhoenixSink();
    }

    /**
     * Builds the Kafka producer properties shared by both {@code getKafkaSink}
     * overloads: bootstrap servers from configuration plus a transaction timeout.
     *
     * @return producer {@link Properties} for an exactly-once Flink Kafka producer
     */
    private static Properties buildKafkaProducerProperties() {
        Properties properties = new Properties();
        properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
                ResourceBundleUtil.getProperty("kafka.bootstrap.servers"));
        // EXACTLY_ONCE requires this to be <= the broker's transaction.max.timeout.ms
        // (broker default 15 min); 10 min keeps us safely inside that limit.
        properties.setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 10 * 60 * 1000 + "");
        return properties;
    }

    /**
     * Creates an exactly-once Kafka producer that serializes each
     * {@link JSONObject} as UTF-8 JSON and writes it to the given topic.
     *
     * @param topic destination Kafka topic
     * @return a {@link FlinkKafkaProducer} with EXACTLY_ONCE semantics
     */
    public static FlinkKafkaProducer<JSONObject> getKafkaSink(String topic) {
        return new FlinkKafkaProducer<JSONObject>(
                // Fallback topic; unused in practice because serialize() always
                // names the topic explicitly on every record.
                "default",
                new KafkaSerializationSchema<JSONObject>() {
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(JSONObject in, @Nullable Long timestamp) {
                        byte[] value = JSON.toJSONString(in).getBytes(StandardCharsets.UTF_8);
                        return new ProducerRecord<>(topic, value);
                    }
                },
                buildKafkaProducerProperties(),
                FlinkKafkaProducer.Semantic.EXACTLY_ONCE
        );
    }

    /**
     * Resolves the serialization format for the WITH-clause builders.
     *
     * @param format optional format name; the first entry wins
     * @return the supplied format, or {@code "json"} when none is given
     */
    private static String resolveFormat(String... format) {
        return format.length == 0 ? "json" : format[0];
    }

    /**
     * Builds a Flink SQL {@code WITH (...)} clause for a plain Kafka connector.
     *
     * @param topic  Kafka topic to read/write
     * @param format optional serialization format (defaults to {@code "json"})
     * @return the WITH-clause fragment to append to a CREATE TABLE statement
     */
    public static String getKafkaWith(String topic, String... format) {
        String formatValue = resolveFormat(format);
        return "with (" +
                "   'connector'='kafka'," +
                "   'properties.bootstrap.servers'='" + ResourceBundleUtil.getProperty("kafka.bootstrap.servers") + "'," +
                "   'topic'='" + topic + "'," +
                "   'format'='" + formatValue + "'" +
                ")";
    }

    /**
     * Builds a Flink SQL {@code WITH (...)} clause for an upsert-kafka connector
     * (key and value share the same format).
     *
     * @param topic  Kafka topic to read/write
     * @param format optional serialization format (defaults to {@code "json"})
     * @return the WITH-clause fragment to append to a CREATE TABLE statement
     */
    public static String getUpsertKafkaWith(String topic, String... format) {
        String formatValue = resolveFormat(format);
        return "with (" +
                "       'connector'='upsert-kafka'," +
                "       'properties.bootstrap.servers'='" + ResourceBundleUtil.getProperty("kafka.bootstrap.servers") + "'," +
                "       'topic'='" + topic + "'," +
                "       'key.format'='" + formatValue + "'," +
                "       'value.format'='" + formatValue + "'" +
                ")";
    }

    /**
     * Creates an exactly-once Kafka producer whose destination topic is taken
     * per record from the accompanying {@link TableProcess} config
     * ({@code sinkTable}), so one sink can fan out to many topics.
     *
     * @return a dynamic-topic {@link FlinkKafkaProducer} with EXACTLY_ONCE semantics
     */
    public static SinkFunction<Tuple2<JSONObject, TableProcess>> getKafkaSink() {
        return new FlinkKafkaProducer<Tuple2<JSONObject, TableProcess>>(
                // Fallback topic; unused because serialize() derives the topic
                // from the record's TableProcess config.
                "default",
                new KafkaSerializationSchema<Tuple2<JSONObject, TableProcess>>() {

                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(Tuple2<JSONObject, TableProcess> in, @Nullable Long timestamp) {
                        JSONObject data = in.f0;
                        TableProcess config = in.f1;
                        String topic = config.getSinkTable();
                        byte[] value = data.toJSONString().getBytes(StandardCharsets.UTF_8);
                        return new ProducerRecord<>(topic, value);
                    }
                },
                buildKafkaProducerProperties(),
                FlinkKafkaProducer.Semantic.EXACTLY_ONCE
        );
    }

    /**
     * Creates a JDBC sink that inserts POJOs of type {@code T} into a ClickHouse
     * table. Columns are derived from the POJO's declared fields (camelCase →
     * snake_case), skipping any field annotated with {@link NotSink}.
     *
     * @param table ClickHouse table name
     * @param clazz POJO class whose fields map to the table's columns
     * @param <T>   POJO type
     * @return a batching {@link SinkFunction} backed by {@link JdbcSink}
     */
    public static <T> SinkFunction<T> getClickhouseSink(String table, Class<T> clazz) {
        // 1. Build the INSERT statement from the sink-eligible fields.
        Field[] fields = clazz.getDeclaredFields();
        String columnNames = Arrays.stream(fields)
                .filter(f -> f.getAnnotation(NotSink.class) == null)
                .map(f -> CaseFormat.LOWER_CAMEL.to(CaseFormat.LOWER_UNDERSCORE, f.getName()))
                .collect(Collectors.joining(","));
        // One "?" placeholder per column.
        String columnValues = columnNames.replaceAll("[^,]+", "?");
        String sql = String.format("insert into %s ( %s ) values ( %s )", table, columnNames, columnValues);

        // Precompute the Java field name for each placeholder, in the same order
        // as the columns above. Hoisted out of the per-record callback so accept()
        // no longer re-splits the column list and re-runs the CaseFormat
        // conversion for every record. A String[] (unlike Field[]) is
        // serializable, so Flink can ship the closure to task managers.
        String[] javaFieldNames = Arrays.stream(fields)
                .filter(f -> f.getAnnotation(NotSink.class) == null)
                .map(Field::getName)
                .toArray(String[]::new);

        // 2. Bind each record's field values to the statement's placeholders.
        JdbcStatementBuilder<T> builder = new JdbcStatementBuilder<T>() {
            @Override
            public void accept(PreparedStatement ps, T t) {
                for (int i = 0; i < javaFieldNames.length; i++) {
                    String fieldName = javaFieldNames[i];
                    try {
                        Field field = clazz.getDeclaredField(fieldName);
                        field.setAccessible(true);
                        // JDBC parameters are 1-based.
                        ps.setObject(i + 1, field.get(t));
                    } catch (Exception e) {
                        throw new RuntimeException(
                                "Failed to bind field " + fieldName + " of " + clazz.getName(), e);
                    }
                }
            }
        };

        // 3. Batching/retry behaviour: flush every 2 s or every 1 Mi records.
        JdbcExecutionOptions executionOptions = JdbcExecutionOptions.builder()
                .withBatchIntervalMs(2000)
                .withMaxRetries(3)
                .withBatchSize(1024 * 1024)
                .build();

        // 4. Connection settings come from the application's resource bundle.
        JdbcConnectionOptions connectionOptions = new JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
                .withDriverName(ResourceBundleUtil.getProperty("clickhouse.driver"))
                .withUrl(ResourceBundleUtil.getProperty("clickhouse.url"))
                .build();

        return JdbcSink.sink(
                sql,
                builder,
                executionOptions,
                connectionOptions
        );
    }
}
