package utils;

import annotation.NotSink;
import com.alibaba.fastjson.JSONObject;
import beans.TableProcess;
import common.Constant;
import sink.PhoenixSink;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.jdbc.JdbcConnectionOptions;
import org.apache.flink.connector.jdbc.JdbcExecutionOptions;
import org.apache.flink.connector.jdbc.JdbcSink;
import org.apache.flink.connector.jdbc.JdbcStatementBuilder;
import org.apache.flink.shaded.guava18.com.google.common.base.CaseFormat;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.lang.reflect.Field;
import java.nio.charset.StandardCharsets;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.Properties;
import java.util.stream.Collectors;


/**
 * Factory methods for the sinks used by this job: Phoenix (HBase),
 * Kafka (exactly-once) and ClickHouse (via the generic JDBC sink).
 *
 * <p>All methods are static; this class is not meant to be instantiated.
 */
public class FlinkSinkUtil {

    private FlinkSinkUtil() {
        // utility class — no instances
    }

    /**
     * Returns a sink that writes (row, table-config) pairs into Phoenix.
     */
    public static SinkFunction<Tuple2<JSONObject, TableProcess>> getPhoenixSink() {
        return new PhoenixSink();
    }

    /**
     * Builds the producer properties shared by both Kafka sinks.
     *
     * <p>The producer transaction timeout must not exceed the broker's
     * maximum (transaction.max.timeout.ms, 15 minutes by default),
     * otherwise the EXACTLY_ONCE producer fails to initialize.
     */
    private static Properties kafkaProducerProps() {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", Constant.KAFKA_BROKERS);
        props.setProperty("transaction.timeout.ms", String.valueOf(15 * 60 * 1000));
        return props;
    }

    /**
     * Kafka sink that writes every element (a JSON string) to a fixed topic
     * with exactly-once semantics.
     *
     * @param topic destination topic
     */
    public static SinkFunction<String> getKafkaSink(String topic) {
        return new FlinkKafkaProducer<String>(
                "default",   // fallback topic required by the API; never used by this schema
                new KafkaSerializationSchema<String>() {
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(String element, @Nullable Long timestamp) {
                        // key == null -> the producer's partitioner picks the partition
                        return new ProducerRecord<>(topic, element.getBytes(StandardCharsets.UTF_8));
                    }
                },
                kafkaProducerProps(),
                FlinkKafkaProducer.Semantic.EXACTLY_ONCE);
    }

    /**
     * Kafka sink whose destination topic is resolved per record from the
     * accompanying {@link TableProcess} config (its sinkTable field).
     */
    public static SinkFunction<Tuple2<JSONObject, TableProcess>> getKafkaSink() {
        return new FlinkKafkaProducer<Tuple2<JSONObject, TableProcess>>(
                "default",   // fallback topic required by the API; never used by this schema
                new KafkaSerializationSchema<Tuple2<JSONObject, TableProcess>>() {
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(Tuple2<JSONObject, TableProcess> t, @Nullable Long timestamp) {
                        String topic = t.f1.getSinkTable();
                        byte[] data = t.f0.toJSONString().getBytes(StandardCharsets.UTF_8);
                        return new ProducerRecord<>(topic, data);
                    }
                },
                kafkaProducerProps(),
                FlinkKafkaProducer.Semantic.EXACTLY_ONCE);
    }

    /**
     * Wraps the generic JDBC sink into a ClickHouse sink for POJOs of type T.
     *
     * <p>Uses reflection to enumerate the declared fields of {@code tClass};
     * each lowerCamel field name must map (via lower_underscore conversion)
     * to a column of {@code table}. Fields annotated {@code @NotSink} are
     * excluded from the generated INSERT statement.
     *
     * @param table  target ClickHouse table name
     * @param tClass POJO class whose fields match the table's columns
     * @param <T>    element type of the stream
     * @return a sink that batches inserts into ClickHouse
     */
    public static <T> SinkFunction<T> getClickHouseSink(String table, Class<T> tClass) {
        String driver = Constant.CLICKHOUSE_DRIVER;
        String url = Constant.CLICKHOUSE_URL;

        Field[] fields = tClass.getDeclaredFields();

        // Column list: skip @NotSink fields, convert lowerCamel -> lower_underscore.
        String fieldNames = Arrays.stream(fields)
                .filter(field -> field.getAnnotation(NotSink.class) == null)
                .map(field -> CaseFormat.LOWER_CAMEL.to(CaseFormat.LOWER_UNDERSCORE, field.getName()))
                .collect(Collectors.joining(","));

        // Build: insert into <table>(a,b,c)values(?,?,?)
        StringBuilder sql = new StringBuilder();
        sql.append("insert into ").append(table).append("(")
                .append(fieldNames)
                .append(")values(")
                // one '?' placeholder per column name
                .append(fieldNames.replaceAll("[^,]+", "?"))
                .append(")");
        // NOTE(review): was mislabelled "建表语句" (create-table statement) — this is an INSERT
        System.out.println("clickhouse insert sql: " + sql);

        return getJdbcSink(driver, url, "default", "", sql.toString());
    }

    /**
     * Generic JDBC sink: binds each element's reflected field values to the
     * placeholders of {@code sql}, in declaration order, skipping fields
     * annotated {@code @NotSink} so positions stay aligned with the SQL
     * generated by {@link #getClickHouseSink}.
     */
    private static <T> SinkFunction<T> getJdbcSink(String driver, String url, String user, String password, String sql) {
        return JdbcSink.sink(
                sql,
                new JdbcStatementBuilder<T>() {
                    @Override
                    public void accept(PreparedStatement ps, T t) throws SQLException {
                        Field[] fields = t.getClass().getDeclaredFields();
                        try {
                            int position = 1; // JDBC parameter indexes are 1-based
                            for (Field field : fields) {
                                if (field.getAnnotation(NotSink.class) == null) {
                                    field.setAccessible(true);
                                    ps.setObject(position++, field.get(t));
                                }
                            }
                        } catch (IllegalAccessException e) {
                            throw new RuntimeException(e);
                        }
                    }
                },
                new JdbcExecutionOptions.Builder()
                        .withBatchIntervalMs(3000)   // flush interval (ms)
                        // NOTE(review): batch size is a record count, not bytes —
                        // 1,048,576 rows per batch looks very large; confirm intended.
                        .withBatchSize(1024 * 1024)
                        .withMaxRetries(3)
                        .build(),
                new JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
                        .withDriverName(driver)
                        .withUrl(url)
                        .withUsername(user)
                        .withPassword(password)
                        .build());
    }

}
