package com.leiyuee.flink.tools;

import com.leiyuee.flink.tools.constants.MySQLConstant;
import org.apache.flink.connector.jdbc.JdbcConnectionOptions;
import org.apache.flink.connector.jdbc.JdbcExecutionOptions;
import org.apache.flink.connector.jdbc.JdbcSink;
import org.apache.flink.connector.jdbc.JdbcStatementBuilder;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.table.data.RowData;
import org.apache.hudi.common.model.HoodieTableType;
import org.apache.hudi.configuration.FlinkOptions;
import org.apache.hudi.util.HoodiePipeline;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Sink factory helpers ("输出 SinkBuilder"): builds a batched JDBC sink for
 * MySQL and a Hudi pipeline builder with opinionated streaming-write defaults.
 */
public class SinkBuilder {

    /**
     * Builds a batched, retrying JDBC sink for {@link RowData} records.
     *
     * <p>Batching: flushes every 1000 records or every 200 ms, whichever comes
     * first; failed batches are retried up to 5 times.
     *
     * <p>NOTE(review): the method name shadows the imported {@code JdbcSink}
     * class. It still compiles (methods and types live in separate namespaces,
     * so {@code JdbcSink.sink} resolves to the class), but a lowerCamelCase
     * name would be clearer. Kept as-is for caller compatibility.
     *
     * @param sql              parameterized DML statement executed per record
     * @param statementBuilder populates the PreparedStatement from each RowData
     * @param url              JDBC connection URL
     * @param user             database user name
     * @param password         database password
     * @return a configured {@link SinkFunction} ready to be added to a stream
     */
    public static SinkFunction<RowData> JdbcSink(String sql, JdbcStatementBuilder<RowData> statementBuilder, String url, String user, String password) {
        return JdbcSink.sink(sql,
                statementBuilder,
                JdbcExecutionOptions.builder()
                        .withBatchSize(1000)
                        .withBatchIntervalMs(200)
                        .withMaxRetries(5)
                        .build(),
                new JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
                        .withUrl(url)
                        // For MySQL 5.7, use "com.mysql.jdbc.Driver" instead.
                        .withDriverName(MySQLConstant.DRIVER)
                        .withUsername(user)
                        .withPassword(password)
                        .build()
        );
    }

    /**
     * Builds a {@link HoodiePipeline.Builder} for a COPY_ON_WRITE Hudi table
     * with streaming upsert, async compaction, changelog mode, and HMS-based
     * Hive sync enabled by default.
     *
     * @param columnsList     column definitions, one DDL fragment per entry
     *                        (e.g. {@code "id INT"})
     * @param pk              primary-key field(s) passed to {@code builder.pk}
     * @param partitions      partition field names; ignored when null, empty,
     *                        or when the first entry is null/blank
     * @param targetTable     Hudi table name
     * @param targetHiveTable table name used for Hive sync
     * @param basePath        base path of the Hudi table on storage
     * @param map             optional overrides; entries replace or extend the
     *                        defaults above (may be null)
     * @return the configured pipeline builder (columns, pk, partitions, options applied)
     */
    public static HoodiePipeline.Builder sinkHoodie(List<String> columnsList, String pk, String[] partitions,
                                                    String targetTable, String targetHiveTable, String basePath, Map<String, String> map) {
        Map<String, String> options = new HashMap<>();
        options.put(FlinkOptions.PATH.key(), basePath);
        options.put(FlinkOptions.TABLE_TYPE.key(), HoodieTableType.COPY_ON_WRITE.name());
        // NOTE(review): record key is hard-coded to "ID" while the pk parameter
        // is applied separately via builder.pk(pk) below — confirm these are
        // meant to differ; callers can override via the `map` argument.
        options.put(FlinkOptions.RECORD_KEY_FIELD.key(), "ID");// write.recordkey.field
        options.put(FlinkOptions.PRECOMBINE_FIELD.key(), "ts");// write.precombine.field
        // write.* options
        options.put(FlinkOptions.OPERATION.key(), "upsert");// write.operation
        options.put(FlinkOptions.WRITE_TASKS.key(), "1");// write.tasks
        options.put(FlinkOptions.WRITE_RATE_LIMIT.key(), "2000");// write.rate.limit
        // compaction.* options
        options.put(FlinkOptions.COMPACTION_TASKS.key(), "1");// compaction.tasks
        options.put(FlinkOptions.COMPACTION_ASYNC_ENABLED.key(), "true");// compaction.async.enabled
        options.put(FlinkOptions.COMPACTION_TRIGGER_STRATEGY.key(), "num_commits");// compaction.trigger.strategy
        options.put(FlinkOptions.COMPACTION_DELTA_COMMITS.key(), "1");// compaction.delta_commits
        // changelog.* options
        options.put(FlinkOptions.CHANGELOG_ENABLED.key(), "true");// changelog.enabled
        // read.* options
        options.put(FlinkOptions.READ_AS_STREAMING.key(), "true");// read.streaming.enabled
        options.put(FlinkOptions.READ_STREAMING_CHECK_INTERVAL.key(), "3");// read.streaming.check-interval
        // hive_sync.* options
        options.put(FlinkOptions.HIVE_SYNC_ENABLED.key(), "true");// hive_sync.enable
        options.put(FlinkOptions.HIVE_SYNC_MODE.key(), "hms");// hive_sync.mode
        options.put(FlinkOptions.HIVE_SYNC_TABLE.key(), targetHiveTable);// hive_sync.table
        options.put(FlinkOptions.HIVE_SYNC_SUPPORT_TIMESTAMP.key(), "true");// hive_sync.support_timestamp

        // Caller-supplied overrides win over the defaults above. The original
        // containsKey/replace-else-put loop was an exact re-implementation of
        // putAll (put already replaces existing keys).
        if (map != null) {
            options.putAll(map);
        }

        HoodiePipeline.Builder builder = HoodiePipeline.builder(targetTable);

        // columnsList is expected to contain one column-DDL string per entry.
        columnsList.forEach(builder::column);

        // Guard against an empty array / null first element as well as null:
        // the original `partitions[0].equals("")` threw on an empty array.
        if (partitions != null && partitions.length > 0
                && partitions[0] != null && !partitions[0].isEmpty()) {
            builder.partition(partitions);
        }

        builder.pk(pk).options(options);

        return builder;
    }

}
