package com.yifeng.repo.flink.data.transport.bootstrap;

import java.io.IOException;
import java.util.*;

import com.yifeng.repo.flink.data.transport.config.*;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.shaded.zookeeper3.io.netty.util.internal.StringUtil;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import com.alibaba.fastjson.JSON;
import com.mysql.cj.util.StringUtils;
import com.ververica.cdc.connectors.base.options.StartupMode;
import com.ververica.cdc.connectors.base.options.StartupOptions;
import com.yifeng.repo.flink.data.transport.streaming.connectors.mysql.MysqlDateTimeConverter;


/**
 * Bootstrap helper for the Flink CDC data-transport jobs.
 *
 * <p>Responsibilities: parse command-line arguments and properties files into
 * {@link JobParametersConfig}, build the Debezium properties for the Oracle and
 * MySQL connectors, resolve the CDC startup mode, and prepare the Flink
 * {@link StreamExecutionEnvironment} (RocksDB state backend + checkpointing).
 *
 * <p>All members are static; this class is not meant to be instantiated.
 *
 * @author lijing
 * @since 2023-03-14
 */
public class BootstrapHelper {

	/** Debezium Oracle: database/table names are matched case sensitively. */
	private static final String DATABASE_TABLENAME_CASE_INSENSITIVE = "false";

	/** Debezium Oracle change-capture adapter: LogMiner. */
	private static final String DATABASE_CONNECTION_ADAPTER = "logminer";

	/** LogMiner dictionary strategy used when the online catalog is enabled. */
	private static final String LOG_MINING_STRATEGY = "online_catalog";

	/** Keep one continuous LogMiner mining session (avoids per-poll setup). */
	private static final String LOG_MINING_CONTINUOUS_MINE = "true";

	/** Take no table locks during the Debezium snapshot phase. */
	private static final String DEBEZIUM_SNAPSHOT_LOCKING_MODE = "none";

	/** Default: forward schema (DDL) change events. */
	private static final boolean INCLUDE_SCHEMA_CHANGES = true;

	/** Default: log-file switching disabled. */
	private static final boolean SWITCH_LOGFILE = false;

	/** Default log-file switch interval, in minutes. */
	private static final long SWITCH_LOGFILE_MINUTE = 30;

	/** Map BIGINT UNSIGNED columns to Java {@code long} values. */
	private static final String BIGINT_UNSIGNED_HANDLING_MODE = "long";

	/** Emit DECIMAL/NUMERIC columns as strings (no precision loss). */
	private static final String DECIMAL_HANDLING_MODE = "string";

	/** Default parallelism of a single-table source. */
	private static final int SOURCE_PARALLELISM_DEFAULT = 1;

	/** Default parallelism of the sink. */
	private static final int SINK_PARALLELISM_DEFAULT = 3;

	/** Default job-wide parallelism. */
	private static final int GLOBAL_PARALLELISM_DEFAULT = 1;

	/** Default parallelism of a multi-table source. */
	private static final int SOURCE_MULTITABLE_PARALLELISM_DEFAULT = 4;

	/** Default minimum pause between two checkpoints, in milliseconds. */
	private static final long MIN_PAUSE_BETWEEN_CHECKPOINTS_DEFAULT = 10000L;

	/** Default checkpoint timeout, in milliseconds. */
	private static final long CHECKPOINT_TIMEOUT_DEFAULT = 120000L;

	/** Default checkpoint interval, in milliseconds. */
	private static final long CHECKPOINT_INTERVAL_DEFAULT = 30000L;

	/** Default: at most one checkpoint in flight at a time. */
	private static final int MAX_CONCURRENT_CHECKPOINT_DEFAULT = 1;

	/** Default Kafka partition count. */
	private static final int KAFKA_PARTITION_SIZE = 5;

	/** Default number of tolerated checkpoint failures. */
	private static final int TOLERABLE_CHECKPOINT_FAILURE_NUMBER_DEFAULT = 99;

	/** Default upper bound for the LogMiner batch size (one million). */
	private static final int MAX_BATCH_SIZE = 1000000;

	/** Default: discover tables added after the job has started. */
	private static final boolean SCAN_NEWLY_ADDED_TABLE_ENABLED = true;

	private BootstrapHelper() {
		// Static utility class: prevent instantiation.
	}

	/**
	 * Parses command-line arguments and the referenced properties files into a
	 * fully populated {@link JobParametersConfig}.
	 *
	 * <p>Recognized arguments: {@code base_config_path} (shared properties
	 * file), {@code task_config_path} (per-task properties file), plus the
	 * boolean fallbacks {@code is_online_catalog}, {@code is_switch_logfile}
	 * and {@code is_schema_change_to_downstream}. Values in the properties
	 * files take precedence over the command-line fallbacks.
	 *
	 * @param args raw command-line arguments
	 * @return the populated job configuration
	 * @throws IOException if a properties file cannot be read
	 */
	public static JobParametersConfig setEnvAndParameter(String[] args) throws IOException {
		ParameterTool tool = ParameterTool.fromArgs(args);
		String confUrl = tool.get("base_config_path", "");
		ParameterTool paras = ParameterTool.fromPropertiesFile(confUrl);
		String taskConfUrl = tool.get("task_config_path", "");
		ParameterTool taskParas = ParameterTool.fromPropertiesFile(taskConfUrl);
		// Command-line values only serve as defaults for the file-based keys below.
		boolean isOnlineCatalog = tool.getBoolean("is_online_catalog", true);
		boolean isSwitchLogfile = tool.getBoolean("is_switch_logfile", SWITCH_LOGFILE);
		boolean isSchemaChangeToDownstream = tool.getBoolean("is_schema_change_to_downstream", false);

		JobParametersConfig config = new JobParametersConfig();
		config.setCdcTaskname(taskParas.get("cdc.taskname"));

		// Source database: connection details come from the base config,
		// table/column selection from the per-task config.
		SourceDbConfig sourceDbConfig = new SourceDbConfig();
		sourceDbConfig.setHostname(paras.get("source.hostname"));
		sourceDbConfig.setPort(paras.getInt("source.port"));
		sourceDbConfig.setDatabase(paras.get("source.database"));
		sourceDbConfig.setSchemaList(taskParas.get("source.schemaList"));
		sourceDbConfig.setTableList(taskParas.get("source.tableList"));
		sourceDbConfig.setUsername(paras.get("source.username"));
		sourceDbConfig.setPassword(paras.get("source.password"));
		sourceDbConfig.setFetchSize(paras.get("source.fetchSize"));
		sourceDbConfig.setParallelism(taskParas.getInt("source.multitable.parallelism", SOURCE_MULTITABLE_PARALLELISM_DEFAULT));
		sourceDbConfig.setColumnIncludeList(taskParas.get("source.columnIncludeList"));
		sourceDbConfig.setColumnExcludeList(taskParas.get("source.columnExcludeList"));
		sourceDbConfig.setSqlMap(taskParas.get("source.sqlMap"));
		config.setSourceDbConfig(sourceDbConfig);

		// Job-level knobs.
		config.setSourceParallelism(taskParas.getInt("source.parallelism", SOURCE_PARALLELISM_DEFAULT));
		config.setSinkParallelism(taskParas.getInt("sink.parallelism", SINK_PARALLELISM_DEFAULT));
		config.setStartupMode(taskParas.get("startupMode"));
		config.setMaxBatchSize(paras.getInt("maxBatchSize", MAX_BATCH_SIZE));
		config.setOnlineCatalog(taskParas.getBoolean("isOnlineCatalog", isOnlineCatalog));
		config.setIncludeSchemaChange(paras.getBoolean("includeSchemaChange", INCLUDE_SCHEMA_CHANGES));
		config.setSwitchLogfile(taskParas.getBoolean("switchLogfile", isSwitchLogfile));
		config.setSwitchLogfileMinute(taskParas.getLong("switchLogfileMinute", SWITCH_LOGFILE_MINUTE));
		config.setScanNewlyAddedTableEnabled(taskParas.getBoolean("scanNewlyAddedTableEnabled", SCAN_NEWLY_ADDED_TABLE_ENABLED));
		config.setSchemaChangeToDownstream(paras.getBoolean("schemaChangeToDownstream", isSchemaChangeToDownstream));

		config.setStreamExecutionConfig(buildStreamExecutionConfig(paras, taskParas));
		config.setKafkaConfig(buildKafkaConfig(paras, taskParas));

		// Sink database settings (all from the base config).
		SinkDbConfig sinkDbConfig = new SinkDbConfig();
		sinkDbConfig.setHostname(paras.get("sink.hostname"));
		sinkDbConfig.setPort(paras.getInt("sink.port", 0));
		sinkDbConfig.setDatabase(paras.get("sink.database"));
		sinkDbConfig.setUsername(paras.get("sink.username"));
		sinkDbConfig.setPassword(paras.get("sink.password"));
		config.setSinkDbConfig(sinkDbConfig);
		return config;
	}

	/**
	 * Builds the execution settings (global parallelism and checkpointing).
	 *
	 * @param paras base (shared) configuration
	 * @param taskParas per-task configuration; supplies the state-backend path
	 * @return the populated execution configuration
	 */
	protected static StreamExecutionConfig buildStreamExecutionConfig(ParameterTool paras, ParameterTool taskParas) {
		StreamExecutionConfig streamExecutionConfig = new StreamExecutionConfig();
		streamExecutionConfig.setParallelism(paras.getInt("global.parallelism", GLOBAL_PARALLELISM_DEFAULT));
		streamExecutionConfig.setStateBackend(taskParas.get("checkpoint.stateBackend"));
		streamExecutionConfig.setMinPauseBetweenCheckpoints(paras.getLong("checkpoint.minPauseBetweenCheckpoints", MIN_PAUSE_BETWEEN_CHECKPOINTS_DEFAULT));
		streamExecutionConfig.setMaxConcurrentCheckpoints(paras.getInt("checkpoint.maxConcurrentCheckpoints", MAX_CONCURRENT_CHECKPOINT_DEFAULT));
		streamExecutionConfig.setCheckpointTimeout(paras.getLong("checkpoint.checkpointTimeout", CHECKPOINT_TIMEOUT_DEFAULT));
		streamExecutionConfig.setCheckpointInterval(paras.getLong("checkpoint.checkpointInterval", CHECKPOINT_INTERVAL_DEFAULT));
		streamExecutionConfig.setTolerableCheckpointFailureNumber(paras.getInt("checkpoint.tolerableCheckpointFailureNumber", TOLERABLE_CHECKPOINT_FAILURE_NUMBER_DEFAULT));
		return streamExecutionConfig;
	}

	/**
	 * Builds the Kafka sink settings.
	 *
	 * @param paras base (shared) configuration; supplies broker/auth settings
	 * @param taskParas per-task configuration; supplies topic/partition settings
	 * @return the populated Kafka configuration
	 */
	protected static KafkaConfig buildKafkaConfig(ParameterTool paras, ParameterTool taskParas) {
		KafkaConfig kafkaConfig = new KafkaConfig();
		kafkaConfig.setBootstrapServers(paras.get("kafka.bootstrap.servers"));
		kafkaConfig.setUsername(paras.get("kafka.username"));
		kafkaConfig.setPassword(paras.get("kafka.password"));
		kafkaConfig.setAuthPassword(paras.getBoolean("kafka.isauth.password", false));
		kafkaConfig.setPartitionByHash(taskParas.getBoolean("kafka.partitionByHash", true));
		kafkaConfig.setPartitionSize(taskParas.getInt("kafka.partitionSize", KAFKA_PARTITION_SIZE));
		kafkaConfig.setTopic(taskParas.get("kafka.topic"));
		return kafkaConfig;
	}

	/**
	 * Resolves the Flink CDC startup options from the configured startup mode.
	 *
	 * @param config job configuration; {@code getStartupMode()} must name a
	 *               {@link StartupMode} constant
	 * @return the matching {@link StartupOptions}
	 * @throws IllegalArgumentException if the configured mode name is unknown
	 * @throws UnsupportedOperationException if the mode has no mapping here
	 */
	public static StartupOptions startupOptions(JobParametersConfig config) {
		StartupMode startupMode = StartupMode.valueOf(config.getStartupMode());
		switch (startupMode) {
		case INITIAL:
			return StartupOptions.initial();
		case EARLIEST_OFFSET:
			return StartupOptions.earliest();
		case LATEST_OFFSET:
			return StartupOptions.latest();
		case SPECIFIC_OFFSETS:
			return StartupOptions.specificOffset(config.getSpecificOffsetFile(), config.getSpecificOffsetPos());
		case TIMESTAMP:
			return StartupOptions.timestamp(config.getStartupTimestampMillis());
		default:
			throw new UnsupportedOperationException(startupMode + " mode is not supported.");
		}
	}

	/**
	 * Builds the Debezium properties for the Oracle (LogMiner) connector.
	 *
	 * @param config job configuration
	 * @return Debezium properties for Oracle
	 */
	public static Properties oracleDebeziumProperties(JobParametersConfig config) {
		Properties properties = commonDebeziumProperties(config.getSourceDbConfig().getColumnExcludeList(), config.getSourceDbConfig().getColumnIncludeList(), config.isIncludeSchemaChange());
		properties.setProperty("database.tablename.case.insensitive", DATABASE_TABLENAME_CASE_INSENSITIVE);
		properties.setProperty("database.connection.adapter", DATABASE_CONNECTION_ADAPTER);
		// Without the online-catalog strategy, mining is much slower.
		if (config.isOnlineCatalog()) {
			properties.setProperty("log.mining.strategy", LOG_MINING_STRATEGY);
			properties.setProperty("log.mining.continuous.mine", LOG_MINING_CONTINUOUS_MINE);
		}
		// Cap the LogMiner batch size (default config value: one million).
		properties.setProperty("log.mining.batch.size.max", String.valueOf(config.getMaxBatchSize()));
		// Whether (and how often, in minutes) to switch the mined log file.
		properties.setProperty("log.mining.switch.logfile", String.valueOf(config.isSwitchLogfile()));
		properties.setProperty("log.mining.switch.logfile.minute", String.valueOf(config.getSwitchLogfileMinute()));
		return properties;
	}

	/**
	 * Builds the Debezium properties for the MySQL connector, including the
	 * custom date/time converter.
	 *
	 * @param columnExcludeList comma-separated columns to exclude; may be empty
	 * @param columnIncludeList comma-separated columns to include; may be empty
	 * @param includeSchemaChange whether to forward DDL change events downstream
	 * @return Debezium properties for MySQL
	 */
	public static Properties mysqlDebeziumProperties(String columnExcludeList, String columnIncludeList, boolean includeSchemaChange) {
		Properties properties = commonDebeziumProperties(columnExcludeList, columnIncludeList, includeSchemaChange);
		properties.setProperty("converters", "datetime");
		properties.setProperty("datetime.type", MysqlDateTimeConverter.class.getCanonicalName());
		properties.setProperty("datetime.format.date", "yyyy-MM-dd");
		properties.setProperty("datetime.format.datetime", "yyyy-MM-dd HH:mm:ss");
		properties.setProperty("datetime.format.timestamp", "yyyy-MM-dd HH:mm:ss");
		properties.setProperty("datetime.format.timestamp.zone", "UTC+8");
		properties.setProperty("inconsistent.schema.handling.mode", "warn");
		return properties;
	}

	/**
	 * Builds the Debezium properties shared by all connectors.
	 *
	 * @param columnExcludeList comma-separated columns to exclude; may be empty
	 * @param columnIncludeList comma-separated columns to include; may be empty
	 * @param includeSchemaChange whether to forward DDL change events downstream
	 * @return the common Debezium properties
	 */
	private static Properties commonDebeziumProperties(String columnExcludeList, String columnIncludeList, boolean includeSchemaChange) {
		Properties properties = new Properties();
		properties.setProperty("debezium.snapshot.locking.mode", DEBEZIUM_SNAPSHOT_LOCKING_MODE);
		properties.setProperty("include.schema.changes", String.valueOf(includeSchemaChange));
		properties.setProperty("bigint.unsigned.handling.mode", BIGINT_UNSIGNED_HANDLING_MODE);
		properties.setProperty("decimal.handling.mode", DECIMAL_HANDLING_MODE);
		if (!isNullOrEmpty(columnIncludeList)) {
			properties.setProperty("column.include.list", columnIncludeList);
		}
		if (!isNullOrEmpty(columnExcludeList)) {
			properties.setProperty("column.exclude.list", columnExcludeList);
		}
		// Capture DDL only for the configured tables (both the legacy
		// "monitored" key and its "captured" replacement are set).
		properties.setProperty("database.history.store.only.monitored.tables.ddl", "true");
		properties.setProperty("database.history.store.only.captured.tables.ddl", "true");
		properties.setProperty("database.history.skip.unparseable.ddl", "true");
		return properties;
	}

	/**
	 * Creates the Flink execution environment with the RocksDB state backend
	 * (incremental checkpoints) and the configured checkpointing behavior.
	 *
	 * @param config execution settings built by {@link #buildStreamExecutionConfig}
	 * @return the configured environment
	 * @throws IOException if the state backend cannot be created
	 */
	public static StreamExecutionEnvironment setEnvironment(StreamExecutionConfig config) throws IOException {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(config.getParallelism());

		// RocksDB backend with incremental checkpoints enabled (second arg).
		env.setStateBackend(new RocksDBStateBackend(config.getStateBackend(), true));

		// Guarantee a minimum gap between consecutive checkpoints.
		env.getCheckpointConfig().setMinPauseBetweenCheckpoints(config.getMinPauseBetweenCheckpoints());

		// A checkpoint that exceeds this timeout is discarded.
		env.getCheckpointConfig().setCheckpointTimeout(config.getCheckpointTimeout());

		// How often a checkpoint is triggered.
		env.getCheckpointConfig().setCheckpointInterval(config.getCheckpointInterval());

		// Exactly-once processing semantics (Flink's default, set explicitly).
		env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);

		// Limit how many checkpoints may be in flight simultaneously.
		env.getCheckpointConfig().setMaxConcurrentCheckpoints(config.getMaxConcurrentCheckpoints());

		env.getCheckpointConfig().setTolerableCheckpointFailureNumber(config.getTolerableCheckpointFailureNumber());

		// Retain checkpoint data when the job is cancelled, so the job can be
		// restored from the last checkpoint on demand.
		env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
		return env;
	}

	/**
	 * Parses a JSON object string into a table-to-SQL map.
	 *
	 * @param sql JSON object string; may be {@code null} or empty
	 * @return the parsed map, or an empty mutable map when no input was given
	 */
	@SuppressWarnings("unchecked")
	public static Map<String, String> sqlMap(String sql) {
		if (isNullOrEmpty(sql)) {
			return new HashMap<>();
		}
		// fastjson parses a JSON object into a JSONObject, which implements
		// Map<String, Object>; the values are expected to be SQL strings here.
		return (Map<String, String>) JSON.parse(sql);
	}

	/**
	 * Null-safe empty check for optional configuration values. Replaces the
	 * previous mix of the shaded netty StringUtil and MySQL's StringUtils,
	 * both of which have exactly these semantics.
	 */
	private static boolean isNullOrEmpty(String value) {
		return value == null || value.isEmpty();
	}

}
