package com.yifeng.repo.flink.data.transport.bootstrap;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;

import com.alibaba.fastjson.JSON;
import com.mysql.cj.util.StringUtils;
import com.ververica.cdc.connectors.base.options.StartupMode;
import com.yifeng.repo.flink.data.transport.config.JobParametersConfig;
import com.yifeng.repo.flink.data.transport.config.KafkaConfig;
import com.yifeng.repo.flink.data.transport.config.SinkDbConfig;
import com.yifeng.repo.flink.data.transport.config.StreamExecutionConfig;


/**
 * Bootstrap helper for Kafka-consuming Flink jobs: loads job parameters from a
 * properties file and resolves the Kafka source's starting-offset strategy.
 *
 * <p>NOTE(review): class name violates Java naming conventions (should be
 * {@code KafkaBootstrapHelper}); left unchanged to avoid breaking existing callers.
 *
 * @author lijing
 * @since 2023-06-16
 */
public class kafkaBootstrapHelper {
	
	private static final int SOURCE_PARALLELISM_DEFAULT = 1;
	
	private static final int SINK_PARALLELISM_DEFAULT = 3;
	
	private static final int GLOBAL_PARALLELISM_DEFAULT = 1;
	
	private static final long MIN_PAUSE_BETWEEN_CHECKPOINTS_DEFAULT = 10000L;
	
	private static final long CHECKPOINT_TIMEOUT_DEFAULT = 120000L;
	
	private static final long CHECKPOINT_INTERVAL_DEFAULT = 30000L;
	
	private static final int MAX_CONCURRENT_CHECKPOINT_DEFAULT = 1;
	
	private static final int KAFKA_PARTITION_SIZE = 5;
	
	private static final int TOLERABLE_CHECKPOINT_FAILURE_NUMBER_DEFAULT = 99;
	
	/** Utility class — not meant to be instantiated. */
	private kafkaBootstrapHelper() {
	}
	
	/**
	 * Builds the full job configuration from the properties file named by the
	 * {@code --config_path} command-line argument.
	 *
	 * @param args command-line arguments; must contain {@code --config_path <file>}
	 * @return populated {@link JobParametersConfig} (source/sink parallelism,
	 *         checkpoint settings, Kafka connection, sink database settings)
	 * @throws IllegalArgumentException if {@code --config_path} is missing or blank
	 * @throws IOException if the properties file cannot be read
	 */
	public static JobParametersConfig setEnvAndParameter(String[] args) throws IOException {
		ParameterTool tool = ParameterTool.fromArgs(args);
		String confUrl = tool.get("config_path", "");
		// Fail fast with a clear message instead of an opaque FileNotFoundException
		// from fromPropertiesFile("") below.
		if (StringUtils.isNullOrEmpty(confUrl)) {
			throw new IllegalArgumentException("Missing required argument --config_path (path to the job properties file)");
		}
		ParameterTool confParas = ParameterTool.fromPropertiesFile(confUrl);
		JobParametersConfig config = new JobParametersConfig();
		config.setCdcTaskname(confParas.get("cdc.taskname"));
		// Source/sink parallelism and Kafka startup mode.
		config.setSourceParallelism(confParas.getInt("source.parallelism", SOURCE_PARALLELISM_DEFAULT));
		config.setSinkParallelism(confParas.getInt("sink.parallelism", SINK_PARALLELISM_DEFAULT));
		config.setStartupMode(confParas.get("startupMode"));
		config.setStreamExecutionConfig(buildStreamExecutionConfig(confParas));
		config.setKafkaConfig(buildKafkaConfig(confParas));
		config.setSinkDbConfig(buildSinkDbConfig(confParas));
		return config;
	}
	
	/** Reads global parallelism and checkpoint settings ({@code checkpoint.*} keys). */
	private static StreamExecutionConfig buildStreamExecutionConfig(ParameterTool confParas) {
		StreamExecutionConfig streamExecutionConfig = new StreamExecutionConfig();
		streamExecutionConfig.setParallelism(confParas.getInt("global.parallelism", GLOBAL_PARALLELISM_DEFAULT));
		streamExecutionConfig.setStateBackend(confParas.get("checkpoint.stateBackend"));
		streamExecutionConfig.setMinPauseBetweenCheckpoints(confParas.getLong("checkpoint.minPauseBetweenCheckpoints", MIN_PAUSE_BETWEEN_CHECKPOINTS_DEFAULT));
		streamExecutionConfig.setMaxConcurrentCheckpoints(confParas.getInt("checkpoint.maxConcurrentCheckpoints", MAX_CONCURRENT_CHECKPOINT_DEFAULT));
		streamExecutionConfig.setCheckpointTimeout(confParas.getLong("checkpoint.checkpointTimeout", CHECKPOINT_TIMEOUT_DEFAULT));
		streamExecutionConfig.setCheckpointInterval(confParas.getLong("checkpoint.checkpointInterval", CHECKPOINT_INTERVAL_DEFAULT));
		streamExecutionConfig.setTolerableCheckpointFailureNumber(confParas.getInt("checkpoint.tolerableCheckpointFailureNumber", TOLERABLE_CHECKPOINT_FAILURE_NUMBER_DEFAULT));
		return streamExecutionConfig;
	}
	
	/** Reads Kafka connection and topic settings ({@code kafka.*} keys). */
	private static KafkaConfig buildKafkaConfig(ParameterTool confParas) {
		KafkaConfig kafkaConfig = new KafkaConfig();
		kafkaConfig.setBootstrapServers(confParas.get("kafka.bootstrap.servers"));
		kafkaConfig.setUsername(confParas.get("kafka.username"));
		kafkaConfig.setPassword(confParas.get("kafka.password"));
		kafkaConfig.setAuthPassword(confParas.getBoolean("kafka.isauth.password", false));
		kafkaConfig.setPartitionByHash(confParas.getBoolean("kafka.partitionByHash", true));
		kafkaConfig.setPartitionSize(confParas.getInt("kafka.partitionSize", KAFKA_PARTITION_SIZE));
		kafkaConfig.setTopic(confParas.get("kafka.topic"));
		kafkaConfig.setGroupId(confParas.get("kafka.group"));
		return kafkaConfig;
	}
	
	/** Reads sink database connection and table-mapping settings ({@code sink.*} keys). */
	private static SinkDbConfig buildSinkDbConfig(ParameterTool confParas) {
		SinkDbConfig sinkDbConfig = new SinkDbConfig();
		sinkDbConfig.setJdbcUrl(confParas.get("sink.jdbcUrl"));
		sinkDbConfig.setHostname(confParas.get("sink.hostname"));
		sinkDbConfig.setPort(confParas.getInt("sink.port", 0));
		sinkDbConfig.setDatabase(confParas.get("sink.database"));
		sinkDbConfig.setUsername(confParas.get("sink.username"));
		sinkDbConfig.setPassword(confParas.get("sink.password"));
		sinkDbConfig.setPoolSize(confParas.getInt("sink.poolSize", 5));
		// Primary-key and table-name mappings are supplied as JSON object strings.
		sinkDbConfig.setPkMap(transMap(confParas.get("sink.pkInfo")));
		sinkDbConfig.setTableMappingMap(transMap(confParas.get("sink.tableMappingInfo")));
		sinkDbConfig.setKeyWord(confParas.get("sink.keyword"));
		sinkDbConfig.setNeedTransferKeyWord(confParas.getBoolean("sink.isNeedTransferKeyWord", false));
		sinkDbConfig.setKeyWordSupplement(confParas.get("sink.keyWordSupplement"));
		sinkDbConfig.setNeedDealKeywordTable(confParas.get("sink.needDealKeywordTable"));
		return sinkDbConfig;
	}
	
	/**
	 * Parses a JSON object string into a String-to-String map.
	 *
	 * @param info JSON object string, e.g. {@code {"table":"pkColumn"}}; may be null/empty
	 * @return parsed map, or an empty mutable map when {@code info} is null or empty
	 */
	public static Map<String, String> transMap(String info) {
		if (StringUtils.isNullOrEmpty(info)) {
			return new HashMap<>();
		}
		// Cast is safe as long as the config supplies a flat JSON object with
		// string values; fastjson parses such an object into a Map implementation.
		@SuppressWarnings("unchecked")
		Map<String, String> result = (Map<String, String>) JSON.parse(info);
		return result;
	}
	
	/**
	 * Resolves the Kafka {@link OffsetsInitializer} from the configured Flink CDC startup mode.
	 *
	 * @param config job configuration; {@code startupMode} must name a {@link StartupMode} constant
	 * @return offsets initializer matching the startup mode
	 * @throws IllegalArgumentException if the configured mode is missing or not a valid {@link StartupMode}
	 * @throws UnsupportedOperationException if the mode is valid but has no Kafka-offset equivalent
	 */
	public static OffsetsInitializer offsetsInitializer(JobParametersConfig config) {
		String mode = config.getStartupMode();
		// StartupMode.valueOf(null) would throw a bare NPE; give a diagnosable error instead.
		if (StringUtils.isNullOrEmpty(mode)) {
			throw new IllegalArgumentException("startupMode is not configured; expected one of " + java.util.Arrays.toString(StartupMode.values()));
		}
		StartupMode startupMode = StartupMode.valueOf(mode);
		switch (startupMode) {
		case EARLIEST_OFFSET:
			return OffsetsInitializer.earliest();
		case LATEST_OFFSET:
			return OffsetsInitializer.latest();
		case TIMESTAMP:
			// NOTE(review): setEnvAndParameter never populates startupTimestampMillis —
			// verify callers set it before using TIMESTAMP mode.
			return OffsetsInitializer.timestamp(config.getStartupTimestampMillis());
		default:
			throw new UnsupportedOperationException(startupMode + " mode is not supported.");
		}
	}

}
