package com.yifeng.repo.flink.data.transport.utils.kafka;

import java.util.Objects;
import java.util.Properties;

import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.kafka.clients.producer.ProducerConfig;

import com.yifeng.repo.flink.data.transport.config.KafkaConfig;
import com.yifeng.repo.flink.data.transport.dto.CdcTypeEnum;

/**
 * Kafka helper for the Flink data-transport pipeline.
 *
 * <p>Responsibilities:
 * <ul>
 *   <li>Building exactly-once / at-least-once {@link FlinkKafkaProducer} instances
 *       with the project's standard producer tuning;</li>
 *   <li>Building SASL/SCRAM consumer properties;</li>
 *   <li>Topic-name composition ({@code tp_<database>_<f|i>}) and deterministic
 *       partition selection by table name.</li>
 * </ul>
 */
public class KafkaUtil {

	// ---- Topic naming ----------------------------------------------------
	private static final String KAFKA_TOPIC_PREFIX = "tp";

	private static final String KAFKA_TOPIC_SEPARATOR = "_";

	// Suffix for full-load (snapshot) topics.
	private static final String FULL_TOPIC_SUFFIX = "f";

	// Suffix for incremental (CDC) topics.
	private static final String INCRE_TOPIC_SUFFIX = "i";

	// Default partition count per topic; keep in sync with broker-side topic creation.
	private static final int TOPIC_PARTITION_SIZE = 5;

	// Fallback topic used when the serialization schema does not pick one dynamically.
	private static final String DEFAULT_TOPIC = "test";

	// ---- Producer tuning -------------------------------------------------
	// Must be larger than the Flink checkpoint interval but smaller than the
	// broker's transaction.max.timeout.ms.
	private static final int TRANSACTION_TIMEOUT_MS = 1000 * 60 * 5;

	private static final String ACKS = "all";

	// 1 keeps records strictly ordered even when retries kick in.
	private static final String MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION = "1";

	// With retries set, a partition-leader switch is retried 5 times instead of
	// failing (and restarting) the Flink job.
	private static final String RETRIES = "5";

	// Slightly below the 5 MB broker limit (5242880) to leave headroom.
	private static final String MAX_REQUEST_SIZE = "5242780";

	// Idempotence is mandatory for exactly-once semantics.
	private static final String ENABLE_IDEMPOTENCE = "true";

	// ---- Security / transport --------------------------------------------
	private static final String SECURITY_PROTOCOL = "SASL_PLAINTEXT";

	private static final String SASL_MECHANISM = "SCRAM-SHA-256";

	private static final String COMPRESSION_TYPE = "lz4";

	private static final String KEY_SERIALIZER_CLASS = "org.apache.kafka.common.serialization.StringSerializer";

	private static final String VALUE_SERIALIZER_CLASS = "org.apache.kafka.common.serialization.ByteArraySerializer";

	/** Utility class — not instantiable. */
	private KafkaUtil() {
	}

	/**
	 * Creates an exactly-once producer whose serialization schema may route each
	 * record to a dynamic topic; records that do not choose one go to the
	 * default topic.
	 *
	 * @param serializationSchema serializer that may also select the target topic per record
	 * @param transactionId       Kafka transactional id (must be unique per producer instance)
	 * @param kafkaConfig         broker addresses and optional SASL credentials
	 * @param <T>                 record type produced to Kafka
	 * @return an EXACTLY_ONCE {@link FlinkKafkaProducer}
	 */
	public static <T> FlinkKafkaProducer<T> getKafkaBySchema(KafkaSerializationSchema<T> serializationSchema,
			String transactionId, KafkaConfig kafkaConfig) {
		Properties props = getProperties(transactionId, kafkaConfig);
		props.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, KEY_SERIALIZER_CLASS);
		props.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, VALUE_SERIALIZER_CLASS);
		return new FlinkKafkaProducer<T>(DEFAULT_TOPIC, serializationSchema, props,
				FlinkKafkaProducer.Semantic.EXACTLY_ONCE);
	}

	/**
	 * Creates a plain string producer writing to the default topic.
	 *
	 * <p>NOTE(review): this overload uses the 3-arg constructor, i.e. Flink's
	 * default AT_LEAST_ONCE semantic — intentionally weaker than the schema-based
	 * overload above; confirm that is desired.
	 *
	 * @param simpleStringSchema string serialization schema
	 * @param transactionId      Kafka transactional id (set in the properties even
	 *                           though this semantic does not use transactions)
	 * @param kafkaConfig        broker addresses and optional SASL credentials
	 * @return a {@link FlinkKafkaProducer} for string payloads
	 */
	public static FlinkKafkaProducer<String> getKafkaBySchema(SimpleStringSchema simpleStringSchema,
			String transactionId, KafkaConfig kafkaConfig) {
		Properties props = getProperties(transactionId, kafkaConfig);
		return new FlinkKafkaProducer<String>(DEFAULT_TOPIC, simpleStringSchema, props);
	}

	/**
	 * Assembles the common producer properties: bootstrap servers, transactional
	 * settings, ordering/idempotence guarantees, compression, and (optionally)
	 * SASL/SCRAM authentication.
	 *
	 * @param transactionId Kafka transactional id
	 * @param kafkaConfig   broker addresses and optional SASL credentials
	 * @return producer {@link Properties}
	 */
	private static Properties getProperties(String transactionId, KafkaConfig kafkaConfig) {
		Properties props = new Properties();
		props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConfig.getBootstrapServers());

		// The transaction timeout must be larger than the checkpoint interval,
		// but smaller than the broker's transaction.max.timeout.ms.
		props.setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, String.valueOf(TRANSACTION_TIMEOUT_MS));

		props.setProperty(ProducerConfig.ACKS_CONFIG, ACKS);

		// 1 in-flight request guarantees strict ordering.
		props.setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION);

		// Retry on partition-leader switch instead of restarting the Flink job.
		props.setProperty(ProducerConfig.RETRIES_CONFIG, RETRIES);

		// Cap request size just under the 5 MB broker limit (5242880).
		props.setProperty(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, MAX_REQUEST_SIZE);

		// Idempotence is required for exactly-once.
		props.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, ENABLE_IDEMPOTENCE);

		props.setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, transactionId);

		applySaslConfig(props, kafkaConfig);

		props.setProperty(ProducerConfig.COMPRESSION_TYPE_CONFIG, COMPRESSION_TYPE);
		return props;
	}

	/**
	 * Builds the consumer-side properties (currently only SASL/SCRAM
	 * authentication, when enabled).
	 *
	 * @param kafkaConfig Kafka configuration with optional SASL credentials
	 * @return consumer {@link Properties}
	 */
	public static Properties getConsumerProperties(KafkaConfig kafkaConfig) {
		Properties props = new Properties();
		applySaslConfig(props, kafkaConfig);
		return props;
	}

	/**
	 * Applies SASL/SCRAM settings to {@code props} when password authentication
	 * is enabled; no-op otherwise. Shared by producer and consumer property
	 * builders.
	 *
	 * @param props       properties to mutate
	 * @param kafkaConfig source of the auth flag and credentials
	 */
	private static void applySaslConfig(Properties props, KafkaConfig kafkaConfig) {
		if (kafkaConfig.isAuthPassword()) {
			props.setProperty("security.protocol", SECURITY_PROTOCOL);
			props.setProperty("sasl.mechanism", SASL_MECHANISM);
			// JAAS option values must be double-quoted; unquoted values break for
			// credentials containing spaces or special characters.
			String jaasConfig = "org.apache.kafka.common.security.scram.ScramLoginModule required"
					+ " username=\"" + kafkaConfig.getUsername() + "\""
					+ " password=\"" + kafkaConfig.getPassword() + "\";";
			props.setProperty("sasl.jaas.config", jaasConfig);
		}
	}

	/**
	 * Composes a Kafka topic name: {@code tp_<database>_f} for full loads,
	 * {@code tp_<database>_i} for incremental loads.
	 *
	 * <p>NOTE(review): {@code toLowerCase()} uses the default locale; fine for
	 * ASCII database names, but consider {@code Locale.ROOT} if non-ASCII names
	 * are possible (changing it now would rename existing topics).
	 *
	 * @param database database name (case-insensitive, surrounding whitespace ignored)
	 * @param type     CDC type: full (snapshot) or incremental
	 * @return the composed topic name
	 * @throws IllegalArgumentException if {@code type} is neither full nor incremental
	 * @throws NullPointerException     if {@code database} is null
	 */
	public static String spliceTopic(String database, String type) {
		Objects.requireNonNull(database, "database");
		String topic = KAFKA_TOPIC_PREFIX + KAFKA_TOPIC_SEPARATOR + database.toLowerCase().trim();
		if (CdcTypeEnum.FULL.getType().equals(type)) {
			return topic + KAFKA_TOPIC_SEPARATOR + FULL_TOPIC_SUFFIX;
		}
		if (CdcTypeEnum.INCRE.getType().equals(type)) {
			return topic + KAFKA_TOPIC_SEPARATOR + INCRE_TOPIC_SUFFIX;
		}
		throw new IllegalArgumentException("cdc全量增量类型异常: " + type);
	}

	/**
	 * Deterministically maps a table name to a partition index in
	 * {@code [0, partitionSize)}.
	 *
	 * <p>The hash expression is deliberately kept identical to the historical
	 * implementation ({@code Objects.hash(tb.toUpperCase())}) so that existing
	 * data keeps landing on the same partitions — do not "simplify" it.
	 *
	 * @param tb            table name (case-insensitive)
	 * @param partitionSize number of partitions; {@code null} falls back to the
	 *                      default of {@value #TOPIC_PARTITION_SIZE}
	 * @return partition index in {@code [0, partitionSize)}
	 * @throws IllegalArgumentException if {@code partitionSize} is not positive
	 * @throws NullPointerException     if {@code tb} is null
	 */
	public static int choosePartition(String tb, Integer partitionSize) {
		Objects.requireNonNull(tb, "tb");
		int size = Objects.isNull(partitionSize) ? TOPIC_PARTITION_SIZE : partitionSize;
		if (size <= 0) {
			throw new IllegalArgumentException("partitionSize must be positive: " + size);
		}
		int partition = Objects.hash(tb.toUpperCase()) % size;
		if (partition < 0) {
			partition = partition + size;
		}
		return partition;
	}

}
