package com.youxin.dataStream.kafka;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer010;

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

/**
 * If Flink checkpointing is enabled, FlinkKafkaProducer09/010 can only provide
 * at-least-once semantics, and two additional settings are required:
 *      setLogFailuresOnly(false)   when true, write failures are only logged; when false, failures cause a retry/restart
 *      setFlushOnCheckpoint(true)  flush in-flight records on every checkpoint
 *
 * It is also recommended to raise the Kafka producer retry count: "retries" defaults to 0.
 *
 * For Kafka 0.11:
 *      with Flink checkpointing enabled, the 0.11 producer can provide exactly-once semantics,
 *      but the concrete semantic must be chosen explicitly:
 *          Semantic.NONE
 *          Semantic.AT_LEAST_ONCE   (the default)
 *          Semantic.EXACTLY_ONCE
 *
 *  When using 0.11 exactly-once semantics, the transaction timeout must be reconciled:
 *      transaction.max.timeout.ms is one hour on the Flink side but 15 minutes on the Kafka broker;
 *      the Flink value is larger than the broker's, so one of the two sides must be adjusted (either works).
 */
public class KafkaProducerSink {

    /**
     * Reads lines from a socket and writes them to a Kafka topic with a
     * checkpointed FlinkKafkaProducer010 sink configured for at-least-once delivery.
     *
     * @param args unused command-line arguments
     * @throws Exception if the Flink job fails to build or execute
     */
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStream<String> dataSource = env.socketTextStream("hadoop-1", 9999);

        // Checkpoint configuration: checkpoint every 5s with exactly-once state semantics.
        env.enableCheckpointing(5000);
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        env.getCheckpointConfig().setCheckpointTimeout(60000);
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        // Keep externalized checkpoints on cancellation so the job can be restored manually.
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // Kafka producer configuration.
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "172.16.1.90:9092,172.16.1.91:9092");
        // "retries" defaults to 0; raise it so transient broker failures do not drop records.
        properties.setProperty("retries", "3");

        FlinkKafkaProducer010<String> producer =
                new FlinkKafkaProducer010<String>("11111", new SimpleStringSchema(), properties);
        // Required for at-least-once with the 09/010 producer (see class javadoc):
        // false => a write failure triggers a retry/failure instead of being logged and dropped.
        producer.setLogFailuresOnly(false);
        // Flush pending records on every checkpoint so acknowledged checkpoints cover all writes.
        producer.setFlushOnCheckpoint(true);

        dataSource.addSink(producer);

        env.execute("kafka job");
    }
}
