package com.atguigu.dataStreamApi.sink;

import com.alibaba.fastjson.JSON;
import com.atguigu.pojo.Event;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.kafka.clients.producer.ProducerConfig;
import util.SourceUtil;

/*
* 将数据写到外部kafka，此时flink是作为生产者的
* 1.创建生产者对象（直接new，然后传配置）
* new kafkaProducer(properties)
*
* 2.生产者配置
*   1）集群位置：bootstrap.servers
*   2)key和value的序列化
*       key.serializer
*       value.serializer
*   3)分区
*      指定分区号，发送到指定分区
*      未指定分区号，有key，按照key/分区数取模得分区号
*      未指定分区号，且未设定key值，选择默认分区器，采用粘性分区策略。随机产生一个分区号，
*      持续向该分区发送数据达到阈值后，再依据权重产生新的分区号，发送数据
*   4）应答级别
*       ack: 0,-1,1
*   5)事务超时时间
*       transaction.timeout.ms
*   6)事务ID
*       transaction.id
* 3.KafkaSink  DeliveryGuarantee.EXACTLY_ONCE 注意事项:
 *      1) The transaction timeout is larger than the maximum value allowed by the broker (as configured by transaction.max.timeout.ms).
 *         Kafka集群限制的事务的最大超时时间: transaction.max.timeout.ms = 900000 (15 minutes)
 *         生产者事务超时时间: transaction.timeout.ms = 60000 (1 minute)
 *         Flink KafkaSink默认的生产者事务超时时间 DEFAULT_KAFKA_TRANSACTION_TIMEOUT = Duration.ofHours(1);
*
* */

public class Flink02_KafkaSink {
     public static void main(String[] args) {
             StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
             env.setParallelism(1);

             //开始检查点，不然会报错，下午测试
         env.enableCheckpointing(2000L);


         DataStreamSource<Event> ds
                 = env.fromSource(SourceUtil.getSource(), WatermarkStrategy.noWatermarks(), "dataGenSource");
         SingleOutputStreamOperator<String> mapDs = ds.map(JSON::toJSONString);

         //将数据写到kafka TopicA主题中，设置生产者参数
         KafkaSink<String> kafkaSink = KafkaSink.<String>builder()
                 .setBootstrapServers("hadoop102:9092,hadoop103:9092")
                 .setRecordSerializer(//todo setRecordSerializer 序列化的意思
                         KafkaRecordSerializationSchema.builder()
                                 .setTopic("topicF")
                                 .setValueSerializationSchema(
                                         new SimpleStringSchema()
                                 )
                                 .build()
                 )
                 //.setDeliveryGuarantee(DeliveryGuarantee.AT_LEAST_ONCE)//至少一次
                 .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                 .setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG,"60000")
                 .setTransactionalIdPrefix("flink" + System.currentTimeMillis())
                 .build();

         mapDs.sinkTo(kafkaSink);//mapDs数据sink位置（kafkaSink）


         try {
                 env.execute();
             } catch (Exception e) {
                 throw new RuntimeException(e);
             }
         }
}
