package com.atguigu.sql.connector;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public class Flink03_KafkaConnector {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        // Checkpointing is required for the exactly-once Kafka sink: transactions
        // are committed on checkpoint completion.
        env.enableCheckpointing(2000L);
        // Bridge the DataStream environment into a streaming Table environment.
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Kafka source table: three payload columns (CSV format) plus the
        // topic/partition/offset metadata columns exposed by the connector.
        String sourceDdl =
                "create table t_source("
                        + "id STRING, "
                        + "vc INT, "
                        + "ts BIGINT ,"
                        + "`topic` STRING NOT NULL METADATA,"
                        + "`partition` INT NOT NULL METADATA,"
                        + "`offset` BIGINT NOT NULL METADATA"
                        + " ) WITH ( "
                        + "'connector' = 'kafka',"
                        + "'topic' = 'topicA',"
                        + "'properties.bootstrap.servers' = 'hadoop102:9092,hadoop103:9092,hadoop104:9092',"
                        + "'properties.group.id' = 'flinksql240620',"
                        + "'format' = 'csv',"
                        + "'scan.startup.mode' = 'latest-offset'"
                        // Further Kafka client options can be supplied as
                        // 'properties.<key>' = '<value>' entries in the WITH clause.
                        + ")";
        tableEnv.executeSql(sourceDdl);

        // Project the payload and metadata columns; register the result so the
        // INSERT below can reference it by name.
        Table resultTable = tableEnv.sqlQuery(
                "select id ,vc ,ts,`topic`, `partition`, `offset` from t_source");
        tableEnv.createTemporaryView("result_table", resultTable);

        // Exactly-once delivery mandates a transactional-id prefix; deriving it
        // from the current time gives each run a fresh prefix so it does not
        // collide with transactions left over from a previous run.
        String txPrefix = "flink-" + System.currentTimeMillis();

        // Kafka sink table (JSON format). Columns are matched to the INSERT's
        // select list by position, so tp/pt/ot receive topic/partition/offset.
        String sinkDdl =
                "create table t_sink ( "
                        + " id STRING,"
                        + " vc INT , "
                        + " ts BIGINT, "
                        + " tp STRING , "
                        + " pt INT , "
                        + " ot BIGINT "
                        + " ) WITH ( "
                        + " 'connector' = 'kafka', "
                        + " 'topic' = 'topicG' , "
                        + " 'properties.bootstrap.servers' = 'hadoop102:9092,hadoop103:9092,hadoop104:9092' ,  "
                        + " 'format' = 'json' , "
                        + " 'sink.delivery-guarantee' = 'exactly-once' , "
                        + " 'sink.transactional-id-prefix' = '" + txPrefix + "',"
                        // Must not exceed the broker's transaction.max.timeout.ms.
                        + " 'properties.transaction.timeout.ms' = '600000' "
                        + " ) ";
        tableEnv.executeSql(sinkDdl);

        // Submits the streaming job. NOTE(review): a plain console consumer reads
        // uncommitted records, so it sees rows before the checkpoint commits the
        // transaction; use isolation.level=read_committed to observe exactly-once.
        tableEnv.executeSql(
                "insert into t_sink select id, vc, ts,`topic`,`partition`,`offset` from result_table");
    }
}
