package com.atguigu.flink.sql.connector;

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;

/**
 * Created by 黄凯 on 2023/6/26 0026 16:46
 *
 * @author 黄凯
 * 永远相信美好的事情总会发生.
 *
 * UpsertKafkaConnector: reads from a plain Kafka topic, aggregates, and writes the
 * resulting changelog (updates) back to Kafka via the upsert-kafka connector.
 */
public class Flink04_UpsertKafkaConnector {

    /**
     * Reads CSV rows from Kafka topic {@code topicA}, aggregates {@code vc} per {@code id},
     * and writes the resulting update stream to topic {@code topicB} via upsert-kafka.
     *
     * @param args unused
     * @throws Exception if the streaming insert job fails or the wait is interrupted
     */
    public static void main(String[] args) throws Exception {

        // 1. Create a table environment in streaming mode.
        TableEnvironment tableEnv =
                TableEnvironment.create(EnvironmentSettings.newInstance().inStreamingMode().build());

        // 2. Source table: plain Kafka connector, CSV-encoded records from topicA.
        String readSql =
                " create table t1 (" +
                        " id STRING , " +
                        " vc INT , " +
                        " ts BIGINT " +
                        ") WITH (" +
                        " 'connector' = 'kafka', " +
                        " 'topic' = 'topicA', " +
                        " 'properties.bootstrap.servers' = 'hadoop102:9092', " +
                        " 'properties.group.id' = 'flinksql', " +
                        " 'scan.startup.mode' = 'latest-offset', " +
                        " 'value.format' = 'csv' " +
                        ")" ;

        tableEnv.executeSql(readSql) ;

        // 3. Aggregate: sum of vc per id. This produces an update (changelog) stream.
        Table table = tableEnv.sqlQuery("select id , sum(vc) svc from t1 group by id ");
        tableEnv.createTemporaryView("t2" , table);

        // 4. Sink table: the plain 'kafka' connector would fail here with
        //    "Table sink ... doesn't support consuming update changes which is produced by
        //     node GroupAggregate(groupBy=[id], select=[id, SUM(vc) AS svc])",
        //    so the 'upsert-kafka' connector (which requires a PRIMARY KEY) is used instead.
        String writeSql =
                " create table t3 (" +
                        " id STRING , " +
                        " svc INT , " +
                        " PRIMARY KEY (id) NOT ENFORCED " +
                        ") WITH (" +
                        " 'connector' = 'upsert-kafka', " +
                        " 'topic' = 'topicB', " +
                        " 'properties.bootstrap.servers' = 'hadoop102:9092', " +
                        " 'key.format' = 'csv'," +
                        " 'value.format' = 'csv' " +
                        ")" ;
        tableEnv.executeSql(writeSql) ;

        // 5. Query t2 and write into t3. executeSql submits the INSERT job asynchronously;
        //    without await() the client would return immediately and, when running on a
        //    local MiniCluster (e.g. from an IDE), tear the streaming job down. await()
        //    blocks until the (unbounded) job terminates, keeping the pipeline alive.
        tableEnv.executeSql("insert into t3 select * from t2").await() ;

    }

}
