package com.atguigu.sql.connector;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Flink SQL demo: reads CSV records (id, vc, ts) from Kafka topic {@code topicA},
 * aggregates {@code sum(vc)} per {@code id}, and writes the changelog result to
 * Kafka topic {@code topicG} through the {@code upsert-kafka} connector.
 */
public class Flink04_UpsertKafkaConnector {

    /** Kafka broker list shared by the source and sink table definitions. */
    private static final String BOOTSTRAP_SERVERS =
            "hadoop102:9092,hadoop103:9092,hadoop104:9092";

    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        env.enableCheckpointing(2000L);

        // Stream table environment bridging the DataStream and Table/SQL APIs.
        StreamTableEnvironment streamTableEnv = StreamTableEnvironment.create(env);

        registerSourceTable(streamTableEnv);

        // Query the source and expose the result as a temporary view so the
        // insert statement below can reference it by name.
        Table resultTable = streamTableEnv.sqlQuery("select id ,vc ,ts from t_source");
        streamTableEnv.createTemporaryView("result_table", resultTable);

        // Uncomment to print the query result to stdout instead of sinking it:
        //resultTable.execute().print();

        registerSinkTable(streamTableEnv);

        // Aggregate vc per id and write the updating result to the upsert-kafka
        // sink. executeSql submits the insert job; no env.execute() is needed.
        streamTableEnv.executeSql(
                "insert into t_sink select id,sum(vc) sum_vc from result_table group by id");
    }

    /**
     * Registers the Kafka source table {@code t_source}: CSV records from
     * topic {@code topicA}, starting from the latest offset.
     */
    private static void registerSourceTable(StreamTableEnvironment tEnv) {
        String sourceTable =
                "create table t_source (" +
                        "id STRING," +
                        "vc INT," +
                        "ts BIGINT" +
                 " ) WITH ( " +
                        "'connector'='kafka'," +
                        "'topic'='topicA'," +
                        "'properties.bootstrap.servers'='" + BOOTSTRAP_SERVERS + "'," +
                        "'properties.group.id'='flinksql240620'," +
                        "'format'= 'csv'," +
                        "'scan.startup.mode' = 'latest-offset'" +
                  ")";
        tEnv.executeSql(sourceTable);
    }

    /**
     * Registers the upsert-kafka sink table {@code t_sink} on topic
     * {@code topicG}. The PRIMARY KEY routes rows with the same key to the
     * same partition; upsert-kafka requires both a key and a value format.
     */
    private static void registerSinkTable(StreamTableEnvironment tEnv) {
        String sinkTable =
                "create table t_sink (" +
                        "id STRING ," +
                        "sum_vc INT," +
                        // Primary key constraint: same-key records go to the same partition.
                        "PRIMARY KEY (id) NOT ENFORCED" +
                ") WITH (" +
                        // upsert-kafka performs both inserts and updates (changelog sink).
                        "'connector' = 'upsert-kafka'," +
                        "'topic' = 'topicG'," +
                        "'properties.bootstrap.servers' = '" + BOOTSTRAP_SERVERS + "', " +
                        "'key.format' = 'json'," +
                        "'value.format' = 'json'" +
                        // NOTE(review): upsert-kafka always provides at-least-once delivery;
                        // 'sink.delivery-guarantee' is not a supported option for it.
                       // "'sink.delivery-guarantee' = 'at-least-once' " +
                ")";
        tEnv.executeSql(sinkTable);
    }
}
