package com.atguigu.chapter11;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Author: Pepsi
 * Date: 2023/8/24
 * Desc: Flink SQL demo that declares an upsert-kafka backed table (keyed by
 *       sensor id) and prints its changelog stream to stdout.
 *       Note: "Kfaka" in the class name is a typo for "Kafka"; kept for
 *       compatibility with the file/class naming already in use.
 */
public class Flink10_SQL_Kfaka_Update {

    /**
     * Declares an upsert-kafka table over topic {@code flink_sink_kafka} and
     * prints every row of the resulting changelog stream.
     *
     * <p>Blocks indefinitely: {@code execute().print()} keeps consuming from
     * Kafka until the process is stopped.
     *
     * @param args unused
     */
    public static void main(String[] args) {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Single parallelism so the printed output is in one deterministic stream.
        env.setParallelism(1);

        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

        // The 'upsert-kafka' connector requires a PRIMARY KEY: records sharing a
        // key are interpreted as updates, and a null value as a delete.
        // 'not enforced' is mandatory — Flink does not validate key uniqueness.
        tEnv.executeSql("create table sensor(" +
                " id string, " +
                " vc int ," +
                " primary key(id) not enforced" +
                ")with(" +
                "'connector' = 'upsert-kafka'," +
                "'topic' = 'flink_sink_kafka'," +
                "'properties.bootstrap.servers' = 'hadoop101:9092'," +
                // Key and value are serialized independently; both as JSON here.
                "'key.format' = 'json'," +
                "'value.format' = 'json'" +
                ")");

        // Triggers the streaming job; print() blocks and emits +I/-U/+U rows.
        tEnv.sqlQuery("select * from sensor").execute().print();
    }
}
