package com.atguigu.flink.sql;

import com.atguigu.flink.function.WaterSensorMapFunction;
import com.atguigu.flink.pojo.WaterSensor;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Created by Smexy on 2023/3/4
 *
 * Reading from an upsert-kafka source does NOT support the
 * 'scan.startup.mode' option — you cannot pick a starting offset;
 * consumption always begins from the earliest offset.
 *
 * upsert-kafka is mainly used to persist a changelog stream (a table
 * that receives updates). Unless you consume from the very beginning,
 * there is no way to reconstruct a row's original (inserted) state,
 * how it was updated, or its state after each update.
 */
public class Demo8_ReadUpsertKafka
{
    /**
     * Demo entry point: defines an upsert-kafka backed table and prints
     * its full changelog to stdout.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {

        // Streaming environment plus its Table API bridge.
        StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(environment);

        // Parse raw socket lines into WaterSensor POJOs.
        SingleOutputStreamOperator<WaterSensor> sensorStream =
            environment.socketTextStream("hadoop103", 8888)
                       .map(new WaterSensorMapFunction());

        // Give the stream a name so SQL statements can reference it.
        tEnv.createTemporaryView("source", sensorStream);

        // DDL mapping Kafka topic 'topicE' through the upsert-kafka connector.
        // A PRIMARY KEY is mandatory for upsert-kafka: it determines which
        // rows an incoming record updates.
        String ddl =
              " create table t1 ( id string   , vc int , maxTs bigint  ,"
            + "  PRIMARY KEY (id,vc)  NOT ENFORCED )"
            + " with ( "
            + " 'connector' = 'upsert-kafka' ,   "
            + " 'topic' =  'topicE'  ,"
            + "  'properties.group.id' = 'haha', "
            + " 'properties.bootstrap.servers' = 'hadoop102:9092',"
            + "  'key.format' = 'json' ,"
            + "  'value.format' = 'json' "
            + "      )                 ";

        tEnv.executeSql(ddl);

        // Query the table and stream its changelog to stdout; print()
        // triggers job execution and blocks.
        tEnv.sqlQuery("select * from t1")
            .execute()
            .print();
    }
}
