package com.atguigu.flink.sql;

import com.atguigu.flink.function.WaterSensorMapFunction;
import com.atguigu.flink.pojo.WaterSensor;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Created by Smexy on 2023/2/5
 */
/**
 * Demo: reading a Kafka topic through the {@code upsert-kafka} connector.
 *
 * <p>Unlike the plain {@code kafka} connector (append-only stream, every row
 * shows up as +I), {@code upsert-kafka} produces a changelog stream
 * (+I / -U / +U) keyed by the declared PRIMARY KEY. It does not support a
 * configurable start position: it always reads from the beginning of the
 * topic, because the full history is required to reconstruct each key's
 * change sequence.
 */
public class Demo8_ReadUpsertKafka
{
    public static void main(String[] args) {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnvironment = StreamTableEnvironment.create(env);

        // Side input from a socket, exposed as view t2. Kept for interactive
        // experimentation (e.g. joining against t1); not used by the query below.
        SingleOutputStreamOperator<WaterSensor> ds = env
            .socketTextStream("hadoop103", 8888)
            .map(new WaterSensorMapFunction());
        Table table = tableEnvironment.fromDataStream(ds);

        // Register the table under a name so it can be referenced in SQL.
        tableEnvironment.createTemporaryView("t2", table);

        /*
                Reading with upsert-kafka does not support declaring a start
                position; it must read from the head of the topic.
                    Only by reading from the beginning can the change history
                    of each record be reconstructed.
                        The upsert-kafka connector consumes a changelog
                        stream by default: +I, -U, +U.

                 The plain kafka connector consumes an append-only stream and
                 shows every record as +I.
         */
        // upsert-kafka requires a PRIMARY KEY (NOT ENFORCED) and a key.format;
        // note: it has no 'scan.startup.mode' option — it always reads from earliest.
        String createTableSql = "create table t1( id string , ts bigint ,sumVc double , primary key(id,ts) NOT ENFORCED ) with(" +
            "                    'connector' = 'upsert-kafka' ," +
            "                    'topic' = 'topicE' , " +
            "                    'properties.bootstrap.servers' = 'hadoop102:9092' , " +
            "                    'properties.group.id' = 'test3' , " +
            "                    'value.format' = 'json'  ," +
            "                    'key.format' = 'json'  " +
            "                    )";

        // Comparison DDL: same topic read through the plain kafka connector
        // (append-only). Swap this into executeSql below to compare the two
        // connectors' output. Do NOT execute both — they both create table t1.
        String kafka = "create table t1( id string,ts bigint ,vc double ) with(" +
            "                    'connector' = 'kafka' ," +
            "                    'topic' = 'topicE' , " +
            "                    'properties.bootstrap.servers' = 'hadoop102:9092' , " +
            "                    'properties.group.id' = 'test3' , " +
            "                    'scan.startup.mode' = 'earliest-offset' , " +
            "                    'value.format' = 'json'  " +
            "                    )";

        // Create t1 via upsert-kafka — this is what the class name advertises.
        // (Previously the plain-kafka DDL was executed here, leaving
        // createTableSql dead and the demo not exercising upsert-kafka at all.)
        tableEnvironment.executeSql(createTableSql);

        // Query t1 and print the changelog (+I / -U / +U rows).
        tableEnvironment.sqlQuery("select * from t1 ")
                        .execute()
                        .print();

    }
}
