package com.atguigu.flink.sql;

import com.atguigu.flink.function.WaterSensorMapFunction;
import com.atguigu.flink.pojo.WaterSensor;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Created by Smexy on 2023/2/5
 */
public class Demo6_WriteKafkaAgg
{
    /**
     * Reads water-sensor records from a socket stream, aggregates the sum of vc per
     * sensor id, and writes the resulting changelog (inserts and updates) to Kafka
     * using the upsert-kafka connector.
     */
    public static void main(String[] args) {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnvironment = StreamTableEnvironment.create(env);

        SingleOutputStreamOperator<WaterSensor> ds = env
            .socketTextStream("hadoop103", 8888)
            .map(new WaterSensorMapFunction());
        Table table = tableEnvironment.fromDataStream(ds);

        // Register the stream-backed table under a name usable from SQL.
        tableEnvironment.createTemporaryView("t2",table);

        /*
                Sink column names are arbitrary, but their TYPES must match the
                insert query; values are mapped by column ORDER, not by name.

                upsert-kafka connector:
                    - insert and update-after rows are written as ordinary messages.
                    - delete rows are written as tombstones: Message(key=xxx, value=null)

                A primary key must be declared (it becomes the Kafka message key).
                Flink cannot enforce key constraints on external systems, so the
                declaration must be marked NOT ENFORCED — a bare "primary key"
                fails validation with "Flink doesn't support ENFORCED mode".
         */
        String createTableSql = "create table t1( id string , sumVc double , primary key (id) not enforced ) with(" +
            "                    'connector' = 'upsert-kafka' ," +
            "                    'topic' = 'topicA' , " +
            "                    'properties.bootstrap.servers' = 'hadoop102:9092' , " +
            "                    'value.format' = 'json'  ," +
            "                    'key.format' = 'json'  " +
            "                    )";

        // Execute the DDL to create the sink table.
        tableEnvironment.executeSql(createTableSql);

        /*
            With the plain 'kafka' connector this insert would fail:

            Exception in thread "main" org.apache.flink.table.api.TableException:
            Table sink 'default_catalog.default_database.t1'
            doesn't support consuming update changes which is produced by
            node GroupAggregate(groupBy=[id], select=[id, SUM(vc) AS a])

            A grouped aggregation emits updates to previously-emitted rows, and the
            plain kafka connector is insert-only. The upsert-kafka connector supports
            both inserts and updates (deletes become tombstone messages).
         */
        tableEnvironment.executeSql("insert into t1 select id,sum(vc) a from t2 group by id");

    }
}
