package com.atguigu.flink.tableapi;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.descriptors.*;
import org.apache.kafka.clients.consumer.ConsumerConfig;

/**
 * Created by Smexy on 2023/2/5
 */
/**
 * Demo: map a Kafka topic to a dynamic table using the (legacy) descriptor
 * {@code connect()} API, then query it with SQL and print the result stream.
 *
 * <p>Requires a reachable Kafka broker at {@code hadoop102:9092} with a topic
 * named {@code topicA}; the job runs until cancelled.
 */
public class Demo7_ReadKafka
{
    public static void main(String[] args) {

        StreamExecutionEnvironment streamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(streamEnv);

        // Describe the Kafka source: topic, version, and consumer properties.
        // "universal" is required — it selects the version-agnostic Kafka connector.
        Kafka kafkaDescriptor = new Kafka()
            .version("universal")
            .topic("topicA")
            .startFromEarliest()
            .property(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop102:9092")
            .property(ConsumerConfig.GROUP_ID_CONFIG, "test3")
            .property(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true")
            .property(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "500");

        // Column metadata for the resulting table.
        Schema tableSchema = new Schema()
            .field("id", DataTypes.STRING())
            .field("ts", DataTypes.BIGINT())
            .field("vc", DataTypes.INT());

        // Register the external Kafka source as temporary table "t1":
        // JSON-encoded records, append-only stream semantics.
        tableEnv.connect(kafkaDescriptor)
                .withFormat(new Json())
                .withSchema(tableSchema)
                .inAppendMode()
                .createTemporaryTable("t1");

        // Query the table and print rows to stdout; execute() submits the job.
        tableEnv.sqlQuery("select * from t1 ")
                .execute()
                .print();

    }
}
