package com.atguigu.flink.chapter11;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.descriptors.*;

import static org.apache.flink.table.api.Expressions.$;

public class Flink05_Table_BaseUse_Kafka {

    /**
     * Reads JSON sensor records (id, ts, vc) from Kafka topic "s1" as a dynamic
     * table, keeps only the rows whose id equals "sensor_1", and writes the
     * filtered rows back to Kafka topic "s2" in JSON format.
     *
     * <p>NOTE(review): the {@code connect(...)} descriptor API used here is
     * deprecated and removed in newer Flink versions — migrate to SQL DDL
     * ({@code CREATE TABLE ... WITH (...)}) when upgrading.
     */
    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        // Web UI port. Fixed typo: the Flink option key is "rest.port",
        // not "rest.prot" (the misspelled key was silently ignored).
        configuration.setInteger("rest.port", 10000);
        // Fixed: the configuration must be passed to the environment factory,
        // otherwise the rest.port setting has no effect.
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.getExecutionEnvironment(configuration);
        env.setParallelism(1);

        // 1. Create the table execution environment.
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Shared schema for both the Kafka source and sink tables.
        Schema schema = new Schema()
                .field("id", DataTypes.STRING())
                .field("ts", DataTypes.BIGINT())
                .field("vc", DataTypes.INT());

        // 2. Register a source table backed by Kafka topic "s1".
        tableEnv.connect(
                new Kafka()
                        .version("universal")
                        .property("bootstrap.servers", "hadoop162:9092") // Kafka brokers
                        // Fixed: the Kafka consumer property key is "group.id";
                        // the previous "groupId" key was silently ignored.
                        .property("group.id", "atguigu")
                        .topic("s1")
                        .startFromLatest()) // consume from the latest offsets
                .withFormat(new Json())
                .withSchema(schema)
                .createTemporaryTable("sensor");

        Table sensor = tableEnv.from("sensor");

        // 3. Keep only the rows for sensor_1.
        Table result = sensor
                .where($("id").isEqual("sensor_1"))
                .select($("*"));

        // 4. Register a sink table backed by Kafka topic "s2".
        tableEnv.connect(
                new Kafka()
                        .version("universal")
                        .property("bootstrap.servers", "hadoop162:9092") // Kafka brokers
                        .property("group.id", "atguigu") // fixed key, see source table above
                        .topic("s2")
                        .sinkPartitionerRoundRobin()) // spread writes across partitions
                .withFormat(new Json())
                .withSchema(schema)
                .createTemporaryTable("s2");

        // 5. Continuously insert the filtered rows into the sink table.
        result.executeInsert("s2");
    }
}
