package com.atguigu.flink.chapter11;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;


/**
 * Flink SQL example: reads JSON sensor records from one Kafka topic and writes
 * them unchanged to another Kafka topic using two DDL-registered tables and a
 * single {@code INSERT INTO ... SELECT} statement.
 *
 * <p>Pipeline: Kafka topic {@code s1} (source table {@code sensor})
 * → {@code INSERT INTO s_out SELECT * FROM sensor}
 * → Kafka topic {@code s2} (sink table {@code s_out}).
 */
public class Flink08_SQL_Kafka {
    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        // Web UI port. NOTE: the original code used the misspelled key "rest.prot",
        // which Flink silently ignores; the correct option name is "rest.port".
        configuration.setInteger("rest.port", 10000);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(configuration);
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Create a source table backed by Kafka: consumes JSON records from topic "s1".
        // The JSON format drops rows that fail to parse, so malformed/empty data is filtered out.
        tableEnv
                .executeSql(
                        "create table sensor(" +
                                "  id string," +
                                " ts bigint," +
                                " vc int " +
                                ")with (" +
                                "    'connector' = 'kafka',\n" +
                                "    'topic' = 's1',\n" +
                                "    'properties.bootstrap.servers' = 'hadoop162:9092',\n" +
                                "    'properties.group.id' = 'Flink07_SQL_Kafka',\n" +
                                "    'scan.startup.mode' = 'latest-offset',\n" +
                                "    'format' = 'json' )");

        // Create a sink table backed by Kafka: writes JSON records to topic "s2".
        tableEnv
                .executeSql(
                        "create table s_out(" +
                                "  id string," +
                                " ts bigint," +
                                " vc int " +
                                ")with (" +
                                "    'connector' = 'kafka',\n" +
                                "    'topic' = 's2',\n" +
                                "    'properties.bootstrap.servers' = 'hadoop162:9092',\n" +
                                "    'sink.partitioner' = 'round-robin',\n" +
                                "    'format' = 'json' )");

        // Equivalent alternative:
        //   tableEnv.sqlQuery("select * from sensor").executeInsert("s_out");
        // A streaming source can only be consumed once per job — you cannot both
        // print it to the console and sink it to Kafka in the same pipeline.
        tableEnv.executeSql("insert into s_out select * from sensor");
    }
}
