package com.atguigu.day12;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Flink SQL demo: reads CSV records from the Kafka topic {@code source_topic},
 * keeps only the rows with {@code id = 's2'}, and writes them back to the Kafka
 * topic {@code sink_topic} — all through the Table API / SQL layer.
 */
public class Flink02_SQL_KafkaToKafka {

    public static void main(String[] args) throws Exception {

        // 1. Stream execution environment; parallelism 1 keeps the demo output ordered.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // 2. Table environment layered on top of the streaming environment.
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // 3. Register the Kafka-backed source table (CSV-encoded values).
        String sourceDdl = "CREATE TABLE source_kafka( \n" +
                "id String, \n" +
                "ts bigint , \n" +
                "vc int)\n" +
                "WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'properties.bootstrap.servers' = 'hadoop102:9092',\n" +
                "  'properties.group.id' = 'atguigu',\n" +
                "  'scan.startup.mode' = 'latest-offset',\n" +
                "  'topic' = 'source_topic',\n" +
                "  'value.format' = 'csv'\n" +
                ")";
        tableEnv.executeSql(sourceDdl);

        // 4. Register the Kafka-backed sink table with the same schema.
        String sinkDdl = "CREATE TABLE sink_kafka( \n" +
                "id String, \n" +
                "ts bigint , \n" +
                "vc int)\n" +
                "WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'properties.bootstrap.servers' = 'hadoop102:9092',\n" +
                "  'topic' = 'sink_topic',\n" +
                "  'value.format' = 'csv'\n" +
                ")";
        tableEnv.executeSql(sinkDdl);

        // 5. Filter the source and submit the insert job via the Table API.
        //    (An equivalent pure-SQL form would be a single
        //    "insert into sink_kafka select ..." executeSql call.)
        Table filtered = tableEnv.sqlQuery("select * from source_kafka where id = 's2'");
        filtered.executeInsert("sink_kafka");
    }
}
