package com.atguigu.flink.chapter11;

import com.atguigu.flink.chapter05.Source.WaterSensor;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.descriptors.Csv;
import org.apache.flink.table.descriptors.FileSystem;
import org.apache.flink.table.descriptors.Kafka;
import org.apache.flink.table.descriptors.Schema;

import static org.apache.flink.table.api.Expressions.$;

/**
 * Demonstrates sinking a Table API query result to Kafka using the legacy
 * {@code connect()} descriptor API (Kafka + Csv + Schema descriptors).
 *
 * @author cjp
 * @version 1.0
 * @date 2021/1/27 10:11
 */
public class Flink06_TableAPI_Connect_KafkaSink {
    public static void main(String[] args) throws Exception {
        // Bounded in-memory stream of sensor readings; parallelism 1 keeps output ordered.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        DataStreamSource<WaterSensor> sensorSource = env.fromElements(
                new WaterSensor("sensor_1", 1000L, 10),
                new WaterSensor("sensor_1", 2000L, 20),
                new WaterSensor("sensor_2", 3000L, 30),
                new WaterSensor("sensor_1", 4000L, 40),
                new WaterSensor("sensor_1", 5000L, 50),
                new WaterSensor("sensor_2", 6000L, 60));

        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

        // Convert the DataStream into a Table, keep rows with vc >= 20,
        // and project the (id, ts, vc) columns.
        Table sensor = tEnv.fromDataStream(sensorSource);
        Table filtered = sensor
                .where($("vc").isGreaterOrEqual(20))
                .select($("id"), $("ts"), $("vc"));

        // Register a Kafka sink table via the legacy connect()/descriptor API.
        // NOTE(review): sink columns (a, b, c) are matched to the query's
        // (id, ts, vc) by position, not by name — presumably intentional for
        // this demo; confirm against the Flink version in use.
        Kafka kafkaDescriptor = new Kafka()
                .version("universal")
                .topic("flinksql0820")
                .sinkPartitionerRoundRobin()
                .property("bootstrap.servers", "hadoop1:9092,hadoop2:9092,hadoop3:9092");

        Schema sinkSchema = new Schema()
                .field("a", DataTypes.STRING())
                .field("b", DataTypes.BIGINT())
                .field("c", DataTypes.INT());

        tEnv
                .connect(kafkaDescriptor)
                .withFormat(new Csv().fieldDelimiter(','))
                .withSchema(sinkSchema)
                .createTemporaryTable("kafkaSinkTable");

        // executeInsert() submits the job itself, so no env.execute() is needed;
        // calling env.execute() here would fail because no DataStream operators
        // were added after the Table conversion.
        filtered.executeInsert("kafkaSinkTable");

//        env.execute();
    }
}
