package com.atguigu.flink.sql.connector;

import com.atguigu.flink.function.WaterSensorMapFunction;
import com.atguigu.flink.pojo.WaterSensor;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Demo: INSERT INTO target_table SELECT * FROM source_table
 *   Source table: created from a DataStream (socket text stream).
 *   Target table: data is written to a Kafka topic via the 'kafka' connector.
 *   (Note: an earlier comment said "filesystem" — the code actually uses Kafka.)
 */
public class Demo4_WriteKafka
{
    /**
     * Reads WaterSensor records from a socket stream, registers them as a
     * temporary table, and writes them to a Kafka topic via Flink SQL.
     */
    public static void main(String[] args) {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(2);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Source: socket text stream parsed into WaterSensor POJOs.
        SingleOutputStreamOperator<WaterSensor> sensorStream =
            env.socketTextStream("hadoop102", 8888)
               .map(new WaterSensorMapFunction());

        // Register the stream as a temporary view so SQL can query it.
        tableEnv.createTemporaryView("t2", tableEnv.fromDataStream(sensorStream));

        // DDL for the Kafka sink table (JSON-encoded records on topic 't3').
        String sinkDdl =
            "CREATE TABLE t1 (" +
            "  id STRING," +
            "  ts BIGINT," +
            "  vc INT " +
            ")  WITH (" +
            " 'connector' = 'kafka'," +
            "  'topic' = 't3'," +
            "  'properties.bootstrap.servers' = 'hadoop102:9092'," +
            "  'sink.partitioner' = 'default' ," +
            "  'format' = 'json' " +
            ")";
        tableEnv.executeSql(sinkDdl);

        // Submit the streaming insert job; executeSql launches it, so no
        // explicit env.execute() call is required here.
        tableEnv.executeSql("insert into t1 select * from t2");

    }
}
