package com.atguigu.flink.sql.connector;

import com.atguigu.flink.function.WaterSensorMapFunction;
import com.atguigu.flink.pojo.WaterSensor;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Created by Smexy on 2023/4/11
 *
 *  insert into 目标表  select xxx from 源表
 *
 *  目标表： 把数据写入 Kafka（topicC），不是文件系统
 */
public class Demo4_KafkaWrite
{
    /**
     * Demo: read text lines from a socket, map them to {@code WaterSensor} records,
     * register the stream as source table t2, and INSERT it into Kafka sink table t1
     * via Flink SQL.
     */
    public static void main(String[] args) throws Exception {

        // 1. Build the source stream: socket lines parsed into WaterSensor POJOs.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        SingleOutputStreamOperator<WaterSensor> ds = env
            .socketTextStream("hadoop102", 8888)
            .map(new WaterSensorMapFunction());

        StreamTableEnvironment tableEnvironment = StreamTableEnvironment.create(env);
        // Register the DataStream as source table t2 for use in SQL.
        tableEnvironment.createTemporaryView("t2",ds);

        /*
              sink.partitioner: controls how rows written to Kafka are distributed
              across the topic's partitions ('round-robin' ignores the record key).
         */
        String createTableSql = " create table t1 ( id STRING, ts BIGINT , vc INT " +
            "   )with(" +
            "  'connector' = 'kafka', " +
            "  'topic' = 'topicC'," +
            "  'properties.bootstrap.servers' = 'hadoop102:9092'," +
            "  'sink.partitioner' = 'round-robin'," +
            "  'format' = 'json' ) " ;

        // Create the sink table t1, mapped to Kafka topic 'topicC' (JSON-encoded).
        tableEnvironment.executeSql(createTableSql);

        // Submit the streaming INSERT job. executeSql() submits and runs the Table API
        // job itself, so no further trigger is required.
        tableEnvironment.executeSql(" insert into t1 select * from t2 ");

        // NOTE: do NOT call env.execute() here. The whole pipeline is executed by the
        // Table API job above; the DataStream environment has no sink of its own, so
        // env.execute() would fail with "No operators defined in the streaming topology".
    }
}
