package com.sand.pro.flinksql_pro.sql;

import com.sand.pro.flinksql_pro.bean.WaterSensor;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.util.concurrent.ExecutionException;

/**
 * Demo job: builds a small in-memory stream of {@code WaterSensor} records and
 * writes it to a Kafka topic through the {@code upsert-kafka} table connector.
 *
 * <p>The sink table declares {@code PRIMARY KEY(id) NOT ENFORCED}, so rows with
 * the same sensor id are emitted as upserts keyed by {@code id}.
 */
public class Data2kafka {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        DataStreamSource<WaterSensor> waterSensorStream =
                env.fromElements(new WaterSensor("sensor_1" , 1000L, 10),
                        new WaterSensor("sensor_1" , 2000L, 20),
                        new WaterSensor("sensor_2" , 3000L, 30),
                        new WaterSensor("sensor_1" , 4000L, 40),
                        new WaterSensor("sensor_1" , 5000L, 50),
                        new WaterSensor("sensor_2" , 6000L, 60));

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, EnvironmentSettings.newInstance()
                // Watermark is only supported in blink planner
                .useBlinkPlanner()
                .inStreamingMode()
                .build());
        env.getConfig().setRestartStrategy(RestartStrategies.noRestart());
        // Access the underlying Flink configuration of the table environment.
        Configuration configuration = tableEnv.getConfig().getConfiguration();
        // Mark sources idle after 1000 ms so watermarks can advance without all partitions emitting.
        configuration.setString("table.exec.source.idle-timeout" , "1000");
        // 1. Convert the DataStream into an (unregistered) Table.
        Table inputTable = tableEnv.fromDataStream(waterSensorStream);
        // 2. Register the table as a temporary view so SQL can reference it by name.
        tableEnv.createTemporaryView("source_sensor" , inputTable);
        // NOTE(review): topic name says 'upset' — confirm whether 'upsert' was intended.
        tableEnv.executeSql("create table sink_sensor(id string, ts bigint, vc int,PRIMARY KEY(id) NOT ENFORCED) with("
                + "'connector' = 'upsert-kafka',"
                + "'topic' = 'topic_sink_sensor_upset_2',"
                + "'properties.bootstrap.servers' = '172.28.246.66:9092,172.28.246.67:9092,172.28.246.68:9092',"
                + "'properties.group.id' = 'user_log',"
                + "'key.format' = 'json',"
                + " 'key.json.ignore-parse-errors' = 'true',"
                + "'value.format' = 'json',"
                + "'value.json.fail-on-missing-field' = 'false',"
                + "'value.fields-include' = 'ALL'"
                + ")");

        // 3. Query the source view and write the rows into the sink table;
        //    await() blocks until the insert job finishes (or fails).
        try {
            tableEnv.executeSql("insert into sink_sensor select * from source_sensor").await();
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers/JVM shutdown hooks can observe it.
            Thread.currentThread().interrupt();
            throw new IllegalStateException("Interrupted while waiting for the insert job to finish", e);
        } catch (ExecutionException e) {
            // Propagate with the cause preserved instead of swallowing the job failure.
            throw new IllegalStateException("Insert into sink_sensor failed", e);
        }
    }
}
