package cn.itcast.flink.sink.connector;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Author itcast
 * Date 2021/12/8 16:16
 * Desc - 从kafka中读取数据并将数据写入到kafka中
 */
/**
 * Author itcast
 * Date 2021/12/8 16:16
 * Desc - Reads JSON order records from the Kafka topic "source", filters the
 * rows whose areaName is Beijing, and writes them to the Kafka topic "output",
 * all via Flink SQL.
 */
public class OrderSinkKafka {
    public static void main(String[] args) throws Exception {
        // Create the stream execution environment; parallelism 1 keeps the
        // console-consumer output easy to follow while testing.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Use the Blink planner in streaming mode.
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .inStreamingMode()
                .useBlinkPlanner()
                .build();

        // Restart at most 3 times, waiting 3000 ms between attempts.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 3000));

        // Create the stream table environment on top of the stream environment.
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);

        // Source table: JSON records from the "source" topic.
        // Create the topic beforehand (e.g. with kafka tool) and insert test data.
        String sql = "CREATE TABLE t_order_kafka_input (" +
                "id int," +
                "timestamps string," +
                "category string," +
                "money string, areaName string" +
                ") WITH (" +
                "  'connector' = 'kafka'," +
                "  'topic' = 'source'," +
                "  'properties.bootstrap.servers' = 'node1:9092,node2:9092,node3:9092'," +
                "  'properties.group.id' = 'testGroup'," +
                "  'scan.startup.mode' = 'latest-offset'," +
                "  'format' = 'json'" +
                ")";
        System.out.println(sql);
        tEnv.executeSql(sql);

        // Sink table: JSON records to the "output" topic.
        // Create the topic beforehand and watch it with:
        //   bin/kafka-console-consumer.sh --bootstrap-server node1:9092,node2:9092,node3:9092 --topic output
        String outputSql = "CREATE TABLE t_order_kafka_output (" +
                "id int," +
                "timestamps string," +
                "category string," +
                "money string, areaName string" +
                ") WITH (" +
                "  'connector' = 'kafka'," +
                "  'topic' = 'output'," +
                "  'properties.bootstrap.servers' = 'node1:9092,node2:9092,node3:9092'," +
                "  'properties.group.id' = '_producer_output_'," +
                "  'scan.startup.mode' = 'latest-offset'," +
                "  'format' = 'json'" +
                ")";
        System.out.println(outputSql);
        tEnv.executeSql(outputSql);

        // Select only the Beijing-area orders from the source table.
        Table result = tEnv.sqlQuery("select * from t_order_kafka_input where areaName='北京'");

        // Submit the continuous INSERT job through the supported API.
        // The original code concatenated the Table object into a SQL string
        // ("insert into ... select * from " + result), which only works through
        // the side effect of Table#toString() registering an anonymous table —
        // executeInsert() is the documented, stable way to do this.
        result.executeInsert("t_order_kafka_output");

        // NOTE: env.execute() is intentionally NOT called. This pipeline is
        // pure Table API/SQL — executeSql()/executeInsert() submit their own
        // jobs, and with no DataStream operators registered, env.execute()
        // would fail with "No operators defined in streaming topology".
    }
}