package com.hu.flink12.api.sql;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

/**
 * Flink SQL / Table API demo: consumes JSON events from the Kafka topic
 * {@code input_topic}, keeps only rows whose {@code status} equals
 * {@code success}, and writes the filtered rows back to the Kafka topic
 * {@code output_topic}.
 *
 * <p>Sample input records:
 * <pre>
 * {"user_id":"1","page_id":"11","status":"success"}
 * {"user_id":"2","page_id":"22","status":"success"}
 * {"user_id":"3","page_id":"33","status":"fail"}
 * {"user_id":"4","page_id":"44","status":"success"}
 * </pre>
 *
 * @author hujianjun
 * @since 2021/2/9 23:02
 */
public class KafkaSql {
    public static void main(String[] args) throws Exception {
        // 1. Create the streaming environment and a Blink-planner table environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        EnvironmentSettings envSettings =
                EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, envSettings);

        // 2. Register the Kafka source table (DDL is executed eagerly; the
        //    returned TableResult carries no data for DDL, so it is not kept).
        // NOTE(review): the sample records encode user_id/page_id as JSON strings
        // ("1"), yet the columns are declared BIGINT — whether the 'json' format
        // coerces quoted numbers depends on its leniency; confirm against real
        // topic data or declare the columns as STRING.
        tableEnv.executeSql("create table input_table(\n" +
                "user_id bigint,\n" +
                "page_id bigint,\n" +
                "status string\n" +
                ")with (\n" +
                "'connector'='kafka',\n" +
                "'topic'='input_topic',\n" +
                "'properties.bootstrap.servers'='localhost:9092',\n" +
                "'properties.group.id'='flink-group',\n" +
                "'scan.startup.mode'='latest-offset',\n" +
                "'format'='json'\n" +
                ")");

        // 3. Filter the source down to successful events.
        Table inResult = tableEnv.sqlQuery("select * from input_table where status='success'");

        // Print the filtered rows as a retract stream. This also attaches a
        // DataStream operator, so the env.execute() below has a topology to run
        // (without it, execute() fails with "No operators defined in streaming
        // topology").
        tableEnv.toRetractStream(inResult, Row.class).print("打印input_table的schema信息：");

        // 4. Register the Kafka sink table.
        tableEnv.executeSql("create table output_table(\n" +
                "user_id bigint,\n" +
                "page_id bigint,\n" +
                "status string\n" +
                ")with (\n" +
                "'connector'='kafka',\n" +
                "'topic'='output_topic',\n" +
                "'properties.bootstrap.servers'='localhost:9092',\n" +
                "'format'='json',\n" +
                "'sink.partitioner'='round-robin'\n" +
                ")");

        // Register the filtered Table under an explicit name and reference it by
        // that name, instead of concatenating the Table object into the SQL text
        // (which relied on Table.toString() registering an anonymous table as a
        // side effect). executeSql() submits the INSERT job immediately.
        tableEnv.createTemporaryView("filtered_input", inResult);
        tableEnv.executeSql("insert into output_table select * from filtered_input");

        // 5. Launch the DataStream part of the pipeline (the print sink); the
        //    INSERT above was already submitted as its own job by executeSql().
        env.execute();
    }
}
