package cn.itcast.flink.process;

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

/**
 * Author itcast
 * Desc Flink SQL demo:
 *  1. Set up the stream execution environment and the stream table environment.
 *  2. Execute DDL to create the input_kafka source table and the output_kafka sink table.
 *  3. Query: filter rows whose status is 'success'.
 *  4. Convert the result to a retract stream and print it.
 *  5. Insert the filtered rows into output_kafka.
 *  6. Execute the job.
 */
public class FlinkSQL_Table_Demo06 {
    public static void main(String[] args) throws Exception {
        // 1. Environment: stream execution environment plus the bridging table environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

        // 2. Source: DDL for the input_kafka table backed by the 'input_kafka' Kafka topic.
        tEnv.executeSql(
                "CREATE TABLE input_kafka (\n" +
                "  `user_id` BIGINT,\n" +
                "  `page_id` BIGINT,\n" +
                "  `status` STRING\n" +
                ") WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 'input_kafka',\n" +
                "  'properties.bootstrap.servers' = '192.168.88.161:9092',\n" +
                "  'properties.group.id' = 'testGroup',\n" +
                "  'scan.startup.mode' = 'latest-offset',\n" +
                "  'format' = 'json'\n" +
                ")");

        // Sink: DDL for the output_kafka table (was missing although the class doc
        // and the step comments below referred to it).
        tEnv.executeSql(
                "CREATE TABLE output_kafka (\n" +
                "  `user_id` BIGINT,\n" +
                "  `page_id` BIGINT,\n" +
                "  `status` STRING\n" +
                ") WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 'output_kafka',\n" +
                "  'properties.bootstrap.servers' = '192.168.88.161:9092',\n" +
                "  'format' = 'json'\n" +
                ")");

        // 3. Query: keep only the rows whose status is 'success'.
        String sql = "SELECT user_id,page_id,status FROM input_kafka WHERE status ='success'";

        // 4. Run the query, then convert the (continuously updating) result to a
        //    retract stream of (isAdd, row) pairs and print it for debugging.
        Table resultTable = tEnv.sqlQuery(sql);
        resultTable.printSchema();
        DataStream<Tuple2<Boolean, Row>> stream = tEnv.toRetractStream(resultTable, Row.class);
        stream.print();

        // 5. Insert the filtered rows into the output_kafka sink table (was missing).
        //    executeInsert submits its own Table API job asynchronously.
        resultTable.executeInsert("output_kafka");

        // 6. Execute: without this call the DataStream pipeline above (stream.print())
        //    is only declared, never run — the original demo was missing it.
        env.execute("FlinkSQL_Table_Demo06");
    }
}