package cn.xuexiyuan.flinkstudy.sql;

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

/**
 * @Description: 从 Kafka 中消费数据并过滤出状态为 success 的数据再写入到 Kafka
 * @Author 左龙龙
 * @Date 21-3-30
 * @Version 1.0
 **/
/**
 * Reads JSON events from the {@code input_kafka} topic, keeps only the rows whose
 * {@code status} column equals {@code "success"}, and writes the filtered rows to
 * the {@code output_kafka} topic.
 */
public class Demo06_kafka {

    public static void main(String[] args) throws Exception {
        // 0. env — streaming environment plus its Table API bridge.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // 1. source — Kafka-backed table, JSON format, read from the earliest offset.
        // The returned TableResult of a DDL statement carries no useful payload, so it is not kept.
        tableEnv.executeSql("CREATE TABLE input_kafka (\n" +
                "  `user_id` BIGINT,\n" +
                "  `item_id` BIGINT,\n" +
                "  `status` STRING\n" +
                ") WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 'input_kafka',\n" +
                "  'properties.bootstrap.servers' = 'localhost:9092',\n" +
                "  'properties.group.id' = 'testGroup',\n" +
                "  'scan.startup.mode' = 'earliest-offset',\n" +
                "  'format' = 'json'\n" +
                ")");

        // Sink table — same schema, round-robin partitioning across the topic's partitions.
        tableEnv.executeSql("CREATE TABLE output_kafka (\n" +
                "  `user_id` BIGINT,\n" +
                "  `item_id` BIGINT,\n" +
                "  `status` STRING\n" +
                ") WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 'output_kafka',\n" +
                "  'properties.bootstrap.servers' = 'localhost:9092',\n" +
                "  'sink.partitioner' = 'round-robin',\n" +
                "  'format' = 'json'\n" +
                ")");

        // 2. transformation — keep only rows whose status is 'success'.
        Table etlResult = tableEnv.sqlQuery("select * from input_kafka where status = 'success'");
        etlResult.printSchema();
        // Debug sink: each element is (true = insert / false = retract, row).
        DataStream<Tuple2<Boolean, Row>> tuple2DS = tableEnv.toRetractStream(etlResult, Row.class);
        tuple2DS.print();

        // 3. sink — write the filtered table into the Kafka sink table.
        // Fix: the original concatenated the Table object into the SQL string
        // ("insert into output_kafka select * from " + etlResult), which relies on
        // Table.toString() implicitly registering an anonymous table — a fragile,
        // deprecated behavior. executeInsert is the supported API for this.
        etlResult.executeInsert("output_kafka");

        // 4. execute — submits the DataStream job (required by the print() sink above;
        // executeInsert above submits its own Table job independently).
        env.execute();

    }
}

/*
# 创建 topic
./bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 3 --topic input_kafka
./bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 3 --topic output_kafka


# 启动生产者控制台程序
./bin/kafka-console-producer.sh --broker-list localhost:9092 --topic input_kafka

# 启动消费者控制台程序,  from-beginning 告诉消费者从头开始消费数据
./bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic output_kafka --from-beginning


{"user_id": 1, "item_id": 1, "status":"success"}
{"user_id": 2, "item_id": 2, "status":"success"}
{"user_id": 3, "item_id": 3, "status":"fail"}
{"user_id": 4, "item_id": 4, "status":"success"}
{"user_id": 5, "item_id": 5, "status":"success"}

*/
