package com.atguigu.flinksqltest.day11;

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;


/**
 * ClassName: Test02
 * Package: com.atguigu.flinksqltest.day11
 * Description:
 *     2.1 Read Kafka data (json: id, vc, ts) with Flink SQL, extracting the event time
 *     2.2 Compute the maximum vc for each id
 *     2.3 Convert the result to a DataStream and print it
 * @Author ChenJun
 * @Create 2023/4/19 9:04
 * @Version 1.0
 */
public class Test02 {
    public static void main(String[] args) throws Exception {
        // 1. Streaming environment plus its Table-API bridge; parallelism 1
        //    keeps the printed output in a single, readable sequence.
        StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
        environment.setParallelism(1);
        StreamTableEnvironment streamTableEnv = StreamTableEnvironment.create(environment);

        // 2. Register a Kafka-backed source table over JSON records (id, vc, ts).
        //    rt is a computed event-time column with a 2-second watermark delay.
        //    NOTE(review): TO_TIMESTAMP_LTZ(ts, 0) interprets ts as epoch SECONDS —
        //    confirm the producer is not writing milliseconds (which would need precision 3).
        final String sourceDdl = "" +
                "CREATE TABLE kafka_source( \n" +
                "    id string, \n" +
                "    ts bigint, \n" +
                "    vc Integer,\n" +
                "    rt AS TO_TIMESTAMP_LTZ(ts,0),\n" +
                "    WATERMARK FOR rt AS rt - INTERVAL '2' SECOND\n" +
                ") WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'properties.bootstrap.servers' = 'hadoop102:9092',\n" +
                "  'properties.group.id' = 'first1' ,\n" +
                "  'scan.startup.mode' = 'group-offsets' ,\n" +
                "  'topic' = 'first',\n" +
                "  'format' = 'json'\n" +
                ")";
        streamTableEnv.executeSql(sourceDdl);

        // 3. Continuously-updating aggregate: maximum vc seen so far per id.
        Table maxVcPerId = streamTableEnv.sqlQuery(
                "select  id,max(vc) max_vc from kafka_source group by id");

        // 4. An updating aggregate needs retract semantics when bridged to a
        //    DataStream: each element pairs a flag (true = add/update,
        //    false = retraction of a previous result) with the row.
        DataStream<Tuple2<Boolean, Row>> retractStream =
                streamTableEnv.toRetractStream(maxVcPerId, Row.class);
        retractStream.print("-->");

        // Trigger the job — nothing above runs until execute() is called.
        environment.execute();
    }
}
