package com.atguigu.flinksql.day11;

import com.atguigu.datastream.bean.WaterSensor;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import static org.apache.flink.table.api.Expressions.$;


/**
 * ClassName: Test01
 * Package: com.atguigu.flinksql.day11
 * Description:
 * 1.1 使用DataStream方式读取端口数据(id,ts,vc)
 * 	1.2 转换为动态表并提取处理时间
 * 	1.3 计算每个id的最大vc
 * 	1.4 输出到Kafka
 * @Author ChenJun
 * @Create 2023/4/19 18:18
 * @Version 1.0
 */
public class Test01 {
    public static void main(String[] args) {

        // 1. Set up the stream execution environment and wrap it in a table environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // 2. Read raw lines from the socket. Each line is expected as "id,ts,vc":
        //    field 1 is parsed as a long timestamp, field 2 as an int vc,
        //    matching the WaterSensor(String, Long, Integer) constructor.
        DataStreamSource<String> socketStream = env.socketTextStream("hadoop102", 9999);

        SingleOutputStreamOperator<WaterSensor> sensorStream = socketStream.map(line -> {
            String[] fields = line.split(",");
            return new WaterSensor(fields[0], Long.parseLong(fields[1]), Integer.parseInt(fields[2]));
        });

        // 3. Convert the stream into a dynamic table, appending a processing-time
        //    attribute column "pt", and register it as view t1.
        Table sensorTable =
                tableEnv.fromDataStream(sensorStream, $("id"), $("ts"), $("vc"), $("pt").proctime());
        tableEnv.createTemporaryView("t1", sensorTable);

        // 4. Continuous query: maximum vc observed per sensor id, registered as view t2.
        Table maxVcTable = tableEnv.sqlQuery("select id,max(vc) max_vc from t1 group by id");
        tableEnv.createTemporaryView("t2", maxVcTable);

        // 5. Declare the Kafka sink. The grouped aggregate emits updates, so the
        //    upsert-kafka connector is used with `id` as the (not enforced) primary key.
        tableEnv.executeSql(
                "CREATE TABLE kafka_sink (\n"
                        + "  `id` STRING,\n"
                        + "  `max_vc` Integer,\n"
                        + "   PRIMARY KEY (id) NOT ENFORCED"
                        + ") WITH (\n"
                        + "  'connector' = 'upsert-kafka',\n"
                        + "  'topic' = 'test',\n"
                        + "  'properties.bootstrap.servers' = 'hadoop102:9092',\n"
                        + "  'key.format' = 'json',\n"
                        + "  'value.format' = 'json'\n"
                        + ")");

        // 6. Submit the streaming insert; executeSql on an INSERT launches the job,
        //    so no explicit env.execute() is needed here.
        tableEnv.executeSql("insert into kafka_sink select * from t2");
    }
}
