package com.atguigu.fink.chapter01.tableapi;

import com.atguigu.fink.bean.WaterSensor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

import static org.apache.flink.table.api.Expressions.$;

/**
 * @Author lzc
 * @Date 2022/11/28 09:06
 */
/**
 * Minimal Table API walkthrough: lifts a bounded {@code WaterSensor} stream into a
 * dynamic table, runs a continuous {@code select} query on it, converts the result
 * back to an append-only {@code DataStream<Row>}, and prints it to stdout.
 */
public class Flink01_TableApi_BaseUse_2 {
    public static void main(String[] args) throws Exception {
        // Pin the Flink web UI to a fixed port so the local job is easy to inspect.
        Configuration configuration = new Configuration();
        configuration.setInteger("rest.port", 2000);

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(configuration);
        // Parallelism 1 keeps the printed output in a single, ordered task slot.
        env.setParallelism(1);

        // Bounded in-memory source of sensor readings (id, timestamp, vc).
        DataStreamSource<WaterSensor> sensorSource = env.fromElements(
            new WaterSensor("s1", 1L, 10),
            new WaterSensor("s1", 2L, 10),
            new WaterSensor("s2", 3L, 20),
            new WaterSensor("s1", 4L, 30),
            new WaterSensor("s1", 5L, 40),
            new WaterSensor("s1", 6L, 50)
        );

        // 1. Create a table environment bridging the DataStream and Table APIs.
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // 2. Turn the stream into a dynamic table (columns derived from WaterSensor fields).
        Table sensorTable = tableEnv.fromDataStream(sensorSource);

        // 3. Continuous query over the dynamic table; the result is itself a dynamic table.
        Table projected = sensorTable.select($("id"), $("vc"));

        // 4. Convert the result table back into an append-only stream of Rows
        //    (analogous to Spark SQL, where DataFrame is Dataset<Row>).
        DataStream<Row> rows = tableEnv.toAppendStream(projected, Row.class);
        rows.print();

        env.execute();
    }
}
