package tableApi;

import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.java.BatchTableEnvironment;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import org.apache.flink.table.descriptors.Csv;
import org.apache.flink.table.descriptors.FileSystem;
import org.apache.flink.table.descriptors.Schema;
import org.apache.flink.types.Row;

/**
 * Demonstrates the common Flink Table API setup (legacy 1.9/1.10-era API):
 * creating table environments with the old and Blink planners in both
 * streaming and batch modes, registering a CSV-backed temporary table, and
 * querying it with both the Table API and SQL.
 *
 * <p>Reads sensor records (id, timestamp, temperature) from
 * {@code src/main/resources/sensor.txt} and prints the query results to stdout.
 */
public class tableTest02_CommonApi {
    public static void main(String[] args) throws Exception {
        // 1. Create the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // 1.1 Stream processing based on the old planner.
        EnvironmentSettings oldStreamSettings = EnvironmentSettings.newInstance()
                .useOldPlanner()      // legacy planner
                .inStreamingMode()    // streaming mode
                .build();
        StreamTableEnvironment oldStreamTableEnv = StreamTableEnvironment.create(env, oldStreamSettings);

        // 1.2 Batch processing based on the old planner.
        ExecutionEnvironment batchEnv = ExecutionEnvironment.getExecutionEnvironment();
        BatchTableEnvironment oldBatchTableEnv = BatchTableEnvironment.create(batchEnv);

        // 1.3 Stream processing based on the Blink planner.
        EnvironmentSettings blinkStreamSettings = EnvironmentSettings.newInstance()
                .useBlinkPlanner()
                .inStreamingMode()
                .build();
        StreamTableEnvironment blinkStreamTableEnv = StreamTableEnvironment.create(env, blinkStreamSettings);

        // 1.4 Batch processing based on the Blink planner.
        EnvironmentSettings blinkBatchSettings = EnvironmentSettings.newInstance()
                .useBlinkPlanner()
                .inBatchMode()        // batch mode
                .build();
        TableEnvironment blinkBatchTableEnv = TableEnvironment.create(blinkBatchSettings);

        // 2. Table creation: connect to an external system and read its data.
        // 2.1 Read data from a file.
        String filePath = "src/main/resources/sensor.txt";
        tableEnv.connect(new FileSystem().path(filePath))
                .withFormat(new Csv())        // file format of the table
                .withSchema(new Schema()      // table schema: field names may be renamed, but field order must match the file
                        .field("id", DataTypes.STRING())
                        .field("timeStamp", DataTypes.BIGINT())
                        .field("temperature", DataTypes.DOUBLE()))
                .createTemporaryTable("sensor");
        Table inputTable = tableEnv.from("sensor");
        // Print the table schema / raw rows (debugging aids).
//        inputTable.printSchema();
//        tableEnv.toAppendStream(inputTable, Row.class).print();

        // 3. Queries.
        // 3.1 Simple transformation via the Table API.
        Table resultTable = inputTable.select("id,temperature")
                .filter("id = 'sensor_1'");
        // Aggregation: per-sensor reading count and average temperature.
        Table aggTable = inputTable.groupBy("id")
                .select("id,id.count as cnt ,temperature.avg as avgTemp");
        // 3.2 The same queries expressed in SQL.
        Table sqlResult = tableEnv.sqlQuery("select id,temperature from sensor where id = 'sensor_1'");
        Table sqlAgg = tableEnv.sqlQuery("select id,count(id) as cnt ,avg(temperature) as avgTemp from sensor group by id");

        // Print the results. Aggregations emit updates, so they require retract
        // streams; the append-only projection can use an append stream.
        tableEnv.toAppendStream(resultTable, Row.class).print("print-1");
        tableEnv.toRetractStream(aggTable, Row.class).print("print-2");
        tableEnv.toRetractStream(sqlResult, Row.class).print("print-3");
        tableEnv.toRetractStream(sqlAgg, Row.class).print("print-4");
        env.execute();
    }
}
