package tableApi;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import org.apache.flink.table.descriptors.Csv;
import org.apache.flink.table.descriptors.FileSystem;
import org.apache.flink.table.descriptors.Schema;

/**
 * Example: read sensor readings from a CSV file as a table, project/filter
 * them with the (legacy) Table API, and write the result to a CSV file sink.
 *
 * NOTE: this works for simple append-only queries; an aggregating (updating)
 * query cannot be emitted through this append-only file sink and would fail.
 */
public class tableTest03_Fileoutput {
    public static void main(String[] args) throws Exception {
        // 1. Set up the streaming environment; parallelism 1 so the file
        //    sink writes a single output file.
        StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
        execEnv.setParallelism(1);

        StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv);

        // 2. Register the source table over the sensor CSV file. The field
        //    names are free to choose, but their order must match the columns
        //    in the file.
        String sourcePath = "src/main/resources/sensor.txt";
        tEnv.connect(new FileSystem().path(sourcePath))
                .withFormat(new Csv()) // on-disk format of the table
                .withSchema(new Schema()
                        .field("id", DataTypes.STRING())
                        .field("timeStamp", DataTypes.BIGINT())
                        .field("temperature", DataTypes.DOUBLE()))
                .createTemporaryTable("sensor");

        // 3. Register the sink table; its schema matches the projection below.
        String sinkPath = "src/main/resources/sensor_result";
        tEnv.connect(new FileSystem().path(sinkPath))
                .withFormat(new Csv())
                .withSchema(new Schema()
                        .field("id", DataTypes.STRING())
                        .field("temperature", DataTypes.DOUBLE()))
                .createTemporaryTable("output");

        // 4. Build the query: keep only id/temperature for sensor_1.
        Table sensorTable = tEnv.from("sensor");
        Table filtered = sensorTable
                .select("id,temperature")
                .filter("id = 'sensor_1'");
        // Debug alternative: tEnv.toAppendStream(filtered, Row.class).print();

        // 5. Emit to the file sink. Plain append-only results are fine here;
        //    aggregated (updating) results would raise an error.
        filtered.insertInto("output");

        execEnv.execute();
    }
}
