package com.lsx143.wordcount.day10;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.descriptors.Csv;
import org.apache.flink.table.descriptors.FileSystem;
import org.apache.flink.table.descriptors.Schema;
import org.apache.flink.types.Row;
import org.apache.hadoop.mapreduce.ID;

import static org.apache.flink.table.api.Expressions.$;

public class Flink_Table_Connector_Read {
    /**
     * Demo job: reads sensor readings from a CSV file via the (legacy) table
     * connector/descriptor API, runs a continuous per-id count, and prints the
     * latest result rows of the updating table.
     *
     * @param args unused command-line arguments
     * @throws Exception if the Flink job fails to execute; failures are allowed
     *                   to propagate instead of being swallowed by printStackTrace()
     */
    public static void main(String[] args) throws Exception {
        // Pin the local web UI / REST endpoint to a fixed port for local runs.
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 20000);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        // Single parallelism keeps the printed output in one deterministic stream.
        env.setParallelism(1);

        // 1. Obtain the table environment bridging the DataStream and Table APIs.
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // 2. Declare the schema of the incoming CSV records.
        Schema schema = new Schema()
                .field("id", DataTypes.STRING())
                .field("ts", DataTypes.BIGINT())
                .field("vc", DataTypes.INT());

        // 3. Register the file as a dynamic table named "sensor"
        //    (comma-separated fields, newline-separated records).
        tableEnv.connect(new FileSystem().path("input/sensor.txt"))
                .withFormat(new Csv().fieldDelimiter(',').lineDelimiter("\n"))
                .withSchema(schema)
                .createTemporaryTable("sensor");

        // 4. Continuous query: count rows per sensor id (result updates over time).
        Table resultTable = tableEnv.sqlQuery("select id,count(*) cnt from sensor group by id");

        // 5. Convert the updating table to a retract stream for output.
        //    Each element is (flag, row): flag == true is an add message,
        //    flag == false a retraction of a previous row. Keeping only the
        //    add messages shows the latest count per id.
        tableEnv
                .toRetractStream(resultTable, Row.class)
                .filter(t -> t.f0)
                .map(t -> t.f1)
                .print();

        // Let execution failures propagate rather than swallowing them.
        env.execute();
    }
}