
package com.atguigu.day7

import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.{DataTypes, EnvironmentSettings, Table}
import org.apache.flink.table.api.scala._
import org.apache.flink.api.scala._
import org.apache.flink.table.descriptors.{Csv, FileSystem, Schema}
import org.apache.flink.types.Row


object TableExample {

  /**
   * Reads sensor readings from a CSV file, registers them as a temporary
   * table, and runs the same filter (keep only `sensor_1` rows) through
   * both the SQL API and the Table API, printing each result stream.
   */
  def main(args: Array[String]): Unit = {

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)

    // Blink planner in streaming mode — the unified Table/SQL runtime.
    val settings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner()
      .inStreamingMode()
      .build()

    val tEnv = StreamTableEnvironment.create(env, settings)

    tEnv
      // Define the external connection the table data comes from.
      // NOTE(review): hard-coded absolute Windows path — consider taking it
      // from `args` or loading it as a classpath resource instead.
      .connect(new FileSystem().path("D:\\job\\idea\\idea2018_workspces\\flink\\src\\main\\resources\\sensor.txt"))
      // How raw records read from the external system are parsed.
      .withFormat(new Csv)
      .withSchema(
        // Table schema: sensor id, event timestamp, temperature reading.
        new Schema()
          .field("id", DataTypes.STRING())
          .field("timestamp", DataTypes.BIGINT())
          .field("temperature", DataTypes.DOUBLE())
      )
      .inAppendMode()
      // Register as a temporary table so both queries below can reference it.
      .createTemporaryTable("inputTable")

    // Obtain a Table handle for the registered temporary table.
    val sensorTable: Table = tEnv.from("inputTable")

    // Query 1: via the SQL API.
    val sqlResult = tEnv.sqlQuery("SELECT * FROM inputTable where id='sensor_1'")
    tEnv.toAppendStream[Row](sqlResult).print()

    // Query 2: the equivalent filter via the Table API expression strings.
    val tableResult = sensorTable
      .select("id,temperature,timestamp")
      .filter("id = 'sensor_1'")
    tEnv.toAppendStream[Row](tableResult).print()

    env.execute()
  }

}

