package cn.azzhu.day07

import java.sql.Timestamp

import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.EnvironmentSettings
import org.apache.flink.table.api._
import org.apache.flink.api.scala._
import org.apache.flink.table.api.bridge.scala.{StreamTableEnvironment, tableConversions}
import org.apache.flink.table.descriptors.{Csv, FileSystem, Schema}
/**
 * Flink-Table-SQL
 *
 * @author azzhu
 * @create 2020-09-23 00:16:54
 */
object TableExample {
  def main(args: Array[String]): Unit = {
    // Streaming execution environment; parallelism 1 keeps printed output in a single task.
    val streamEnv = StreamExecutionEnvironment.getExecutionEnvironment
    streamEnv.setParallelism(1)

    // Table-environment settings: the Blink planner is the unified batch/stream planner.
    val envSettings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner()
      .inStreamingMode()
      .build()

    // Bootstrap a table environment on top of the stream environment.
    val tblEnv = StreamTableEnvironment.create(streamEnv, envSettings)

    // Column layout of the CSV source: (id, timestamp, temperature).
    val sensorSchema = new Schema()
      .field("id", DataTypes.STRING())
      .field("timestamp", DataTypes.BIGINT())
      .field("temperature", DataTypes.DOUBLE())

    // Register a temporary table backed by a CSV file on the local filesystem.
    tblEnv
      .connect(new FileSystem().path("D:\\bigdata\\flink-learning\\src\\main\\resources\\sensor.txt"))
      .withFormat(new Csv)
      .withSchema(sensorSchema)
      .createTemporaryTable("inputTable")

    // Obtain a Table handle for the registered temporary table.
    val sensorTable: Table = tblEnv.from("inputTable")

    // Table-API query: project id/temperature, keep only rows for sensor_1.
    val filtered = sensorTable
      .select("id, temperature")
      .filter("id = 'sensor_1'")

    // tblEnv.toAppendStream[Row](filtered).print()

    // Equivalent SQL query over the same table, printed as a retract stream.
    tblEnv
      .sqlQuery("select id, temperature from inputTable where id ='sensor_1'")
      .toRetractStream[(String, Double)]
      .print()

    streamEnv.execute("TableExample")
  }
}
