package cn.dfun.sample.flink.tabletest

import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.{DataTypes, EnvironmentSettings, Table, TableEnvironment}
import org.apache.flink.table.api.scala._
import org.apache.flink.table.descriptors._

/**
  * Flink Table API & SQL demo, covering:
  *   1. Environment setup for the legacy planner and the Blink planner, in both
  *      batch and streaming mode (alternatives kept commented out for reference).
  *   2. Registering tables from a filesystem CSV source and from a Kafka topic.
  *   3. Querying and transforming a registered table with both the Table API and SQL.
  *
  * Usage: TableAPITest [inputPath]
  *   inputPath — optional path to the sensor CSV file; defaults to the bundled sample.
  *
  * Note: under the hood, Flink Table API and SQL jobs are both executed as streams.
  * Produce test records for the Kafka source with:
  *   ./bin/kafka-console-producer.sh --broker-list node-01:9092 --topic sensor
  */
object TableAPITest {

  /** Default location of the sample sensor CSV file, used when no CLI argument is given. */
  private val DefaultInputPath = "C:\\wor\\flink-sample\\src\\main\\resources\\sensor"

  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    // StreamTableEnvironment.create(env) uses the legacy (pre-Blink) planner by default.
    val tableEnv = StreamTableEnvironment.create(env)

    // The Blink planner unifies batch and streaming. The commented-out variants below
    // show how to create each flavour of TableEnvironment explicitly.
/*    // Legacy planner, streaming mode
    val settings = EnvironmentSettings.newInstance()
      .useOldPlanner()
      .inStreamingMode()
      .build()
    val oldStreamTableEnv = StreamTableEnvironment.create(env, settings)

    // Legacy planner, batch mode
    val batchEnv = ExecutionEnvironment.getExecutionEnvironment
    val oldBatchTableEnv = BatchTableEnvironment.create(batchEnv)

    // Blink planner, streaming mode
    val blinkStreamSettings = EnvironmentSettings.newInstance()
      .useBlinkPlanner()
      .inStreamingMode()
      .build()
    val blinkStreamTableEnv = StreamTableEnvironment.create(env, blinkStreamSettings)

    // Blink planner, batch mode
    val blinkBatchSettings = EnvironmentSettings.newInstance()
      .useBlinkPlanner()
      .inBatchMode()
      .build()
    val blinkBatchTableEnv = TableEnvironment.create(blinkBatchSettings)*/

    // 1) Register a table backed by a CSV file on the local filesystem.
    //    The path may be overridden via the first CLI argument (generalized from a
    //    previously hard-coded Windows path).
    val inputPath = if (args.nonEmpty) args(0) else DefaultInputPath
    tableEnv.connect(new FileSystem().path(inputPath))
        // OldCsv is the legacy, non-standard format descriptor; it is deprecated
        // and not supported by the Kafka connector.
//        .withFormat(new OldCsv())
        .withFormat(new Csv())
        .withSchema(new Schema()
          .field("id", DataTypes.STRING())
          .field("timestamp", DataTypes.BIGINT())
          .field("temperature", DataTypes.DOUBLE())
        )
        .createTemporaryTable("inputTable")

    // 2) Register a table backed by a Kafka topic (same schema as the file source).
    tableEnv.connect(new Kafka()
        .version("0.11")
        .topic("sensor")
        .property("zookeeper.connect", "node-01:2181")
        .property("bootstrap.servers", "node-01:9092")
      )
      .withFormat(new Csv())
      .withSchema(new Schema()
        .field("id", DataTypes.STRING())
        .field("timestamp", DataTypes.BIGINT())
        .field("temperature", DataTypes.DOUBLE())
      )
      .createTemporaryTable("kafkaInputTable")

    // Print the raw Kafka-sourced rows as an append stream.
//    val inputTable: Table = tableEnv.from("inputTable")
    val inputTable: Table = tableEnv.from("kafkaInputTable")
    inputTable.toAppendStream[(String, Long, Double)].print()

    // 3) Query / transform the file-backed table.
    // 3.1 Table API (legacy symbol-based expression syntax).
    val sensorTable = tableEnv.from("inputTable")
    val resultTable = sensorTable
        .select('id, 'temperature)
        .filter('id === "sensor_1")

    // 3.2 Equivalent SQL query.
    val resultSqlTable = tableEnv.sqlQuery(
      """
        |select id, temperature
        |from inputTable
        |where id = 'sensor_1'
      """.stripMargin
    )
    resultTable.toAppendStream[(String, Double)].print("result")
    resultSqlTable.toAppendStream[(String, Double)].print("sql result")

    // 4) Sink — not implemented in this demo.

    env.execute("table api test")
  }
}
