package com.atguigu.table

import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.{DataTypes, Table}
import org.apache.flink.table.api.scala._
import org.apache.flink.table.descriptors.{Csv, FileSystem, Kafka, Schema}

/**
 * @ClassName TableKafkaTest
 * @Description Table API example: connecting a table source (Kafka/file) and running queries
 * @Author Mr Yang
 * @Date 2020/10/6 10:37
 * @Version 1.0
 */
object TableKafkaTest {
  def main(args: Array[String]): Unit = {
    // Single-parallelism streaming environment so the console output stays ordered.
    val streamEnv = StreamExecutionEnvironment.getExecutionEnvironment
    streamEnv.setParallelism(1)

    val tableEnv = StreamTableEnvironment.create(streamEnv)

    // Alternative Kafka-backed source (left disabled); the file source below is used instead.
    // NOTE(review): "0.12" is not a valid connector version and the property should be
    // "bootstrap.servers" — fix before enabling.
//    tableEnv.connect(new Kafka()
//      .version("0.12")
//      .property("zookeeper.connect", "localhost:2181")
//      .property("bootstrap.server", "localhost:9092")
//      .topic("sensor")
//    )

    // Register a CSV-formatted file source under the table name "kafkaTableInput".
    val sensorSchema = new Schema()
      .field("id", DataTypes.STRING())
      .field("timestamp", DataTypes.BIGINT())
      .field("temperature", DataTypes.DOUBLE())
    tableEnv
      .connect(new FileSystem().path("F:\\work\\FlinkTutorial\\src\\main\\resources\\sensor.txt"))
      .withFormat(new Csv())
      .withSchema(sensorSchema)
      .createTemporaryTable("kafkaTableInput")

    val sensorTable = tableEnv.from("kafkaTableInput")

    // Projection + filter: keep only (id, temperature) rows coming from sensor_1.
    val filteredTable: Table = sensorTable
      .select('id, 'temperature)
      .filter('id === "sensor_1")

    // Per-id row count expressed through the Table API ...
    val countsTable: Table = sensorTable
      .groupBy('id)
      .select('id, 'id.count as 'count)

    // ... and the equivalent aggregation expressed in SQL.
    val countsSqlTable =
      tableEnv.sqlQuery("select id, count(id) as cnt from kafkaTableInput group by id")

    // The stateless filter can use an append stream; the grouped aggregations update
    // previously emitted rows, so they must be consumed as retract streams.
    filteredTable.toAppendStream[(String, Double)].print("result")
    countsTable.toRetractStream[(String, Long)].print("agg result")
    countsSqlTable.toRetractStream[(String, Long)].print("sql result")

    streamEnv.execute("kafka table api test job")
  }
}
