package com.feiwei
import org.apache.flink.streaming.api._
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.DataTypes
import org.apache.flink.table.api.scala._
import org.apache.flink.table.descriptors.{Csv, Kafka, Schema}
object day6_KafkaPipeline {

  /**
   * Flink Table API pipeline: reads CSV-encoded sensor records from the
   * Kafka topic "sensor", filters readings for sensor_1, and writes
   * (id, temperature) rows to the Kafka topic "sinktest".
   *
   * Uses the legacy descriptor-based connector API (`tableEnv.connect`)
   * with Kafka 0.11 and the CSV format.
   */
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    val tableEnv = StreamTableEnvironment.create(env)

    // 1. Connect to the external system and register the source table.
    tableEnv.connect(new Kafka().version("0.11")
      .topic("sensor")
      .property("zookeeper.connect", "localhost:2181")
      .property("bootstrap.servers", "localhost:9092")
    ).withFormat(new Csv())
      .withSchema(new Schema()
        .field("id", DataTypes.STRING())
        .field("timestamp", DataTypes.BIGINT())
        .field("temperature", DataTypes.DOUBLE()))
      .createTemporaryTable("inputTable")

    // 2. Transformations.
    val source = tableEnv.from("inputTable")

    // Append-only projection + filter: safe to write to a Kafka sink.
    val t1 = source
      .select('id, 'temperature)
      .filter('id === "sensor_1")

    // Aggregation example. NOTE: groupBy produces an updating (retract)
    // table, which CANNOT be inserted into an append-only Kafka sink —
    // t2 is kept for reference only.
    // Fixed: use symbol-expression syntax ('id) consistently instead of
    // the string-based groupBy("id"), and avoid aliasing to the reserved
    // keyword `count` (rejected by the planner) — use 'cnt instead.
    val t2 = source
      .groupBy('id)
      .select('id, 'id.count as 'cnt)

    // 3. Register the Kafka sink table (schema must match t1's columns).
    tableEnv.connect(new Kafka().version("0.11")
      .topic("sinktest")
      .property("zookeeper.connect", "localhost:2181")
      .property("bootstrap.servers", "localhost:9092")
    ).withFormat(new Csv())
      .withSchema(new Schema()
        .field("id", DataTypes.STRING())
        .field("temperature", DataTypes.DOUBLE()))
      .createTemporaryTable("outputTable")

    // 4. Emit the filtered stream to Kafka and launch the job.
    t1.insertInto("outputTable")
    env.execute()
  }
}
