/**
 * Pipeline: DataStream => Table (dynamic table) => CRUD => Table (dynamic table)
 * => DataStream => Sink.
 * Note: prefer addSink over the approach shown in the documentation, because the
 * Flink 1.11 docs are outdated on this point.
 */
package com.atguigu.day7

import com.atguigu.source.{SensorReading, SensorSource}
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.api.scala._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.types.Row
import org.apache.kafka.clients.producer.KafkaProducer


object TableFromDataStreamExample {

  /**
   * Demonstrates DataStream => dynamic Table => query => DataStream => print sink.
   *
   * NOTE(review): the field symbol 'timeStamP must exactly match the field name of
   * the SensorReading case class — confirm against com.atguigu.source.SensorReading.
   */
  def main(args: Array[String]): Unit = {

    val streamEnv = StreamExecutionEnvironment.getExecutionEnvironment
    streamEnv.setParallelism(1)

    // Blink planner in streaming mode (the default planner as of Flink 1.11).
    val blinkSettings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner()
      .inStreamingMode()
      .build()

    val tableEnv = StreamTableEnvironment.create(streamEnv, blinkSettings)

    val sensorStream: DataStream[SensorReading] = streamEnv.addSource(new SensorSource)

    // DataStream => Table. Field references are Scala symbols (leading single quote);
    // `as` renames a field. If no fields are listed, the case-class field names are used.
    val sensorTable: Table =
      tableEnv.fromDataStream(sensorStream, 'id, 'timeStamP as 'ts, 'temperature)

    // Register the stream as a temporary view so it can be queried with plain SQL.
    tableEnv.createTemporaryView("sensor", sensorStream, 'id, 'timeStamP, 'temperature)
    tableEnv.sqlQuery("select * from sensor where id ='sensor_1'").toAppendStream[Row].print()

    // Append stream: converted but intentionally left without a sink here.
    // (A Kafka sink could be attached via addSink — preferred over the approach in the
    // Flink 1.11 docs, which is outdated.)
    sensorTable
      .select('id)
      .toAppendStream[Row]

    // Retract stream: the first tuple field is true for an insert (append) and
    // false for a retraction.
    sensorTable
      .select('id)
      .toRetractStream[Row]
      .print()

    streamEnv.execute()
  }
}
