package com.mjf.day7

import org.apache.flink.api.common.restartstrategy.RestartStrategies
import org.apache.flink.api.common.time.Time
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{DataTypes, Table}
import org.apache.flink.table.descriptors.{Csv, Kafka, Schema}

/**
 * 输出表计算的结果到Kafka
 */
/**
 * Flink Table API example: reads CSV-formatted sensor readings from a Kafka
 * topic, filters them with a SQL query, and writes the result back to a
 * second Kafka topic.
 *
 * Pipeline: kafka `sensor` -> SQL filter (id = 'sensor_1') -> kafka `sensor_output`.
 */
object KafkaTableExample {
  def main(args: Array[String]): Unit = {

    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Single parallelism keeps the example output deterministic and easy to follow.
    env.setParallelism(1)

    // Restart strategy: up to 3 attempts with a 5-second delay between them.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, Time.seconds(5)))

    val tableEnv: StreamTableEnvironment = StreamTableEnvironment.create(env)

    // Source: register a temporary table backed by the Kafka `sensor` topic.
    tableEnv
      .connect(
        new Kafka()
          .version("0.11")
          .topic("sensor")
          .property("bootstrap.servers", "hadoop103:9092")
          .property("zookeeper.connect", "hadoop103:2181")
      )
      .withFormat(new Csv())  // the new Csv descriptor requires the flink-csv format artifact
      .withSchema(new Schema()
        .field("id", DataTypes.STRING())
        .field("timestamp", DataTypes.BIGINT())
        .field("temperature", DataTypes.DOUBLE())
      )
      .createTemporaryTable("kafkaInputTable")

    // Filter to a single sensor. `temperature` is aliased to `temp` so the
    // result's field names match the sink table schema declared below
    // (previously the query produced `temperature` while the sink expected `temp`).
    val resultTable: Table = tableEnv.sqlQuery(
      """
        |select id, temperature as temp
        |from kafkaInputTable
        |where id = 'sensor_1'
        |""".stripMargin)

    // Sink: register a temporary table backed by the Kafka `sensor_output` topic.
    tableEnv
      .connect(
        new Kafka()
          .version("0.11")
          .topic("sensor_output")
          .property("bootstrap.servers", "hadoop103:9092")
          .property("zookeeper.connect", "hadoop103:2181")
      )
      .withFormat(new Csv())  // same CSV wire format as the source
      .withSchema(new Schema()
        .field("id", DataTypes.STRING())
        .field("temp", DataTypes.DOUBLE())
      )
      .createTemporaryTable("kafkaOutputTable")

    // Emit the query result into the sink table.
    resultTable.insertInto("kafkaOutputTable")

    env.execute("KafkaTableExample")
  }
}

