package data.connector

import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.descriptors.{Csv, FileSystem, Kafka, Schema}
import org.apache.flink.types.Row

// First thing to do when developing with the Scala Table API:
import org.apache.flink.api.scala._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.api._


/**
 * Kafka connector demo: reads CSV traffic records from a Kafka topic via the
 * Table API, filters by domain, prints the result, and writes it back to a
 * second Kafka topic.
 *
 * Exercise: integrate the Table API with HBase: https://ci.apache.org/projects/flink/flink-docs-release-1.10/dev/table/connect.html#hbase-connector
 */
object KafkaConnector {

  // Broker address shared by the source and sink tables.
  private val BootstrapServers = "hadoop000:9092"

  /**
   * Registers a temporary table backed by a Kafka topic with CSV-formatted records.
   *
   * The source and sink tables in this job differ only in topic, table name and
   * schema, so the descriptor boilerplate is factored out here.
   *
   * @param tableEnv  the table environment to register the table in
   * @param topic     the Kafka topic backing the table
   * @param tableName the name under which the table is registered
   * @param schema    the table schema (field names and types)
   */
  private def registerKafkaCsvTable(tableEnv: StreamTableEnvironment,
                                    topic: String,
                                    tableName: String,
                                    schema: Schema): Unit = {
    tableEnv.connect(new Kafka()
        .version("universal")
        .topic(topic)
        .property("bootstrap.servers", BootstrapServers))
      .withFormat(new Csv())
      .withSchema(schema)
      .createTemporaryTable(tableName)
  }

  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    val tableEnvironment = StreamTableEnvironment.create(env)

    // Source table: CSV records of (timestamp, domain, traffic) from Kafka.
    registerKafkaCsvTable(tableEnvironment, "flinktopic", "kafkatable",
      new Schema()
        .field("timestamp", DataTypes.BIGINT())
        .field("domain", DataTypes.STRING())
        .field("traffic", DataTypes.FLOAT()))

    // Keep only rows for the domain of interest, projecting away the timestamp.
    val resultTable = tableEnvironment.from("kafkatable")
      .select('domain, 'traffic)
      .filter('domain === "bsdata.com")

    // Print the filtered stream to stdout for inspection.
    resultTable.toAppendStream[Row].print("...")

    // Sink table: same CSV format, without the timestamp column.
    registerKafkaCsvTable(tableEnvironment, "flinktopictest", "kafkaoutputtable",
      new Schema()
        .field("domain", DataTypes.STRING())
        .field("traffic", DataTypes.FLOAT()))

    // NOTE(review): executeInsert submits its own Table job; env.execute below
    // submits the DataStream job containing the print() sink.
    resultTable.executeInsert("kafkaoutputtable")

    env.execute("xx")
  }
}
