package com.yanggu.flink.tableapi_sql.connector

import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.EnvironmentSettings
import org.apache.flink.table.api.bridge.scala._

/**
 * Demonstrates defining a Kafka source table with a SQL DDL statement and
 * streaming every row it receives to stdout.
 *
 * The available table connectors are listed in the Flink documentation:
 * https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/connectors/table/overview/
 * Details on the Kafka connector and its options:
 * https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/connectors/table/kafka/
 */
object KafkaConnectorTest {

  def main(args: Array[String]): Unit = {
    // Streaming execution environment plus its Table API bridge, configured
    // explicitly for streaming mode.
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tableEnv = StreamTableEnvironment.create(
      env,
      EnvironmentSettings.newInstance().inStreamingMode().build()
    )

    // Register the Kafka-backed table. The `ts` column is filled from the
    // record's 'timestamp' metadata, as declared by METADATA FROM below.
    val createKafkaTable =
      """
        |CREATE TABLE kafkaTable (
        |  `user_id` BIGINT,
        |  `item_id` BIGINT,
        |  `behavior` STRING,
        |  `ts` TIMESTAMP(3) METADATA FROM 'timestamp'
        |) WITH (
        |  'connector' = 'kafka',
        |  'topic' = 'hotitems',
        |  'properties.bootstrap.servers' = 'localhost:9092',
        |  'properties.group.id' = 'KafkaConnectorTest',
        |  'scan.startup.mode' = 'earliest-offset',
        |  'format' = 'json'
        |)
        |""".stripMargin
    tableEnv.executeSql(createKafkaTable)

    // Continuously select all rows and print them to stdout; print() drives
    // the job, so no separate env.execute() call is needed here.
    val selectAll =
      """
        |SELECT
        |   *
        |FROM
        |   kafkaTable
        |""".stripMargin
    tableEnv.sqlQuery(selectAll).execute().print()
  }

}
