package scala.hdfs.kafka

import cn.getech.data.development.utils.FlinkUtils
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.apache.flink.types.Row

/**
 * Demo job: reads JSON records from the Kafka topic `test_topic` into a
 * `user_log` table (legacy `connector.*` DDL properties, Flink 1.10/1.11 era)
 * and prints each row as an append stream.
 */
object JsonTest_par {

  def main(args: Array[String]): Unit = {

    // DDL for the Kafka-backed source table. The JSON schema is derived from
    // the table schema ('format.derive-schema' = 'true'), so no explicit
    // 'format.json-schema' property is needed — see the note below.
    val sql =
      """
        |CREATE TABLE user_log(
        | userid string,
        | action string,
        | duration bigint,
        | dt bigint,
        | subModel string
        |) WITH (
        | 'connector.type' = 'kafka',
        | 'connector.version' = 'universal',
        | 'connector.properties.group.id' = 'test1',
        | 'connector.topic' = 'test_topic',
        | 'connector.properties.zookeeper.connect' = 'bigdata-test-1:2181,bigdata-test-3:2181,bigdata-test-5:2181',
        | 'connector.properties.bootstrap.servers' = 'bigdata-test-4:9092,bigdata-test-5:9092,bigdata-test-6:9092',
        | 'connector.startup-mode' = 'earliest-offset',
        | 'update-mode' = 'append',
        | 'format.derive-schema' = 'true',
        | 'format.type' = 'json'
        | )
        |""".stripMargin
    /**
     * Alternative to 'format.derive-schema': spell the schema out explicitly.
     * Kept for reference, intentionally not used.
     * ,
     * 'format.json-schema' = '{
     * "type": "object",
     * "properties": {
     * "userid": {type: "string"},
     * "action": {type: "string"},
     * "duration": {type: "long"},
     * "dt": {type: "long"}
     * }
     * }'
     */

    // Keep a handle on the stream environment: we need it both to build the
    // table environment and to trigger job execution at the end.
    // NOTE(review): assumes FlinkUtils.getEnv returns a StreamExecutionEnvironment — confirm.
    val env = FlinkUtils.getEnv
    val tEnv: StreamTableEnvironment = FlinkUtils.createStreamTableEnv(env)

    // BUG FIX: the original called tEnv.execute(sql). TableEnvironment.execute
    // takes a *job name*, not SQL — the DDL was never run, so `user_log` was
    // never registered and tEnv.from("user_log") would fail. Register the
    // table by actually executing the DDL statement.
    tEnv.executeSql(sql)

    val table = tEnv.from("user_log")
    import org.apache.flink.api.scala._
    tEnv.toAppendStream[Row](table).print("------------------------------")

    // BUG FIX: converting to a DataStream only builds the topology; without
    // an explicit execute() on the stream environment the job never starts
    // and nothing is printed.
    env.execute("JsonTest_par")

  }

}
