package com.shujia.flink.sql

import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.EnvironmentSettings
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.types.Row

/**
  * Streaming word-count demo over a processing-time attribute.
  *
  * Reads CSV words from Kafka, declares a PROCTIME() computed column on the
  * source table, then counts words in 5-second tumbling windows, converting
  * each window's end timestamp from UTC to local time via a UDF.
  */
object DEmo3ProTime {

  def main(args: Array[String]): Unit = {

    // Streaming execution environment shared with the Table API below.
    val streamEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Blink planner, streaming mode.
    val envSettings: EnvironmentSettings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner()
      .inStreamingMode()
      .build()

    val tableEnv: StreamTableEnvironment = StreamTableEnvironment.create(streamEnv, envSettings)

    // Kafka-backed source table; `user_action_time` is a computed column
    // declared as the processing-time attribute (see the SQL comment inline).
    tableEnv.executeSql(
      """
        |
        |CREATE TABLE kafkaTable (
        | word String,
        | user_action_time AS PROCTIME() -- 声明一个额外的列作为处理时间属性
        |) WITH (
        | 'connector' = 'kafka',
        | 'topic' = 'test_topic1',
        | 'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
        | 'properties.group.id' = 'testGroup1',
        | 'format' = 'csv',
        | 'csv.ignore-parse-errors' = 'true',
        | 'scan.startup.mode' = 'earliest-offset'
        |)
        |
      """.stripMargin)

    // Register the user-defined function used to shift the window end
    // timestamp from UTC into local time. UTC2Local is defined elsewhere
    // in this project.
    tableEnv.createFunction("utc2local", classOf[UTC2Local])

    // 5-second tumbling window on the processing-time attribute,
    // counting occurrences per word and window.
    val windowedCounts = tableEnv.sqlQuery(
      """
        |
        |select
        |word,
        |utc2local(TUMBLE_END(user_action_time, INTERVAL '5' SECOND)) as end_time,
        |count(1) as c
        |from kafkaTable
        |group by word,TUMBLE(user_action_time, INTERVAL '5' SECOND)
        |
        |
      """.stripMargin)

    // Window aggregation per tumbling window emits append-only results.
    windowedCounts.toAppendStream[Row].print()

    streamEnv.execute()
  }

}
