package cn.azzhu.day08

import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala.{StreamTableEnvironment, tableConversions}
import org.apache.flink.table.functions.{ScalarFunction, TableFunction}
import org.apache.flink.types.Row

/**
 * Flink Table/SQL: user-defined table function (UDTF) example, exercised both
 * through the Table API (joinLateral) and through SQL (LATERAL TABLE).
 * @author azzhu
 * @create 2020-09-23 22:49:35
 */
object TableFunctionExample {
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)

    // Use the Blink planner in streaming mode.
    val settings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner()
      .inStreamingMode()
      .build()

    val stream = env.fromElements(
      "hello#word",
      "hello#bigdata"
    )

    val tEnv = StreamTableEnvironment.create(env, settings)

    // Table API style: interpret the stream as a table with a single column 's'.
    val table = tEnv.fromDataStream(stream, 's)

    val split = new Split("#")

    table
      // joinLateral correlates every input row with the rows emitted by the
      // table function, e.g. "hello#word" -> ("hello", 5) and ("word", 4).
      .joinLateral(split('s) as ('word, 'length))
      // Equivalent left-outer variant (also keeps rows for which the
      // table function emits nothing):
      //.leftOuterJoinLateral(split('s) as ('word, 'length))
      .select('s, 'word, 'length)
      .toAppendStream[Row]
      //.print()

    // SQL style: register the function and a view over the table, then
    // correlate with LATERAL TABLE.
    tEnv.registerFunction("split", split)
    tEnv.createTemporaryView("t", table)
    tEnv
      // "as T(word,length)" aliases the lateral table and names its two
      // output columns; it is a column alias, not a tuple literal.
      .sqlQuery("select s,word,length from t,lateral table(split(s)) as T(word,length)")
      .toAppendStream[Row]
      .print()

    env.execute("TableFunctionExample")
  }

  /**
   * User-defined table function: splits the input string on `sep` and emits
   * one (token, token length) row per token.
   *
   * @param sep separator to split the input string on (a regex, as per
   *            `String.split` — "#" here is a plain literal)
   */
  class Split(sep: String) extends TableFunction[(String, Int)] {
    def eval(s: String): Unit = {
      // collect() emits one result row downstream per token. Pass an explicit
      // tuple: collect takes a single (String, Int) argument, and relying on
      // Scala's auto-tupling of the argument list is deprecated.
      s.split(sep).foreach(x => collect((x, x.length)))
    }
  }
}
