package com.desheng.bigdata.flink.table

import org.apache.flink.api.common.typeinfo.{TypeInformation, Types}
import org.apache.flink.api.scala.{DataSet, ExecutionEnvironment, _}
import org.apache.flink.table.api.scala.BatchTableEnvironment
import org.apache.flink.table.functions.{ScalarFunction, TableFunction}
import org.apache.flink.types.Row
/**
  * Flink Table UDTF (user-defined table function) demo.
  *
  * A UDTF in Flink SQL plays the same role as Hive's `lateral view`:
  * `lateral table(...)` expands one input row into multiple output rows.
  */
object _09FlinkTable2UDTFOps {
    def main(args: Array[String]): Unit = {
        // Batch execution environment and the table environment built on top of it.
        val env = ExecutionEnvironment.getExecutionEnvironment
        val tableEnv = BatchTableEnvironment.create(env)

        // Small in-memory dataset of sentences to explode into words.
        val sentences: DataSet[String] = env.fromCollection(List(
            "hello you",
            "hello me",
            "hello you"
        ))

        // Expose the dataset as table "tmp" with a single column named "line".
        tableEnv.registerTable("tmp", tableEnv.fromDataSet(sentences).as("line"))

        // Register the user-defined functions referenced in the SQL below:
        // mySplit (scalar: line -> Array[String]) and myExplode (table: array -> rows).
        tableEnv.registerFunction("myExplode", new MyExplodeUDTF)
        tableEnv.registerFunction("mySplit", new MySplitUDF)

        // Classic word count: split each line, explode to one row per word,
        // then group and count.
        val result = tableEnv.sqlQuery(
                """
                  |select
                  |  tt.word,
                  |  count(1) as counts
                  |from (
                  |  select
                  |    word
                  |  from tmp,
                  |  lateral table(myExplode(mySplit(line,' '))) as t(word)
                  |) tt
                  |group by tt.word
                """.stripMargin)
        tableEnv.toDataSet[Row](result).print()
    }
}
/** Scalar UDF: tokenizes a line of text into an array of strings. */
class MySplitUDF extends ScalarFunction {
    /** Splits `line` on the given `regex` and returns the tokens. */
    def eval(line: String, regex: String): Array[String] = line.split(regex)
}
/*
    A UDTF maps one input row to many output rows, which is why its type
    parameter is Row. TableFunction exposes a `collector` that is used to
    emit rows downstream.
 */
class MyExplodeUDTF extends TableFunction[Row] {
    /** Each emitted row carries a single String column. */
    override def getResultType: TypeInformation[Row] = Types.ROW(Types.STRING)

    /** Emits one output row per element of the input array. */
    def eval(array: Array[String]): Unit =
        array.foreach(word => collector.collect(Row.of(word)))
}