package com.feiwei.udf

import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api._
import org.apache.flink.table.api.scala.StreamTableEnvironment
import org.apache.flink.table.descriptors.{Csv, FileSystem, Schema}
import org.apache.flink.table.functions._
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.scala._
import org.apache.flink.types.Row

/**
 * Demonstrates a user-defined TableFunction in two equivalent ways:
 * via the Table API (`joinLateral`) and via SQL (`LATERAL TABLE`).
 * Reads sensor records from a local CSV file and splits the `id`
 * column into (word, length) rows.
 */
object day13_TableFunctionTest {

  def main(args: Array[String]): Unit = {

    // Streaming execution environment plus its table environment.
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tableEnv = StreamTableEnvironment.create(env)

    val filePath = "E:\\repository\\company\\myself\\flink-learning\\flink-learning-demo\\src\\main\\resources\\sensor.txt"

    // 1. Register a CSV-backed source table over the sensor file.
    tableEnv
      .connect(new FileSystem().path(filePath))
      .withFormat(new Csv())
      .withSchema(
        new Schema()
          .field("id", DataTypes.STRING())
          .field("timestamp", DataTypes.BIGINT())
          .field("temp", DataTypes.DOUBLE())
      )
      .createTemporaryTable("inputTable")

    val sensorTable = tableEnv.from("inputTable")
    val split = new Split()

    // 2. Table API flavor: lateral (cross) join each row against the
    //    rows produced by the table function for that row's id.
    val apiResult = sensorTable
      .joinLateral(split('id) as ('world, 'length))
      .select('id, 'timestamp, 'world, 'length)

    // 3. SQL flavor: expose the source as a view, register the UDF by
    //    name, then use the LATERAL TABLE syntax.
    tableEnv.createTemporaryView("sentor", sensorTable)
    tableEnv.registerFunction("split", split)

    val sqlResult = tableEnv.sqlQuery(
      """
        |
        |select
        |id,
        |`timestamp`,
        |world,
        |length
        |from sentor,
        |lateral table (split(id)) as sp( world,length)
        |
        |
      """.stripMargin)

    // Print both pipelines so the outputs can be compared side by side.
    apiResult.toAppendStream[Row].print("api")
    sqlResult.toAppendStream[Row].print("sql")

    env.execute()
  }

}


/**
 * Table function that splits its string argument on '_' and emits one
 * (token, tokenLength) row per token via the inherited `collect`.
 * NOTE(review): `"".split("_")` yields `Array("")`, so an empty input
 * emits a single ("", 0) row — confirm this is the intended behavior.
 */
class Split() extends TableFunction[(String, Int)] {

  def eval(s: String): Unit = {
    for (token <- s.split("_")) {
      collect((token, token.length))
    }
  }

}