package com.xzc.apitest.udftest

import java.util

import com.xzc.apitest.source.SensorReading
import org.apache.calcite.rel.`type`.{RelDataType, RelDataTypeFactory}
import org.apache.calcite.schema.FunctionParameter
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.table.api.{EnvironmentSettings, Over, Tumble}
import org.apache.flink.table.api.scala._
import org.apache.flink.table.functions.{ScalarFunction, TableFunction}
import org.apache.flink.types.Row

/**
 * Demonstrates a user-defined TableFunction (`Split`) in both the Table API
 * (`joinLateral`) and SQL (`lateral table(...)`) flavors of Flink's Table API.
 *
 * Reads sensor lines from a local file, assigns event-time watermarks on the
 * raw stream, converts it to a dynamic table, and prints both query results.
 */
object TableFunctionTest {
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    // Blink planner is the default from Flink 1.11 on; the old planner was used before.
    val blinkSettings = EnvironmentSettings.newInstance()
      .useBlinkPlanner()
      .inStreamingMode()
      .build()
    // Table environment bridging the DataStream world and the Table/SQL world.
    val tableEnv = StreamTableEnvironment.create(env, blinkSettings)

    val rawLines = env.readTextFile("D:\\git\\learning_flink\\_01_试用\\src\\main\\resources\\sensor.txt")

    // Parse each CSV line into the SensorReading case class and attach
    // watermarks BEFORE table conversion so event time is usable in the table.
    val sensorStream = rawLines
      .map(line => {
        val fields = line.split(",")
        SensorReading(fields(0), fields(1).toLong, fields(2).toDouble)
      })
      .assignTimestampsAndWatermarks(
        // 3s bounded out-of-orderness; timestamps are seconds in the data, ms in Flink.
        new BoundedOutOfOrdernessTimestampExtractor[SensorReading](Time.seconds(3)) {
          override def extractTimestamp(element: SensorReading): Long = element.timestamp * 1000L
        }
      )

    // Build the dynamic table; `.rowtime` exposes the stream's watermark-backed
    // event time as column 'ts (replacing the raw timestamp field).
    val sensorTable = tableEnv.fromDataStream(sensorStream,
      'id, 'temperature, 'timestamp.rowtime as 'ts)

    // --- 1. Table API: cross-join each row with the rows emitted by Split ---
    val splitter = new Split("_")
    val apiResult = sensorTable
      .joinLateral(splitter('id) as('word, 'length))
      .select('id, 'ts, 'word, 'length)

    // --- 2. SQL: register the function and view, then use LATERAL TABLE ---
    tableEnv.registerFunction("split", splitter)
    tableEnv.createTemporaryView("sensor", sensorTable)
    val sqlResult = tableEnv.sqlQuery(
      """
        |select
        | id,ts,word,length
        | from
        | sensor, lateral table(split(id)) as splitid(word,length)
        |""".stripMargin
    )

    apiResult.toAppendStream[Row].print("table")
    sqlResult.toAppendStream[Row].print("sql")

    env.execute("UDF test")
  }
}

/**
 * TableFunction that splits a string on `separator` and emits one
 * (token, tokenLength) row per token via the inherited `collect`.
 *
 * @param separator regex/delimiter passed to `String.split`
 */
class Split(separator: String) extends TableFunction[(String, Int)] {
  // Flink discovers `eval` by reflection; output rows go through collect().
  def eval(s: String): Unit = {
    for (token <- s.split(separator)) {
      collect((token, token.length))
    }
  }
}
