package com.mjf.day8

import com.mjf.day3.SensorReading
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.table.api.Table
import org.apache.flink.table.api.scala._
import org.apache.flink.table.functions.ScalarFunction
import org.apache.flink.types.Row

/**
 * Custom function example: scalar function (ScalarFunction).
 *    A scalar function maps zero, one, or more scalar input values
 *    to a single new scalar value.
 */
object ScalarFunctionExample {
  def main(args: Array[String]): Unit = {

    // Set up the streaming environment: single parallelism keeps the
    // printed output ordered, and event-time semantics are required so
    // the `.rowtime` attribute below is derived from the watermarked field.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    // Source: read the sensor text file and parse each CSV line into a
    // SensorReading(id, timestamp, temperature).
    val inputStream: DataStream[SensorReading] = env
      .readTextFile("D:\\coding\\idea\\flink-stu\\src\\main\\input\\sensor.txt")
      .map { line =>
        val fields: Array[String] = line.split(",")
        SensorReading(fields(0), fields(1).toLong, fields(2).toDouble)
      }
      // Assign event-time timestamps (seconds -> milliseconds) and a
      // watermark that tolerates up to 1 second of out-of-order events.
      .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor[SensorReading](Time.seconds(1)) {
        override def extractTimestamp(element: SensorReading): Long = element.timestamp * 1000L
      })

    // Create the table environment bound to the streaming environment.
    val tableEnv: StreamTableEnvironment = StreamTableEnvironment.create(env)

    // Convert the DataStream into a Table, exposing the event-time
    // attribute as 'ts via `.rowtime`.
    val sensorTable: Table =
      tableEnv.fromDataStream(inputStream, 'id, 'timestamp.rowtime as 'ts, 'temperature)

    // Instantiate the UDF once; the same instance is used for both the
    // Table API call and the SQL registration below.
    val hashCode: HashCode = new HashCode(10)

    // Table API: call the UDF instance directly inside select().
    val resultTable: Table = sensorTable.select('id, 'ts, hashCode('id))
    resultTable.toAppendStream[Row].print("result")

    // SQL: the function must be registered by name before it can be
    // referenced in a query. Interpolating the Table auto-registers it
    // under a generated name (legacy-planner idiom).
    tableEnv.registerFunction("hashCode", hashCode)
    val resultSqlTable: Table = tableEnv.sqlQuery(s"select id,ts,hashCode(id) from ${sensorTable}")
    resultSqlTable.toAppendStream[Row].print("sql result")

    env.execute("ScalarFunctionExample")
  }
}

/**
 * Custom scalar function: maps a string field to `str.hashCode * factor`.
 *
 * A ScalarFunction must define at least one public `eval` method; Flink
 * resolves it by reflection, and its parameters correspond to the column
 * values passed at the call site.
 *
 * @param factor multiplier applied to the string's hash code
 */
class HashCode(factor: Int) extends ScalarFunction {
  def eval(str: String): Int = factor * str.hashCode
}
