package com.mjf.day8

import com.mjf.day3.SensorReading
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.table.api.Table
import org.apache.flink.table.api.scala._
import org.apache.flink.table.functions.{AggregateFunction, ScalarFunction, TableFunction}
import org.apache.flink.types.Row

/**
 * 自定义函数：聚合函数（AggregateFunction）
 *  可以把一个表中的数据，聚合成一个标量值（未实现）
 */
object AggregateFunctionExample {
  def main(args: Array[String]): Unit = {

    // Streaming environment: single parallelism, event-time semantics.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    // Read the sensor file, parse each CSV line into a SensorReading, and
    // attach a 1-second bounded-out-of-orderness watermark; timestamps in the
    // file are seconds, so scale to milliseconds for Flink.
    val readings: DataStream[SensorReading] = env
      .readTextFile("D:\\coding\\idea\\flink-stu\\src\\main\\input\\sensor.txt")
      .map { line =>
        val fields: Array[String] = line.split(",")
        SensorReading(fields(0), fields(1).toLong, fields(2).toDouble)
      }
      .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor[SensorReading](Time.seconds(1)) {
        override def extractTimestamp(element: SensorReading): Long = element.timestamp * 1000L
      })

    // Table environment plus a Table view over the stream; the `timestamp`
    // field is exposed as the event-time attribute `ts`.
    val tableEnv: StreamTableEnvironment = StreamTableEnvironment.create(env)
    val sensorTable: Table =
      tableEnv.fromDataStream(readings, 'id, 'timestamp.rowtime as 'ts, 'temperature)

    env.execute("AggregateFunctionExample")
  }
}

// 自定义聚合函数
/**
 * Custom aggregate function: accumulates into a `Double` and emits an `Int`.
 *
 * Fixes over the original:
 *  - `getValue` returned `???`, so it threw `NotImplementedError` as soon as
 *    Flink evaluated the aggregate; it now returns the accumulated value
 *    truncated to `Int`.
 *  - the per-row method was misnamed `accumulator`; Flink resolves it
 *    reflectively and requires the exact name `accumulate`, so the original
 *    function could never receive input rows.
 */
class MyAgg extends AggregateFunction[Int, Double] {

  /** Final result: the accumulated Double, truncated toward zero. */
  override def getValue(accumulator: Double): Int = accumulator.toInt

  /** Fresh accumulator, starting from zero (Double literal, not 0L). */
  override def createAccumulator(): Double = 0.0

  /**
   * Invoked by Flink once per input row.
   *
   * NOTE(review): a primitive `Double` accumulator cannot be updated in place
   * from a Unit-returning method, so this body is intentionally empty; for a
   * working aggregation the accumulator type should be a class with a mutable
   * field (e.g. a `var sum: Double`) — TODO confirm and switch ACC when the
   * aggregation logic is implemented.
   */
  def accumulate(accumulator: Double, field: String): Unit = {
  }
}
