package com.boot.study.udf

import com.boot.study.api.SensorReading
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.table.api._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.functions.TableAggregateFunction
import org.apache.flink.types.Row
import org.apache.flink.util.Collector

object TableAggregateFunctionTest {
  def main(args: Array[String]): Unit = {
    // 1: Set up the streaming execution environment.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    // Use EVENT time semantics: timestamps are taken from the data itself,
    // with watermarks generated by the extractor below.
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    // Create the table execution environment backed by the Blink planner.
    val plannerSettings: EnvironmentSettings = EnvironmentSettings.newInstance()
      .useBlinkPlanner()
      .inStreamingMode()
      .build()
    val tableEnv: StreamTableEnvironment = StreamTableEnvironment.create(env, plannerSettings)

    // Read raw CSV lines ("id,timestamp,temperature") and parse each into a SensorReading.
    val sourcePath: String = "D:\\WorkSpace\\idea\\Flink\\src\\main\\resources\\sensor.txt"
    val rawLines: DataStream[String] = env.readTextFile(sourcePath)
    val readings: DataStream[SensorReading] = rawLines
      .map(line => {
        val fields: Array[String] = line.split(",")
        SensorReading(fields(0), fields(1).toLong, fields(2).toDouble)
      })
      // Tolerate events arriving up to 1 second out of order before the watermark advances.
      .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor[SensorReading](Time.seconds(1)) {
        // timeStamp is in seconds in the source file; Flink expects milliseconds.
        override def extractTimestamp(element: SensorReading): Long = element.timeStamp * 1000
      })
    val sensorTable: Table = tableEnv.fromDataStream(readings, 'id, 'temperature, 'timeStamp.rowtime as 'ts)

    // 1: Table API — apply the custom table aggregate to get the top-2 temperatures per sensor.
    val top2Temp = new Top2Temp
    val resultTable: Table = sensorTable
      .groupBy('id)
      .flatAggregate(top2Temp('temperature) as('temp, 'rank))
      .select('id, 'temp, 'rank)

    // 2: SQL version omitted — table aggregate functions are awkward to express in SQL.

    // 3: Print as a retract stream, since flatAggregate revises previously emitted rows.
    resultTable.toRetractStream[Row].print("table")

    env.execute(" table aggregate function test")
  }
}

// Accumulator class holding the running top-2 state for one group (one sensor id).
// NOTE(review): field name "secondHighsetTemp" is misspelled, but it is read/written
// by Top2Temp below, so renaming it would have to change both classes together.
class Top2TempAcc {
  // Highest temperature seen so far; Double.MinValue means "no reading yet".
  var highestTemp: Double = Double.MinValue
  // Second-highest temperature seen so far; Double.MinValue means "fewer than two readings".
  var secondHighsetTemp: Double = Double.MinValue
}

// Custom table aggregate function: tracks the two highest temperatures seen and
// emits them as (temperature, rank) rows, where rank 1 is the highest and rank 2
// the second highest.
class Top2Temp extends TableAggregateFunction[(Double, Int), Top2TempAcc] {
  override def createAccumulator(): Top2TempAcc = new Top2TempAcc

  // Fold a single temperature reading into the accumulator.
  def accumulate(acc: Top2TempAcc, temp: Double) = {
    temp match {
      case t if t > acc.highestTemp =>
        // New maximum: the previous maximum drops to second place.
        acc.secondHighsetTemp = acc.highestTemp
        acc.highestTemp = t
      case t if t > acc.secondHighsetTemp =>
        // Beats only the runner-up: replace it.
        acc.secondHighsetTemp = t
      case _ =>
        // Not in the top 2: accumulator unchanged.
    }
  }

  // Emit the current top-2 as (value, rank) rows. Note: if fewer than two readings
  // have been seen, the unset slot(s) still emit as Double.MinValue.
  def emitValue(acc: Top2TempAcc, out: Collector[(Double, Int)]) = {
    Seq((acc.highestTemp, 1), (acc.secondHighsetTemp, 2)).foreach(out.collect)
  }
}


