package com.xzc.apitest.tabletest

import com.xzc.apitest.source.SensorReading
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.table.api.{EnvironmentSettings, Over, Tumble}
import org.apache.flink.table.api.scala._
import org.apache.flink.types.Row

object TimeAndWindowTest {

  /**
   * Demonstrates event-time windowing over a stream of sensor readings using
   * both the Table API and SQL:
   *   1. a 10-second tumbling group window aggregating per sensor id, and
   *   2. an over window averaging each row with its two preceding rows.
   *
   * @param args optional; `args(0)` overrides the default input file path
   *             (falls back to the original hard-coded sample file).
   */
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    // Use event time so the rowtime attribute below is driven by watermarks.
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    // The Blink planner is the default since Flink 1.11 (older versions used
    // the legacy planner); select it explicitly for clarity.
    val settings = EnvironmentSettings.newInstance()
      .useBlinkPlanner()
      .inStreamingMode()
      .build()
    // Create the table environment on top of the streaming environment.
    val tableEnv = StreamTableEnvironment.create(env, settings)

    // Allow the input path to be passed as the first program argument; keep
    // the original path as the default for backward compatibility.
    val inputPath = args.headOption.getOrElse(
      "D:\\git\\learning_flink\\_01_试用\\src\\main\\resources\\sensor.txt")
    val inputStream = env.readTextFile(inputPath)

    // Parse each CSV line into a SensorReading case class.
    // Fields are trimmed so lines with stray whitespace around commas
    // (e.g. "sensor_1, 1547718199, 35.8") still parse correctly.
    val dataStream = inputStream
      .map(data => {
        val arr = data.split(",").map(_.trim)
        SensorReading(arr(0), arr(1).toLong, arr(2).toDouble)
      }).assignTimestampsAndWatermarks(
      // Assign timestamps/watermarks before converting to a table so the
      // event-time attribute can be used directly in the table (3 seconds of
      // allowed out-of-orderness; timestamps are in seconds, hence * 1000L).
      new BoundedOutOfOrdernessTimestampExtractor[SensorReading](Time.seconds(3)) {
        override def extractTimestamp(element: SensorReading): Long = element.timestamp * 1000L
      }
    )

    // Build the dynamic table. The commented variant would append a
    // processing-time attribute instead:
    //    val sensorTable = tableEnv.fromDataStream(dataStream,
    //      'id, 'temperature, 'timestamp, 'pt.proctime)
    // Here .rowtime exposes the stream's watermarked event time as 'ts,
    // replacing the original timestamp field.
    val sensorTable = tableEnv.fromDataStream(dataStream,
      'id, 'temperature, 'timestamp.rowtime as 'ts)

    //    sensorTable.printSchema()
    //    sensorTable.toAppendStream[Row].print()

    // 1. Group window
    // 1.1 Table API: 10-second tumbling window, grouped by sensor id;
    // also select the window end time ('tw.end) to show when each window closed.
    val resultTable = sensorTable
      .window(Tumble over 10.seconds on 'ts as 'tw)
      .groupBy('id, 'tw)
      .select('id, 'id.count, 'temperature.avg, 'tw.end)

    // 1.2 SQL: same tumbling aggregation expressed with TUMBLE/TUMBLE_END.
    tableEnv.createTemporaryView("sensor", sensorTable)
    val resultSqlTable = tableEnv.sqlQuery(
      """
        |select
        | id,
        | count(id),
        | avg(temperature),
        | tumble_end(ts, interval '10' second)
        | from sensor
        | group by
        |   id,
        |   tumble(ts, interval '10' second)
        |""".stripMargin
    )

    // Convert to a stream and print.
    //    resultTable.toAppendStream[Row].print("result")
    // Retract and append behave the same here: once a window closes its
    // result is final. Only with allowed lateness and late-arriving data
    // would the aggregate be updated (requiring retract semantics).
    //    resultSqlTable.toRetractStream[Row].print("sql")

    // 2. Over window
    // 2.1 Table API: for each row, average the temperature of that row and
    // the 2 preceding rows per sensor, ordered by event time.
    val overResultTable = sensorTable
      .window(Over partitionBy 'id orderBy 'ts preceding 2.rows as 'ow)
      .select('id, 'ts, 'id.count over 'ow, 'temperature.avg over 'ow)

    // 2.2 SQL: same over window via a named WINDOW clause.
    val overResultSqlTable = tableEnv.sqlQuery(
      """
        |select
        | id,
        | ts,
        | count(id) over ow,
        | avg(temperature) over ow
        | from sensor
        | window ow as (
        |   partition by id
        |   order by ts
        |   rows between 2 preceding and current row
        | )
        |""".stripMargin
    )

    overResultTable.toAppendStream[Row].print("result")
    overResultSqlTable.toAppendStream[Row].print("sql   ")

    env.execute("table time and window test")
  }

}
