package com.udf.flink.scala.apitest.watermark

import java.sql.Timestamp
import java.time.Duration

import org.apache.flink.api.common.eventtime.{SerializableTimestampAssigner, WatermarkStrategy}
import org.apache.flink.api.common.functions.RichMapFunction
import org.apache.flink.api.common.state.{ValueState, ValueStateDescriptor}
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.KeyedProcessFunction
import org.apache.flink.table.api.Tumble

//import com.udf.flink.scala.examples.reduces.{MyProcessFunction2, User}
//import org.apache.flink.api.common.eventtime.{SerializableTimestampAssigner, WatermarkStrategy}
import org.apache.flink.api.common.functions.AggregateFunction
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.scala.function.ProcessWindowFunction
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.apache.flink.util.Collector
/** One event row read from the `ordr` table and processed by the stream.
  *
  * @param user_id key used by the downstream `keyBy`
  * @param count   per-event value (populated from the `cnt` column)
  * @param a       one-character payload column
  * @param rowtime event time carried as a SQL timestamp
  */
case class User(user_id: Int, count: Int, a: String, rowtime: Timestamp)
// Demo: handling late data by redirecting it to a side output.
object LateTest {
  /** Entry point: creates a datagen-backed SQL table with a 20s watermark delay,
    * converts the query result to a DataStream[User], applies a 60s tumbling
    * event-time window per user_id, and prints one element per fired window. */
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    // Run with a single parallel task so printed output is easy to follow.
    env.setParallelism(1)
    // Use event time. NOTE(review): since Flink 1.12 event time is the default
    // and this call is deprecated — safe to remove on current Flink versions.
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
    //
//    val stream = env.socketTextStream("localhost", 9000, '\n')
val tableEnv = StreamTableEnvironment.create(env)
    // DDL for a self-generating source: 200 sequential ids, ~10 rows/s,
    // rowtime computed from the local clock with a 20-second watermark delay.
    val sql= """CREATE TABLE ordr (
              id INT,
              cnt INT,
              user_id INT,
              a STRING,
              rowtime AS localtimestamp,
              WATERMARK FOR rowtime AS rowtime - INTERVAL '20' SECOND
              ) WITH (
                'connector' = 'datagen',
              'rows-per-second'='10',
              'fields.id.kind'='sequence',
              'fields.id.start'='1',
              'fields.id.end'='200',
              'fields.cnt.min'='1',
              'fields.cnt.max'='2',
              'fields.user_id.min'='1',
              'fields.user_id.max'='20',
              'fields.a.length'='1'
              )"""
    tableEnv.executeSql(sql)
//    tableEnv.getConfig.getConfiguration.getString()
//    tableEnv.createTemporarySystemFunction("TsToLong", classOf[TsToLong])
//    1. Strictly ascending timestamps: emit a watermark equal to the largest timestamp
//       observed so far. Rows whose timestamp is smaller than the max are not late.
//    WATERMARK FOR rowtime_column AS rowtime_column
//
//    2. Ascending timestamps: emit a watermark of the max observed timestamp minus 1 ms.
//       Rows whose timestamp is equal to or smaller than the max are not late.
//    WATERMARK FOR rowtime_column AS rowtime_column - INTERVAL '0.001' SECOND.
//
//    3. Bounded out-of-orderness: emit a watermark of the max observed timestamp minus a
//       fixed delay, e.g. WATERMARK FOR rowtime_column AS rowtime_column - INTERVAL '5' SECOND
//       is a watermark strategy with a 5-second delay.
//    WATERMARK FOR rowtime_column AS rowtime_column - INTERVAL 'string' timeUnit.

    val tb=tableEnv.sqlQuery("""select user_id,cnt,a,rowtime rowtime from ordr""")
    // Convert the Table to a typed DataStream; the SQL watermark is carried over,
    // so the commented-out assignTimestampsAndWatermarks below is redundant here.
    tableEnv.toDataStream(tb,classOf[User])
//      .assignTimestampsAndWatermarks( WatermarkStrategy
//        .forBoundedOutOfOrderness(Duration.ofSeconds(60))
//        .withTimestampAssigner(new SerializableTimestampAssigner[User] {
//          override def extractTimestamp(element: User,recordTimestamp: Long): Long = element.rowtime.getTime
//        }))
      .keyBy(user=>user.user_id)
      .window(TumblingEventTimeWindows.of(Time.seconds(60)))
      .process(new PrintWindow).print()

//    val tb2=tableEnv.sqlQuery(
//      """select tumble_start(rowtime, INTERVAL '20' SECOND) starts,sum(cnt) scnt
//        |from ordr GROUP BY tumble(rowtime, INTERVAL '20' SECOND)""".stripMargin)
//    tableEnv.toDataStream(tb2).print()
//    // Input format: "a 100 12"   "a 99 14"
//    val value = stream.map(x => {
//      val str = x.split(" ")
//      (str(0), str(1).toInt, str(2).toLong * 1000)
//      // Ascending watermarks: no lateness allowance configured
//    }).assignAscendingTimestamps(x => x._3).keyBy(_._1).timeWindow(Time.seconds(5))
//      // Redirect late records straight into the "late_date" side-output stream
//      .sideOutputLateData(new OutputTag[(String, Int, Long)]("late_date"))
//      //.process(new MaxFunction)
//      .aggregate(new MaxFunction)
//
//    // Fetch the "late_date" side-output stream and print it
//    value.getSideOutput(new OutputTag[(String,Int,Long)]("late_date")).print()
    env.execute()

    /* stream.map(x=>{
      val str = x.split(" ")
      (str(0),str(1).toLong*1000)*/

  }

  // Variant using a full (process) window function
  /*class MaxFunction extends ProcessWindowFunction[(String,Int,Long),(String,Int),String,TimeWindow]{
    override def process(key: String, context: Context, elements: Iterable[(String, Int, Long)], out: Collector[(String, Int)]): Unit = {

      out.collect((key,elements.map(_._2).toIterator.max))
    }

  }*/

  // AggregateFunction type params: IN = (key, value, ts), ACC = OUT = (key, max value)
  // Variant using an incremental aggregate function (only used by the commented-out
  // socket pipeline above).
  class MaxFunction extends AggregateFunction[(String,Int,Long),(String,Int),(String,Int)]{
    // Accumulate: keep the running maximum of the value field, carrying the input's key
    override def add(in: (String, Int, Long), acc: (String, Int)): (String, Int) = {
      (in._1,in._2.max(acc._2))}
    // Initialize the accumulator: empty key, max of 0
    override def createAccumulator(): (String, Int) =
      ("",0)
    // Return the final aggregation result
    override def getResult(acc: (String, Int)): (String, Int) =
      acc
    // Merge two partial accumulators.
    // NOTE(review): keeps the first accumulator's key, which may still be the
    // empty initial key if that side never saw an element — confirm acceptable.
    override def merge(acc: (String, Int), acc1: (String, Int)): (String, Int) =
      (acc._1,acc._2.max(acc1._2))
  }
}
/** For every fired window, emits a pair of (window start millis, first element
  * of the window). Keyed by user_id (Int). */
class PrintWindow extends ProcessWindowFunction[User, (Long, User), Int, TimeWindow] {
  override def process(key: Int, context: Context, elements: Iterable[User], out: Collector[(Long, User)]): Unit = {
    // A fired window contains at least one element; take the first one.
    val firstElement = elements.head
    out.collect((context.window.getStart, firstElement))
  }
}

/** Maps each User to a diagnostic string comparing the record's event time
  * with the operator's open time and the current processing time. */
class PrintEvent extends RichMapFunction[User, String] {
  // Wall-clock time captured once, when this task instance is opened.
  var startTime: Long = _

  override def open(parameters: Configuration): Unit = {
    startTime = System.currentTimeMillis()
  }

  override def map(in: User): String = {
    // Per-record processing time, rendered next to the record's event time.
    val detail = s"${in.rowtime}处理时间:${System.currentTimeMillis()}"
    s"开始时间:$startTime, 当前数据$detail"
  }

  override def close(): Unit = {}
}

/** Per-key running total of `User.count`, emitted as a string on every element.
  * NOTE(review): despite the class name, this computes a running SUM, not an
  * average — the name is kept unchanged for caller compatibility. */
class MyAverageComputer extends KeyedProcessFunction[String, User, String] {
  // Keyed state: one ValueState instance per key partition, holding the running sum.
  lazy val sum: ValueState[Int] =
    getRuntimeContext.getState(new ValueStateDescriptor[Int]("sum", classOf[Int]))
  override def processElement(i: User,
                              ctx: KeyedProcessFunction[String, User, String]#Context,
                              collector: Collector[String]): Unit = {
    // ValueState[Int] yields 0 before the first update for this key,
    // so no null/initialization check is needed.
    val updated = sum.value() + i.count
    sum.update(updated) // persist the new running sum back into keyed state
    // Emit the updated total. (Removed: a dead read of currentWatermark()
    // whose result was never used.)
    collector.collect(s"${i.a}_sum: $updated")
  }
}
//https://www.cnblogs.com/bbgs-xc/p/13431538.html