package cn.azzhu.proj

import java.sql.Timestamp

import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.api.scala._
import org.apache.flink.table.api.bridge.scala.{StreamTableEnvironment, tableConversions}
import org.apache.flink.table.api._

/**
 * Real-time "hot items" top-N statistics implemented with Flink SQL.
 *
 * Difference from the low-level DataStream API implementation:
 * SQL cannot combine incremental (pre-)aggregation with full-window
 * aggregation, so this version keeps much more state/storage.
 *
 * @author azzhu
 * @create 2020-09-23 13:50:24
 */
object UserBehaviourAnalysisBySQL {
  /**
   * One user-behaviour record parsed from UserBehavior.csv.
   *
   * @param userId     id of the acting user
   * @param itemId     id of the item acted upon
   * @param categoryId item category id
   * @param behaviour  behaviour type, e.g. "pv" (page view)
   * @param timestamp  event time in milliseconds
   */
  case class UserBehaviour(userId: Long,
                          itemId: Long,
                          categoryId: Int,
                          behaviour: String,
                          timestamp: Long)

  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)

    // Event time is required so historical data can be replayed deterministically.
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    val stream = env
      .readTextFile("D:\\bigdata\\flink-learning\\src\\main\\resources\\UserBehavior.csv")
      .map(line => {
        val arr = line.split(",")
        // NOTE: timestamps in the file are in seconds; Flink expects milliseconds.
        UserBehaviour(arr(0).toLong, arr(1).toLong, arr(2).toInt, arr(3), arr(4).toLong * 1000)
      })
      .filter(_.behaviour.equals("pv")) // keep only page-view events
      .assignAscendingTimestamps(_.timestamp) // timestamps are ascending in the source file

    val settings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner()
      .inStreamingMode()
      .build()

    val tEnv = StreamTableEnvironment.create(env, settings)

    // Expose the stream as table `t` with itemId and event-time attribute `ts`.
    tEnv.createTemporaryView("t", stream, 'itemId, 'timestamp.rowtime as 'ts)

    // Query structure:
    //  - innermost subquery  ~ stream.keyBy(itemId).timeWindow(1h).aggregate(count)
    //  - middle subquery     ~ keyBy(windowEnd).process(rank by count desc)
    //  - outermost query     ~ take the top 5 per window
    // TUMBLE_END yields the window END time; the original used TUMBLE_START,
    // which emitted the window START under the misleading alias `windowEnd`.
    tEnv
      .sqlQuery(
        """
          |SELECT *
          |FROM (
          |    SELECT *,
          |        ROW_NUMBER() OVER
          |        (PARTITION BY windowEnd ORDER BY icount DESC) as row_num
          |    FROM
          |    (SELECT count(itemId) as icount,
          |     TUMBLE_END(ts, INTERVAL '1' HOUR) as windowEnd
          |     FROM t GROUP BY TUMBLE(ts, INTERVAL '1' HOUR), itemId) topn)
          |WHERE row_num <= 5
          |""".stripMargin
      )
      // The top 5 of each window keeps changing, so emit a retract stream.
      .toRetractStream[(Long, Timestamp, Long)]
      .filter(_._1) // keep additions/updates, drop retraction messages
      .print()

    //stream.print()

    env.execute("UserBehaviourAnalysisBySQL")
  }

}
