package com.mjf.hotitems_analysis

import java.sql.Timestamp
import java.util.Properties

import com.mjf.dim.{ItemViewCount, UserBehavior}
import org.apache.flink.api.common.functions.AggregateFunction
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.common.state.{ListState, ListStateDescriptor}
import org.apache.flink.api.java.tuple.{Tuple, Tuple1}
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.KeyedProcessFunction
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.scala.function.WindowFunction
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer
import org.apache.flink.util.Collector

import scala.collection.mutable.ListBuffer

/**
 * 实时热门商品TopN
 */
/**
 * Real-time hot-items TopN job: reads user-behavior events from Kafka,
 * counts "pv" events per item over a 60-minute window sliding every 5 minutes,
 * then emits the top-5 items per window end.
 */
object HotItems {
  def main(args: Array[String]): Unit = {

    // Streaming environment: single parallelism, event-time semantics.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    // Alternative file source (kept for local debugging):
//    val inputStream: DataStream[String] = env.readTextFile("D:\\coding\\idea\\UserBehaviorAnalysis\\HotItemsAnalysis\\src\\main\\resources\\UserBehavior.csv")

    // Kafka consumer configuration.
    val kafkaProps = new Properties()
    kafkaProps.setProperty("bootstrap.servers", "hadoop103:9092")
    kafkaProps.setProperty("group.id", "consumer-group")
    kafkaProps.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    kafkaProps.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    kafkaProps.setProperty("auto.offset.reset", "latest")

    // Raw CSV lines from the "hotitems" topic.
    val rawLines: DataStream[String] =
      env.addSource(new FlinkKafkaConsumer[String]("hotitems", new SimpleStringSchema(), kafkaProps))

    // Parse CSV into the UserBehavior case class and assign event-time
    // timestamps. Input is assumed in ascending timestamp order, so no
    // out-of-orderness bound is needed (timestamps are in seconds -> ms).
    val behaviors: DataStream[UserBehavior] = rawLines
      .map { line =>
        val fields = line.split(",")
        UserBehavior(fields(0).toLong, fields(1).toLong, fields(2).toInt, fields(3), fields(4).toLong)
      }
      .assignAscendingTimestamps(_.timestamp * 1000L)

    // Keep only page-view events, key by item, and pre-aggregate counts
    // per sliding window (1h size / 5min slide). String keyBy keeps the
    // key type as Tuple, which the downstream functions expect.
    val windowedCounts: DataStream[ItemViewCount] = behaviors
      .filter(_.behavior == "pv")
      .keyBy("itemId")
      .timeWindow(Time.minutes(60), Time.minutes(5))
      .aggregate(new CountAgg(), new ItemCountWindowResult())

    // Group per-window results by window end and rank the top 5 items.
    val topItems: DataStream[String] = windowedCounts
      .keyBy("windowEnd")
      .process(new TopNHotItems(5))

    topItems.print()

    env.execute(HotItems.getClass.getName)

  }
}


// 自定义预聚合函数
// Incremental pre-aggregation function: counts elements in a window.
// Accumulator and result are both a plain Long counter.
class CountAgg extends AggregateFunction[UserBehavior, Long, Long] {
  // Start every window at zero.
  override def createAccumulator(): Long = 0L

  // Each incoming element increments the counter by one.
  override def add(value: UserBehavior, accumulator: Long): Long = 1L + accumulator

  // The final window result is the counter itself.
  override def getResult(accumulator: Long): Long = accumulator

  // Merging two partial windows sums their counters (used for session/merge windows).
  override def merge(a: Long, b: Long): Long = b + a
}


// 自定义窗口函数，结合window信息包装成样例类
// 输入类型为预聚合函数的输出，KEY类型为分组字段（itemId）类型
// Window function that wraps the pre-aggregated count together with window
// metadata into an ItemViewCount. Input is the single Long produced by
// CountAgg; the key is the Java Tuple1 produced by the string-based keyBy.
class ItemCountWindowResult extends WindowFunction[Long, ItemViewCount, Tuple, TimeWindow] {
  override def apply(key: Tuple, window: TimeWindow, input: Iterable[Long], out: Collector[ItemViewCount]): Unit = {
    // String keyBy yields a Java Tuple1; field f0 holds the itemId.
    val id = key.asInstanceOf[Tuple1[Long]].f0
    // The pre-aggregator emits exactly one value per window.
    out.collect(ItemViewCount(id, window.getEnd, input.head))
  }
}


// 自定义 KeyProcessFunction
class TopNHotItems(n: Int) extends KeyedProcessFunction[Tuple, ItemViewCount, String] {

  // 定义一个ListState,用来保存当前窗口所有的count结果
  lazy val itemCountListState: ListState[ItemViewCount] = getRuntimeContext.getListState(new ListStateDescriptor[ItemViewCount]("itemcount-list", classOf[ItemViewCount]))

  override def processElement(value: ItemViewCount, ctx: KeyedProcessFunction[Tuple, ItemViewCount, String]#Context, out: Collector[String]): Unit = {
    // 每来一条数据，就把它保存到状态中
    itemCountListState.add(value)
    // 注册定时器，在 windowEnd + 100 触发
    ctx.timerService().registerEventTimeTimer(value.windowEnd + 100)  // 定时器重复注册只会触发一次
  }

  // 定时触发时，从状态中取数据，然后排序输出
  override def onTimer(timestamp: Long, ctx: KeyedProcessFunction[Tuple, ItemViewCount, String]#OnTimerContext, out: Collector[String]): Unit = {
    // 先把状态中的数据提取到一个ListBuffer中
    val allItemCountList: ListBuffer[ItemViewCount] = ListBuffer()

    // ListState是Java接口，使用Scala方式遍历时需要引入
    import scala.collection.JavaConversions._

    for(itemCount <- itemCountListState.get()) {
      allItemCountList += itemCount
    }

    // 按照count值大小排序，取TopN
    val sortedItemCountList: ListBuffer[ItemViewCount] = allItemCountList.sortBy(-_.count).take(n)

    // 清除状态
    itemCountListState.clear()

    // 将排名信息格式化为String，方便显示
    val result: StringBuilder = new StringBuilder

    result.append("==============================\n\n")
    result.append("时间：").append(new Timestamp(timestamp - 100)).append("\n")
    // 遍历sorted列表，输出TopN信息
    for(i <- sortedItemCountList.indices) {
      // 获取当前商品信息
      val currentItemCount: ItemViewCount = sortedItemCountList(i)
      result.append("Top").append(i+1).append(":")
        .append(" 商品ID=").append(currentItemCount.itemId)
        .append(" 访问量=").append(currentItemCount.count)
        .append("\n")
    }

    out.collect(result.toString())

  }

}