package com.chb.userbehavioranalysis.hotitem

import java.sql.Timestamp
import java.util.Properties

import org.apache.flink.api.common.functions.AggregateFunction
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.common.state.{ListState, ListStateDescriptor}
import org.apache.flink.api.java.tuple.Tuple
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.KeyedProcessFunction
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.scala.function.WindowFunction
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer
import org.apache.flink.util.Collector

import scala.collection.mutable.ListBuffer

/**
 * "Real-time hot items": periodically emit the N most-clicked items over a recent
 * sliding time window. The classic formulation is "top N of the last hour,
 * refreshed every 5 minutes"; this demo configures a 30-second window sliding
 * every 5 seconds (see the `timeWindow` call in `main`). The requirement breaks
 * down into these steps:
 * - extract the business timestamp and tell Flink to window on event time
 * - filter down to click ("pv") behavior records
 * - aggregate with a sliding window (Sliding Window)
 * - rank the items within each window and output the top N by click count
 */
object HotItems {
    def main(args: Array[String]): Unit = {

        val env = StreamExecutionEnvironment.getExecutionEnvironment
        import org.apache.flink.streaming.api.scala._

        // Drive windows by the business (event) time carried in the records.
        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

        // Kafka consumer configuration — only used by the commented-out source below.
        val kafkaProps = new Properties()
        kafkaProps.setProperty("bootstrap.servers", "10.0.0.201:9092")
        kafkaProps.setProperty("group.id", "consumer-group")
        kafkaProps.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
        kafkaProps.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
        kafkaProps.setProperty("auto.offset.reset", "latest")

        // Source: the sample CSV on the classpath (swap in the Kafka source for a live feed).
        //val sourceStream = env.addSource(new FlinkKafkaConsumer[String]("hotitems", new SimpleStringSchema(), kafkaProps))
        val sourceStream = env.readTextFile(getClass.getResource("/UserBehavior.csv").getPath)

        // Parse, watermark, window and rank the click stream.
        val topN = sourceStream
            .map { line =>
                val fields = line.split(",")
                // Timestamps in the file are in seconds; Flink works in milliseconds.
                UserBehavior(fields(0).toLong, fields(1).toLong, fields(2).toInt, fields(3), fields(4).toLong * 1000)
            }
            // Watermarks: tolerate events arriving up to 3 seconds out of order.
            .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor[UserBehavior](Time.seconds(3)) {
                override def extractTimestamp(behavior: UserBehavior): Long = behavior.timestamp
            })
            .filter(_.behavior == "pv") // keep click ("page view") events only
            .keyBy(_.itemId)
            .timeWindow(Time.seconds(30), Time.seconds(5)) // sliding window: 30s size, 5s slide
            .aggregate(new CountAgg(), new ResultWindowFunction())
            .keyBy(_.windowEnd) // regroup by window so ranking happens per window
            .process(new TopNHotItems(3))

        topN.print()
        env.execute("hot ItermN")
    }

    // 全量聚合
    /**
     * Collects every ItemViewCount that shares the same windowEnd key, then —
     * once the watermark passes the window end — sorts them by count and emits
     * a formatted top-`topSize` report for that window.
     */
    class TopNHotItems(topSize: Int) extends KeyedProcessFunction[Long, ItemViewCount, String] {
        // Keyed list state holding every per-item count seen for the current window.
        var itemState: ListState[ItemViewCount] = _

        // Initialize the state handle once the runtime context is available.
        override def open(parameters: Configuration): Unit = {
            val itemStateDesc = new ListStateDescriptor[ItemViewCount]("itemState", classOf[ItemViewCount])
            itemState = getRuntimeContext.getListState(itemStateDesc)
        }

        override def processElement(i: ItemViewCount, context: KeyedProcessFunction[Long, ItemViewCount, String]#Context,
                                    collector: Collector[String]): Unit = {
            itemState.add(i)
            // Fire 1 ms after the window end: by then the watermark guarantees all
            // counts for this window have arrived. Re-registering the same timer
            // per element is a no-op, so this is safe.
            context.timerService().registerEventTimeTimer(i.windowEnd + 1)
        }

        // Triggered once per window key; all data for that window is now in state.
        override def onTimer(timestamp: Long, ctx: KeyedProcessFunction[Long, ItemViewCount, String]#OnTimerContext, out: Collector[String]): Unit = {
            val allItems = new ListBuffer[ItemViewCount]()
            // Iterate the Java Iterable explicitly rather than through the
            // deprecated scala.collection.JavaConversions implicit conversions
            // (removed in newer Scala versions).
            val stateIter = itemState.get().iterator()
            while (stateIter.hasNext) {
                allItems += stateIter.next()
            }
            itemState.clear() // the window is complete; release the state

            // Sort by click count, descending, and keep the top N.
            val topNItrem = allItems.sortBy(_.count)(Ordering[Long].reverse).take(topSize)

            // Render the report, e.g. "No1:商品ID=123 浏览量=232".
            val result = new StringBuilder()
            result.append("==================================\n")
            result.append("时间： ").append(new Timestamp((timestamp-1))).append("\n")
            for (idx <- topNItrem.indices) {
                val curItem = topNItrem(idx)
                result.append("No").append(idx+1).append(":")
                result.append("商品ID=").append(curItem.itemId).append(" ")
                result.append("浏览量=").append(curItem.count + "\n")
            }
            result.append("==================================\n\n")
            // NOTE(review): deliberate throttle so the demo output is readable; it
            // blocks the operator thread and must be removed for production use.
            Thread.sleep(1000)
            out.collect(result.toString())
        }
    }
    // Window function that pairs the pre-aggregated count with its item id and
    // window end, producing the ItemViewCount consumed by the ranking step.
    class ResultWindowFunction() extends WindowFunction[Long, ItemViewCount, Long, TimeWindow] {
        // Invoked once per (item, window) when the window fires; `input` carries
        // exactly one element — the count produced by CountAgg.
        override def apply(key: Long, window: TimeWindow, input: Iterable[Long], out: Collector[ItemViewCount]): Unit = {
            val clickCount = input.head
            out.collect(ItemViewCount(key, window.getEnd, clickCount))
        }
    }

    // Incremental (pre-)aggregation: counts one click per incoming record, so the
    // accumulator is simply the number of events seen in the window.
    class CountAgg() extends AggregateFunction[UserBehavior, Long, Long] {
        // Counting starts from zero.
        override def createAccumulator(): Long = 0L

        // Every record contributes exactly one click, regardless of its content.
        override def add(in: UserBehavior, acc: Long): Long = acc + 1

        // The accumulator itself is the final count.
        override def getResult(acc: Long): Long = acc

        // Partial counts combine by addition (used e.g. for session-window merges).
        override def merge(acc: Long, acc1: Long): Long = acc + acc1
    }

}
