package com.mjf.networkflow_analysis

import java.sql.Timestamp
import java.text.SimpleDateFormat
import java.util
import java.util.Map

import com.mjf.dim.{ApacheLogEvent, PageViewCount}
import org.apache.flink.api.common.functions.AggregateFunction
import org.apache.flink.api.common.state.{ListState, ListStateDescriptor, MapState, MapStateDescriptor}
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.KeyedProcessFunction
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.scala.function.WindowFunction
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.util.Collector

import scala.collection.mutable.ListBuffer

/**
 * Top-N most-visited pages, computed over a 10-minute sliding window that
 * advances every 5 seconds (event time, 1s bounded out-of-orderness,
 * 60s allowed lateness with a side output for anything later than that).
 */
object NetworkFlowTopNPage {
  def main(args: Array[String]): Unit = {

    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
    env.setParallelism(1)

    // Source: raw Apache access-log lines read from a socket.
    // (A file source is kept below, commented out, for local replay.)
//    val inputStream: DataStream[String] = env.readTextFile("D:\\coding\\idea\\UserBehaviorAnalysis\\NetworkFlowAnalysis\\src\\main\\resources\\apache.log")
    val rawLines: DataStream[String] = env.socketTextStream("hadoop103", 9999)

    // Parser for the log's time field, e.g. "17/05/2015:10:05:03".
    val timeFormat: SimpleDateFormat = new SimpleDateFormat("dd/MM/yyyy:HH:mm:ss")

    // Parse each line into the case class, then attach timestamps/watermarks.
    val logStream: DataStream[ApacheLogEvent] = rawLines
      .map { line =>
        val fields: Array[String] = line.split(" ")
        val eventTime: Long = timeFormat.parse(fields(3)).getTime
        ApacheLogEvent(fields(0), fields(1), eventTime, fields(5), fields(6))
      }
      .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor[ApacheLogEvent](Time.seconds(1)) {
        override def extractTimestamp(element: ApacheLogEvent): Long = element.eventTime
      })

    // Count hits per url in sliding windows; records later than the allowed
    // lateness are routed to a side output instead of being dropped silently.
    val lateTag: OutputTag[ApacheLogEvent] = new OutputTag[ApacheLogEvent]("late-data")
    val pageCounts: DataStream[PageViewCount] = logStream
      .keyBy(_.url)
      .timeWindow(Time.minutes(10), Time.seconds(5))
      .allowedLateness(Time.seconds(60))
      .sideOutputLateData(lateTag)
      .aggregate(new PageCountAgg(), new PageCountWindowResult())

    // Records that arrived after the allowed lateness had expired.
    val lateRecords: DataStream[ApacheLogEvent] = pageCounts.getSideOutput(lateTag)

    // Re-key by window end and rank the per-window counts: Top-3 per window.
    val topNStream: DataStream[String] = pageCounts
      .keyBy(_.windowEnd)
      .process(new TopNHotPage(3))

    logStream.print("data")
    pageCounts.print("agg")
    lateRecords.print("late")
    topNStream.print("result")

    env.execute("NetworkFlowTopNPage")
  }
}

// Incremental pre-aggregation: counts one hit per event.
// Accumulator and result are both the running count (Long).
class PageCountAgg extends AggregateFunction[ApacheLogEvent, Long, Long] {
  // Start every window's count at zero.
  override def createAccumulator(): Long = 0L

  // Each incoming event contributes exactly one hit; the event itself is ignored.
  override def add(value: ApacheLogEvent, accumulator: Long): Long = 1L + accumulator

  // Combine partial counts (used for session-window merging).
  override def merge(a: Long, b: Long): Long = a + b

  // The final count is the accumulator itself.
  override def getResult(accumulator: Long): Long = accumulator
}

// Window wrapper: pairs the pre-aggregated count with its url (the key)
// and the window's end timestamp, producing a PageViewCount record.
class PageCountWindowResult extends WindowFunction[Long, PageViewCount, String, TimeWindow] {
  override def apply(key: String, window: TimeWindow, input: Iterable[Long], out: Collector[PageViewCount]): Unit = {
    // The pre-aggregator emits exactly one value per window, so take the first.
    val hitCount: Long = input.head
    out.collect(PageViewCount(key, window.getEnd, hitCount))
  }
}

/**
 * Ranks pages by hit count within each window (key = windowEnd) and emits a
 * formatted Top-N string shortly after the window closes.
 *
 * Two event-time timers per window:
 *  - windowEnd + 1ms: rank and emit the current counts (may fire again for
 *    late updates within the allowed lateness, since MapState entries are
 *    simply overwritten by `put`);
 *  - windowEnd + 60s: allowed lateness has expired, release the state.
 *
 * Fix vs. previous version: the early `return` in `onTimer` (a Scala
 * anti-pattern — control flow via explicit return) is replaced by an
 * if/else; behavior is unchanged.
 *
 * @param n number of top pages to emit per window
 */
class TopNHotPage(n: Int) extends KeyedProcessFunction[Long, PageViewCount, String] {
  // url -> latest count for the current key's window; MapState so a late
  // update for the same url overwrites rather than duplicates.
  lazy val pageCountMapState: MapState[String, Long] = getRuntimeContext.getMapState(
    new MapStateDescriptor[String, Long]("pagecount-map", classOf[String], classOf[Long]))

  override def processElement(value: PageViewCount, ctx: KeyedProcessFunction[Long, PageViewCount, String]#Context, out: Collector[String]): Unit = {
    pageCountMapState.put(value.url, value.count)
    // Ranking timer: fire just after the window end. Re-registering the same
    // timestamp is a no-op, so repeated calls are harmless.
    ctx.timerService().registerEventTimeTimer(value.windowEnd + 1)
    // Cleanup timer: fire once the 60s allowed lateness is over.
    ctx.timerService().registerEventTimeTimer(value.windowEnd + 60 * 1000L)
  }

  // When a timer fires: either clean up expired state, or rank and emit.
  override def onTimer(timestamp: Long, ctx: KeyedProcessFunction[Long, PageViewCount, String]#OnTimerContext, out: Collector[String]): Unit = {
    // Current key IS the windowEnd, so this identifies the cleanup timer.
    if (timestamp == ctx.getCurrentKey + 60 * 1000L) {
      // No more late data can arrive for this window; drop its counts.
      pageCountMapState.clear()
    } else {
      // Ranking timer: snapshot all (url, count) pairs from state.
      val allPageCounts: ListBuffer[(String, Long)] = ListBuffer()
      val entryIter: util.Iterator[util.Map.Entry[String, Long]] = pageCountMapState.entries().iterator()
      while (entryIter.hasNext) {
        val entry: util.Map.Entry[String, Long] = entryIter.next()
        allPageCounts += ((entry.getKey, entry.getValue))
      }

      // Sort by count descending and keep the top n.
      val topPages: ListBuffer[(String, Long)] = allPageCounts.sortWith(_._2 > _._2).take(n)

      // Format the ranking for display. (timestamp - 1 restores the windowEnd.)
      val result: StringBuilder = new StringBuilder
      result.append("==============================\n\n")
      result.append("时间：").append(new Timestamp(timestamp - 1)).append("\n")
      for (i <- topPages.indices) {
        val (url, count) = topPages(i)
        result.append("Top").append(i + 1).append(":")
          .append(" 页面url=").append(url)
          .append(" 访问量=").append(count)
          .append("\n")
      }

      out.collect(result.toString())
    }
  }
}