package com.atguigu.networkflow_analysis

import java.sql.Timestamp
import java.{lang, util}
import java.text.SimpleDateFormat
import java.util.{Date, Map}

import org.apache.flink.api.common.functions.AggregateFunction
import org.apache.flink.api.common.state.{ListState, ListStateDescriptor, MapState, MapStateDescriptor}
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.KeyedProcessFunction
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.scala.function.WindowFunction
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.util.Collector

import scala.collection.mutable.ListBuffer

/**
 * 基于服务器log的热门页面浏览量统计   pv  (page view)
 * 每隔5秒，输出最近10分钟内访问量最多的前N个URL
 *
 * Project: UserBehaviorAnalysis
 * Package: com.atguigu.networkflow_analysis
 * Version: 1.0
 *
 * Created by  WangJX  on 2019/12/11 19:22
 */
object NetworkFlow {
  /**
   * Hot-page PV statistics job: every 5 seconds, emit the top-N most visited
   * URLs over the last 10 minutes, driven by server access-log events.
   */
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Drive windows by event time (Flink defaults to processing time).
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    env.setParallelism(1)

    // Tag for events that arrive too late even for allowedLateness.
    val outputTag = new OutputTag[ApacheLogEvent]("output")

    //    val path: String = getClass.getResource("apache.log").getPath
    // Kept so the job can be switched back to the bounded file source below.
    val path: String = NetworkFlow.getClass.getClassLoader.getResource("apache.log").getPath

    //    val value = env.readTextFile(path)
    val value = env.socketTextStream("localhost", 7777)
      .map(
        data => {
          val dataArrays: Array[String] = data.split(" ")

          // BUGFIX: the pattern was "dd/MM/yyyy:HH:DD:ss" — 'DD' means
          // day-of-year, not minutes, so every parsed timestamp had a corrupted
          // minute field. 'mm' is minute-of-hour.
          // SimpleDateFormat is not thread-safe, so one instance per record is
          // the simple safe choice for this demo pipeline.
          val date: Date = new SimpleDateFormat("dd/MM/yyyy:HH:mm:ss").parse(dataArrays(3))
          ApacheLogEvent(dataArrays(0).trim, date.getTime, dataArrays(5).trim, dataArrays(6).trim)
        }
      )
      .filter(_.method == "GET")
      // Watermarks tolerate up to 1 second of out-of-order events.
      .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor[ApacheLogEvent](Time.seconds(1)) {
        override def extractTimestamp(element: ApacheLogEvent): Long = element.eventTime
      })


    // 10-minute windows sliding every 5 seconds, keyed by URL.
    val filterData = value.keyBy(_.url)
      .timeWindow(Time.minutes(10), Time.seconds(5))

    // Late-data handling. Note: with sliding windows an event is routed to the
    // side output only when it belongs to NO still-open window; otherwise the
    // matching window simply re-fires.
    val sideOutputData = filterData.allowedLateness(Time.minutes(1))
      .sideOutputLateData(outputTag)
      .aggregate(new MyCount(), new MyWindow())     // incremental count per (url, window); state kept inside the window operator

    // Per-window counts without lateness handling (kept for comparison output).
    val windowData = filterData
      .aggregate(new MyCount(), new MyWindow())   // incremental count per (url, window)

    // Rank URLs per window end and emit the top 3.
    val aggregateData = windowData.keyBy(_.windowEnd)
      .process(new MyProcess(3))


    value.print("value").setParallelism(1)
    // Only late events that missed every open window end up in this stream.
    sideOutputData.getSideOutput(outputTag).print("sideOutputData").setParallelism(1)

    windowData.print("windowData")
    aggregateData.print("aggregateData").setParallelism(1)


    env.execute("NetworkFlow job")
  }

}

// Input case class: one parsed line of the Apache access log.
case class ApacheLogEvent(
                           ip: String,      // client IP (first field of the log line)
                           eventTime: Long, // epoch millis parsed from the log timestamp
                           method: String,  // HTTP method, e.g. "GET"
                           url: String      // requested URL path
                         )

// Intermediate result: view count of one URL within one window.
case class UrlViewCount(
                         url: String,      // the page URL (window key)
                         windowEnd: Long,  // end timestamp of the window (epoch millis)
                         count: Long       // number of views of this URL in the window
                       )


/**
 * Incremental per-window counter: every incoming log event adds one to the
 * accumulator, so the window operator stores a single Long instead of
 * buffering all events.
 */
class MyCount() extends AggregateFunction[ApacheLogEvent, Long, Long] {
  // Fresh window starts counting from zero.
  override def createAccumulator(): Long = 0L

  // One event equals one page view.
  override def add(value: ApacheLogEvent, accumulator: Long): Long = 1L + accumulator

  // The accumulator already is the final count.
  override def getResult(accumulator: Long): Long = accumulator

  // Combine partial counts (used when windows are merged).
  override def merge(a: Long, b: Long): Long = b + a
}

/**
 * Wraps the pre-aggregated page-view count with its URL key and the window's
 * end timestamp, producing one UrlViewCount per (url, window).
 */
class MyWindow() extends WindowFunction[Long, UrlViewCount, String, TimeWindow] {
  override def apply(key: String, window: TimeWindow, input: Iterable[Long], out: Collector[UrlViewCount]): Unit = {
    // `input` comes from the AggregateFunction, so it carries the final count.
    val pv: Long = input.last
    val result = UrlViewCount(url = key, windowEnd = window.getEnd, count = pv)
    out.collect(result)
  }
}

/**
 * Keyed by windowEnd: collects the per-URL counts of one window and, once the
 * window is complete (event-time timer at windowEnd + 1), emits the TopN URLs
 * ranked by view count.
 *
 * @param TopN number of top URLs to emit per window
 */
class MyProcess(TopN: Int) extends KeyedProcessFunction[Long, UrlViewCount, String] {


  // MapState keyed by URL so a late re-fire (allowedLateness) for the same URL
  // overwrites its previous count instead of producing a duplicate entry.
  // NOTE(review): "map_stae" is a typo, but renaming the descriptor would break
  // savepoint/state restore compatibility, so it is deliberately kept.
  lazy val mapState: MapState[String, Long] = getRuntimeContext.getMapState(new MapStateDescriptor[String, Long]("map_stae", classOf[String], classOf[Long]))


  override def processElement(value: UrlViewCount, ctx: KeyedProcessFunction[Long, UrlViewCount, String]#Context, out: Collector[String]): Unit = {
    // Upsert: a late update for the same URL replaces the stale count.
    mapState.put(value.url, value.count)

    // Fire 1 ms after the window end; registering the same timestamp again for
    // this key is idempotent, so every element may safely re-register it.
    ctx.timerService().registerEventTimeTimer(value.windowEnd + 1)
  }

  override def onTimer(timestamp: Long, ctx: KeyedProcessFunction[Long, UrlViewCount, String]#OnTimerContext, out: Collector[String]): Unit = {
    // Snapshot all (url, count) pairs of this window into a local buffer.
    val entry = mapState.entries().iterator()

    val list = new ListBuffer[(String, Long)]()
    while (entry.hasNext) {
      val data: Map.Entry[String, Long] = entry.next()
      list += ((data.getKey, data.getValue))
    }

    // With allowedLateness the state must survive until the window is finally
    // purged: clearing here would drop counts that late re-fires still need.
    // (The upstream aggregate keeps its own window state anyway, which is why
    // clearing appeared to have no effect.)
    //    mapState.clear()

    // Rank by count, descending, keep the top N.
    val tuples: ListBuffer[(String, Long)] = list.sortWith(_._2 > _._2).take(TopN)


    val sb = new StringBuilder()
    sb.append("============================\n")
    sb.append("关闭时间：").append(new Timestamp(timestamp - 1)).append("\n")

    for (elem <- tuples.indices) {
      sb.append("NO:").append(elem + 1)
        .append("  URL=").append(tuples(elem)._1)
        .append(" 流量=").append(tuples(elem)._2)
        .append("\n")
    }

    // Removed `Thread.sleep(100L)`: sleeping in onTimer blocks the task thread,
    // delaying checkpoints and other timers. Never pace output by sleeping.

    out.collect(sb.toString())
  }
}

