package com.wdl.user

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer

import java.util.Properties

/**
 * Flink streaming job: computes the top-N hot items by page-view count
 * over a sliding event-time window (1 hour wide, sliding every 5 minutes).
 *
 * Pipeline: Kafka source -> parse CSV into UserBehavior -> filter "pv"
 * events -> key by itemId -> windowed count -> key by windowEnd ->
 * per-window top-N ranking -> print.
 */
object HotItems {

  def main(args: Array[String]): Unit = {

    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    /** Single parallelism so printed results are globally ordered per window. */
    env.setParallelism(1)

    /** Use event time (timestamps carried in the records, not wall clock). */
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    /** Source: Kafka topic (a local CSV file can be substituted for testing). */
    val inputStream: DataStream[String] =
//      env.readTextFile("E:\\bigdata_project\\UserBehaviorAnalysis\\HotItemsAnalysis\\src\\main\\resources\\UserBehavior.csv")
    env.addSource( new FlinkKafkaConsumer[String](KafkaUtil.topic, new SimpleStringSchema(), KafkaUtil.kafkaConsumerProperties()) )

    val aggStream: DataStream[UserBehavior] = inputStream.map(
      data => {
        val dataArray: Array[String] = data.split(",")
        /** Sample record: 543462,1715,1464116,pv,  1511658000 — note the
         *  stray whitespace before the timestamp: each field must be trimmed,
         *  otherwise `.toLong` throws NumberFormatException. */
        UserBehavior(
          dataArray(0).trim.toLong,
          dataArray(1).trim.toLong,
          dataArray(2).trim.toInt,
          dataArray(3).trim,
          dataArray(4).trim.toLong
        )
      })
      /** Timestamps are ascending epoch seconds; convert to milliseconds. */
      .assignAscendingTimestamps(_.timestamp * 1000L)

    val itemViewCountStream: DataStream[ItemViewCount] = aggStream
      /** Keep only page-view ("pv") behavior events. */
      .filter(_.behavior == "pv")
      /** Group by item id (string-based keyBy keeps the Tuple key type
       *  expected by the downstream project-defined operators). */
      .keyBy("itemId")

      /** Sliding window: size 1 hour, slide 5 minutes. */
      .timeWindow(Time.hours(1), Time.minutes(5))
      /** Incremental count per item, wrapped into ItemViewCount per window. */
      .aggregate(new CountAgg(), new ItemCountWindowResult())

    /** Regroup all item counts of the same window, then rank and emit top 5. */
    itemViewCountStream
      .keyBy("windowEnd")
      .process( new TopNHotItems(5) ).print()

    env.execute("hot item top n")
  }

}
