package com.wdl.networkflow.pv

import com.wdl.networkflow.topN.KafkaUtil
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer

/**
 * Flink streaming job that counts page-view ("pv") events per hour.
 *
 * Pipeline: read UserBehavior CSV records -> parse -> filter to "pv" events ->
 * key by a mapped key (see PvCountMap) -> 1-hour tumbling event-time windows ->
 * pre-aggregate per key -> key by window end -> merge partial counts into a
 * total per window (TotalPvCountResult) -> print.
 */
object PageView {

  def main(args: Array[String]): Unit = {

    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    /** Parallelism of 4 so the pre-aggregation is spread across subtasks. */
    env.setParallelism(4)

    /** Use event time so windows are driven by the record timestamps. */
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    /** Source: read from a local file (Kafka source kept as an alternative). */
    val inputStream: DataStream[String] =
          env.readTextFile("E:\\bigdata_project\\UserBehaviorAnalysis\\HotItemsAnalysis\\src\\main\\resources\\UserBehavior.csv")
//      env.addSource( new FlinkKafkaConsumer[String](KafkaUtil.topic, new SimpleStringSchema(), KafkaUtil.kafkaConsumerProperties()) )

    val dataStream: DataStream[UserBehavior] = inputStream.map(
      data => {
        /** Sample record: 543462,1715,1464116,pv,  1511658000
         *  Fields may carry stray whitespace around the commas (as in the
         *  sample above), so trim each field before numeric conversion to
         *  avoid NumberFormatException. */
        val fields: Array[String] = data.split(",").map(_.trim)
        UserBehavior(fields(0).toLong, fields(1).toLong, fields(2).toInt, fields(3), fields(4).toLong)
      })
      /** Timestamps are epoch seconds; Flink expects milliseconds.
       *  assignAscendingTimestamps assumes the source is in timestamp order. */
      .assignAscendingTimestamps(_.timestamp * 1000L)

    /** Keep only page-view events, map to (key, 1L) pairs, and pre-aggregate
     *  per key over 1-hour tumbling windows. */
    val pvCountStream: DataStream[PvCountView] = dataStream.filter(_.behavior == "pv")
//      .map(data => {("pv", 1L)})
      .map(new PvCountMap())
      .keyBy(_._1)
      .timeWindow(Time.hours(1L))
      .aggregate(new PvCountAgg(), new PvCountWindowResult())

    /** Re-key by window end and merge the partial counts from every
     *  parallel subtask into one total per window. */
    val resultStream: DataStream[PvCountView] = pvCountStream.keyBy(_.windowEnd)
      .process(new TotalPvCountResult())

    resultStream.print("pv:")

    env.execute("pv count!")
  }

}