package com.feiwei.hotitem

import org.apache.flink.api.common.functions.AggregateFunction
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.table.runtime.aggregate.AggregateAggFunction


// One user-behavior record parsed from a line of UserBehavior.csv
// (userId, itemId, category id, behavior string such as "pv", timestamp).
// NOTE(review): `catagorysId` looks like a misspelling of `categoryId`; kept
// as-is because the field name is part of the public interface of this class.
// NOTE(review): `timestamp` is presumably epoch seconds — it is multiplied by
// 1000L before being used as an event-time timestamp downstream; confirm.
case class UserBehavior(userId:Long,itemId:Long ,catagorysId:Long,behavior:String,timestamp:Long)



/** Flink streaming job skeleton for hot-items analysis.
  *
  * Reads user-behavior records from a CSV file, assigns ascending event-time
  * timestamps, and (eventually) counts item views per sliding window — the
  * windowed aggregation is still commented out pending `WindowResult`.
  */
object HotItems {

  // Original hard-coded input location, kept as the default so existing
  // invocations (no program arguments) behave exactly as before.
  private val DefaultInputPath =
    "E:\\repository\\company\\myself\\flink-learning\\flink-learing-user-behavior-analysis\\HotItemsAnalysis\\src\\main\\resources\\UserBehavior.csv"

  def main(args: Array[String]): Unit = {
    // Generalization: the CSV path may now be passed as the first program
    // argument; with no arguments the original default path is used.
    val inputPath = args.headOption.getOrElse(DefaultInputPath)

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    // Parallelism 1 keeps output ordering deterministic for this demo.
    env.setParallelism(1)

    val dataStream = env.readTextFile(inputPath)
      .map { line =>
        // CSV layout: userId,itemId,categoryId,behavior,timestamp
        val fields = line.split(",")
        UserBehavior(
          fields(0).trim.toLong,
          fields(1).trim.toLong,
          fields(2).trim.toLong,
          fields(3).trim,
          fields(4).trim.toLong)
      }
      // The source timestamps are known to be monotonically increasing, so we
      // can assign them directly as ascending event-time timestamps
      // (seconds -> milliseconds).
      .assignAscendingTimestamps(_.timestamp * 1000L)

    /* dataStream.filter(_.behavior=="pv")
        .keyBy("itemId")
        .timeWindow(Time.hours(1),Time.minutes(5))
        .aggregate(new CountAgg(),new WindowResult())*/
    dataStream.print()
    env.execute()
  }

  /** Pre-aggregation function: counts records per key/window.
    *
    * Both the accumulator and the output are a plain `Long` count, so the
    * per-window state kept by Flink is a single number.
    */
  class CountAgg() extends AggregateFunction[UserBehavior, Long, Long] {

    override def createAccumulator(): Long = 0L

    // Every incoming record contributes exactly 1, regardless of its content.
    override def add(value: UserBehavior, accumulator: Long): Long = accumulator + 1

    override def getResult(accumulator: Long): Long = accumulator

    // Merging two partial counts (e.g. for session/merging windows) is a sum.
    override def merge(a: Long, b: Long): Long = a + b
  }

}
