package cn.azzhu.proj

import java.sql.Timestamp

import org.apache.flink.api.common.functions.AggregateFunction
import org.apache.flink.shaded.guava18.com.google.common.hash.Funnels
//Flink内置的布隆过滤器
import org.apache.flink.shaded.guava18.com.google.common.hash.BloomFilter
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.scala.function.ProcessWindowFunction
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.util.Collector
import java.lang


/**
 * 如果访问量很大怎么办？这里的方法会把所有的PV数据放在窗口里面，然后去重 ===>增量聚合，使用布隆过滤器实现
 *  使用布隆过滤器实现UV统计
 * @author azzhu
 * @create 2020-09-24 21:27:00
 */
object UVByBloomFilter {
  /**
   * One user-behaviour record parsed from a CSV line.
   *
   * Marked `final`: case classes should not be extended.
   *
   * @param userId     id of the user performing the action
   * @param itemId     id of the item acted on
   * @param categoryId category the item belongs to
   * @param behaviour  behaviour type, e.g. "pv" for page view
   * @param timestamp  event time in milliseconds (the raw CSV value is in
   *                   seconds and is multiplied by 1000 at parse time)
   */
  final case class UserBehaviour(userId: Long,
                                 itemId: Long,
                                 categoryId: Int,
                                 behaviour: String,
                                 timestamp: Long)

  /**
   * Entry point: reads the UserBehavior CSV, keeps only "pv" (page view)
   * events, and prints the approximate UV (distinct user) count for each
   * one-hour event-time window.
   *
   * @param args optional; args(0), when present, overrides the default
   *             hard-coded CSV input path
   */
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)

    // Windows must be driven by the timestamps carried in the records,
    // so event time is required.
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    // Allow the input path to be supplied on the command line; fall back to
    // the original development path for backward compatibility.
    val inputPath =
      if (args.nonEmpty) args(0)
      else "D:\\bigdata\\flink-learning\\src\\main\\resources\\UserBehavior.csv"

    env
      .readTextFile(inputPath)
      .map(line => {
        val arr = line.split(",")
        // NOTE: the CSV timestamp is in seconds; Flink expects milliseconds.
        UserBehaviour(arr(0).toLong, arr(1).toLong, arr(2).toInt, arr(3), arr(4).toLong * 1000)
      })
      .filter(_.behaviour.equals("pv"))       // keep only page-view events
      .assignAscendingTimestamps(_.timestamp) // timestamps in the file are ascending
      .map(r => ("key", r.userId))            // single constant key: one global window stream
      .keyBy(_._1)
      .timeWindow(Time.hours(1))
      .aggregate(new CountAgg, new WindowResult)
      .print()

    env.execute("UVAgg")
  }

  /**
   * Incremental aggregator for UV (unique visitor) counting.
   *
   * Accumulator: (current UV count, Bloom filter of the user ids seen so far).
   * The Bloom filter gives approximate de-duplication in bounded memory,
   * instead of holding every user id in window state. False positives make
   * the count a slight under-estimate, never an over-estimate.
   */
  class CountAgg extends AggregateFunction[(String, Long), (Long, BloomFilter[lang.Long]), Long] {
    override def createAccumulator(): (Long, BloomFilter[lang.Long]) = {
      // Funnels.longFunnel(): the filter stores Long values (user ids).
      // 1000000: expected number of distinct users to deduplicate.
      // 0.01: acceptable false-positive probability (1%).
      (0L, BloomFilter.create(Funnels.longFunnel(), 1000000, 0.01))
    }

    override def add(value: (String, Long), acc: (Long, BloomFilter[lang.Long])): (Long, BloomFilter[lang.Long]) = {
      val (uvCount, bloom) = acc
      if (bloom.mightContain(value._2)) {
        // Probably seen before (may be a false positive): count unchanged.
        acc
      } else {
        // Definitely a new user id: record it and bump the count.
        bloom.put(value._2)
        (uvCount + 1, bloom)
      }
    }

    override def getResult(acc: (Long, BloomFilter[lang.Long])): Long = acc._1

    override def merge(acc: (Long, BloomFilter[lang.Long]), acc1: (Long, BloomFilter[lang.Long])): (Long, BloomFilter[lang.Long]) =
      // Bloom-filter accumulators cannot be merged exactly: summing the two
      // counts would double-count users present in both filters. Flink only
      // calls merge() for merging window assigners (e.g. session windows),
      // which this job does not use — so fail with an explicit message
      // instead of the opaque NotImplementedError that `???` would throw.
      throw new UnsupportedOperationException(
        "CountAgg.merge is not supported: Bloom-filter accumulators cannot be merged exactly")
  }

  /**
   * Formats one human-readable result line per window.
   *
   * The upstream `aggregate(new CountAgg, ...)` hands this function exactly
   * one pre-aggregated Long (the UV count) per window, so `elements` holds a
   * single value and `elements.head` is safe here.
   */
  class WindowResult extends ProcessWindowFunction[Long, String, String, TimeWindow] {
    override def process(key: String, context: Context, elements: Iterable[Long], out: Collector[String]): Unit = {
      val windowEnd = new Timestamp(context.window.getEnd)
      // s-interpolation instead of `+` concatenation; output is identical.
      out.collect(s"窗口结束时间为： $windowEnd 的窗口的UV统计值是：${elements.head}")
    }
  }
}
