package com.atguigu.day9

import java.sql.Timestamp
import org.apache.flink.api.common.functions.AggregateFunction
//Flink's bundled (shaded Guava) Bloom filter
import org.apache.flink.shaded.guava18.com.google.common.hash.{BloomFilter, Funnels}
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.scala.function.ProcessWindowFunction
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.util.Collector
import java.lang.{Long => JLong}

object UVbyBloomFilter {

  /** One parsed record of the UserBehavior.csv log.
    *
    * @param userId     id of the user who performed the action
    * @param itemId     id of the item acted upon
    * @param categoryId category id of the item
    * @param behaviour  event type, e.g. "pv" for page view
    * @param timestamp  event time in epoch milliseconds (the source file
    *                   stores seconds; `main` converts on parse)
    */
  case class UserBehaviour(
      userId: Long,
      itemId: Long,
      categoryId: Int,
      behaviour: String,
      timestamp: Long
  )

  /** Reads the user-behaviour CSV, keeps "pv" events, and prints the
    * approximate UV (distinct users) per 1-hour event-time window.
    */
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    // Input path may be supplied as the first program argument; the original
    // hard-coded location remains the default for backward compatibility.
    val inputPath = args.headOption.getOrElse(
      "D:\\job\\idea\\idea2018_workspces\\flink\\src\\main\\resources\\UserBehavior.csv")

    val stream = env
      .readTextFile(inputPath)
      .map(line => {
        // CSV layout: userId,itemId,categoryId,behaviour,timestamp(seconds)
        val arr = line.split(",")
        // source timestamps are seconds; Flink event time expects milliseconds
        UserBehaviour(arr(0).toLong, arr(1).toLong, arr(2).toInt, arr(3), arr(4).toLong * 1000L)
      })
      .filter(_.behaviour.equals("pv")) // keep only page-view events
      .assignAscendingTimestamps(_.timestamp) // file is sorted by event time
      .map(r => ("key", r.userId)) // single constant key: one global UV count
      .keyBy(_._1)
      .timeWindow(Time.hours(1)) // 1-hour tumbling event-time window
      .aggregate(new CountAgg, new WindowResult)

    stream.print()
    env.execute()

  }

/** Incremental UV aggregator: counts approximately-distinct user ids with a
  * Bloom filter so that window state stays tiny regardless of cardinality.
  * Accumulator = (distinct count so far, Bloom filter over user ids).
  */
class CountAgg extends AggregateFunction[(String,Long),(Long,BloomFilter[JLong]),Long]{

  /** Creates a fresh accumulator.
    *
    * BloomFilter.create arguments:
    *  - Funnels.longFunnel(): elements fed to the filter are Longs (user ids)
    *  - 1000000: expected number of distinct elements
    *  - 0.01: acceptable false-positive probability (1%)
    */
  override def createAccumulator(): (Long, BloomFilter[JLong]) =
    (0L, BloomFilter.create(Funnels.longFunnel(), 1000000, 0.01))

  /** Folds one (key, userId) record into the accumulator. The count is
    * incremented only when the filter has definitely not seen the user id
    * before; false positives cause a slight under-count, which is the
    * accepted trade-off of the Bloom-filter approach.
    */
  override def add(in: (String, Long), acc: (Long, BloomFilter[JLong])): (Long, BloomFilter[JLong]) = {
    val (uvCount, bloom) = acc
    if (!bloom.mightContain(in._2)) {
      bloom.put(in._2) // remember this user id in the filter
      (uvCount + 1, bloom)
    } else {
      (uvCount, bloom)
    }
  }

  /** Emits the approximate distinct-user count accumulated so far. */
  override def getResult(acc: (Long, BloomFilter[JLong])): Long = acc._1

  /** Merging is unsupported: this shaded Guava version exposes no
    * BloomFilter#putAll, and simply summing the two counts would over-count
    * users present in both accumulators. Only non-merging windows (such as
    * the tumbling window used in `main`) may use this function; fail with a
    * descriptive error instead of the opaque NotImplementedError of `???`.
    */
  override def merge(acc: (Long, BloomFilter[JLong]), acc1: (Long, BloomFilter[JLong])): (Long, BloomFilter[JLong]) =
    throw new UnsupportedOperationException(
      "CountAgg.merge is not supported: Bloom-filter accumulators cannot be merged accurately")
}

  // Why a Bloom filter instead of a Set: with e.g. a 1-hour sliding window,
  // a 5-second slide and ~1 billion users per hour, a Set would have to keep
  // every user id in state — on the order of 1 TB per window, produced every
  // 5 seconds. The Bloom filter keeps state small at the cost of a bounded
  // false-positive rate.
  class WindowResult extends ProcessWindowFunction[Long,String,String,TimeWindow]{
    /** Emits one line per window: the window end time together with the
      * pre-aggregated UV value produced by CountAgg.
      */
    override def process(key: String, context: Context, elements: Iterable[Long], out: Collector[String]): Unit = {
      val windowEnd = new Timestamp(context.window.getEnd)
      val uv = elements.head // exactly one element: the result of CountAgg
      out.collect("窗口结束时间为："+ windowEnd +"的窗口的UV统计值是"+ uv)
    }
  }

}