package com.yanggu.spark.core.demand.categoryTop10

import org.apache.spark.util.AccumulatorV2

import scala.collection.mutable

/**
 * Custom accumulator that aggregates click/order/pay counts per category.
 *
 * Extends `AccumulatorV2` and overrides its six abstract methods (the core
 * ones being `add` and `merge`). Accumulators are a common optimisation for
 * sum/count style aggregations because they avoid a shuffle: partial results
 * are accumulated locally on each executor and merged on the driver.
 *
 * Input:  `(categoryId, actionType)` pairs, where `actionType` is one of
 *         "click", "order" or "pay".
 * Output: a mutable `Map` from categoryId to its aggregated [[CategoryCount]].
 */
class CategoryCountAccumulator extends AccumulatorV2[(String, String), mutable.Map[String, CategoryCount]] {

  // Per-category running totals. CategoryCount instances are mutated in place.
  var map: mutable.Map[String, CategoryCount] = mutable.Map[String, CategoryCount]()

  override def isZero: Boolean = map.isEmpty

  /**
   * Creates a copy of this accumulator INCLUDING its current data, as the
   * `AccumulatorV2.copy` contract requires. The previous implementation
   * returned an empty accumulator, silently dropping any accumulated state
   * whenever `copy()` was invoked on a non-zero accumulator.
   *
   * `CategoryCount` instances are mutable, so each entry is deep-copied to
   * keep the two accumulators fully independent.
   */
  override def copy(): AccumulatorV2[(String, String), mutable.Map[String, CategoryCount]] = {
    val acc = new CategoryCountAccumulator
    acc.map = map.map { case (categoryId, cc) =>
      categoryId -> CategoryCount(cc.clickCount, cc.orderCount, cc.payCount)
    }
    acc
  }

  override def reset(): Unit = map.clear()

  /**
   * Folds one `(categoryId, actionType)` event into the running totals.
   *
   * @note an `actionType` other than "click"/"order"/"pay" throws a
   *       `MatchError` (fail fast); upstream filtering is expected to
   *       guarantee only these three values arrive here.
   */
  override def add(v: (String, String)): Unit = {
    val (categoryId, actionType) = v

    // getOrElseUpdate both fetches the counter and registers a fresh one on
    // first sight of the category, so no explicit put is needed afterwards
    // (CategoryCount is mutable and updated in place).
    val categoryCount = map.getOrElseUpdate(categoryId, CategoryCount(0, 0, 0))

    actionType match {
      case "click" => categoryCount.clickCount += 1
      case "order" => categoryCount.orderCount += 1
      case "pay"   => categoryCount.payCount += 1
    }
  }

  /**
   * Merges another accumulator of the same type into this one by summing the
   * per-category counters (the other accumulator is left untouched).
   */
  override def merge(other: AccumulatorV2[(String, String), mutable.Map[String, CategoryCount]]): Unit = {
    other.value.foreach { case (categoryId, otherCount) =>
      val mine = map.getOrElseUpdate(categoryId, CategoryCount(0, 0, 0))
      mine.clickCount += otherCount.clickCount
      mine.orderCount += otherCount.orderCount
      mine.payCount += otherCount.payCount
    }
  }

  override def value: mutable.Map[String, CategoryCount] = map
}
