package com.core.app
import com.core.bean.{CategoryCountInfo, CategorySession, UserVisitAction}
import com.core.util.MyPartitioner
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

import scala.collection.mutable
object CategorySessionApp {

  /**
   * Keeps only the click actions that hit one of the top-10 categories and
   * turns each one into a countable pair.
   *
   * Shared by all three `statCategoryTop10Session*` variants, which previously
   * duplicated this filter + map verbatim.
   *
   * @param userVisitActionRDD raw user behavior records
   * @param categoryIdTop10    ids (as strings) of the top-10 categories
   * @return RDD of ((categoryId, sessionId), 1), ready for `reduceByKey`
   */
  private def categorySessionPairs(
      userVisitActionRDD: RDD[UserVisitAction],
      categoryIdTop10: List[String]): RDD[((Long, String), Int)] = {
    userVisitActionRDD
      // NOTE: `click_category_id` is a Long; the top-10 ids are Strings, so
      // compare on the String form.
      .filter(action => categoryIdTop10.contains(action.click_category_id.toString))
      .map(action => ((action.click_category_id, action.session_id), 1))
  }

  /**
   * Variant 1: for every top-10 category, print the 10 sessions with the most
   * clicks, using `groupByKey` + an in-memory sort of each group.
   *
   * NOTE(review): `it.toList.sortBy` materializes every session of a category
   * on one executor — can OOM for a very hot category (the motivation for
   * variant `_2` below).
   */
  def statCategoryTop10Session(sc: SparkContext, userVisitActionRDD: RDD[UserVisitAction], categoryTop10: List[CategoryCountInfo]) = {
    // 1. Ids of the top-10 categories.
    val categoryIdTop10: List[String] = categoryTop10.map(_.categoryId)

    // 2./3. Filter to top-10 categories and count clicks per (category, session):
    //       ((categoryId, sessionId), count) -> (categoryId, (sessionId, count))
    val categorySessionCount: RDD[(Long, (String, Int))] =
      categorySessionPairs(userVisitActionRDD, categoryIdTop10)
        .reduceByKey(_ + _)
        .map { case ((cid, sid), count) => (cid, (sid, count)) }

    // 4./5. Group by category id, sort each group by count descending,
    //       keep the first 10.
    val categorySessionRDD: RDD[CategorySession] = categorySessionCount
      .groupByKey()
      .flatMap { case (cid, sessions) =>
        sessions.toList
          .sortBy(_._2)(Ordering.Int.reverse)
          .take(10)
          .map { case (sid, count) => CategorySession(cid.toString, sid, count) }
      }
    categorySessionRDD.collect.foreach(println)
  }

  /**
   * Variant 2: same result as variant 1, but instead of `groupByKey` it runs
   * one Spark job per category (filter that category, distributed sort by
   * count descending, `take(10)`).
   *
   * Avoids the per-group in-memory sort at the cost of launching one job for
   * each of the 10 categories.
   */
  def statCategoryTop10Session_1(sc: SparkContext, userVisitActionRDD: RDD[UserVisitAction], categoryTop10: List[CategoryCountInfo]) = {
    // 1. Ids of the top-10 categories.
    val categoryIdTop10: List[String] = categoryTop10.map(_.categoryId)

    // 2./3. Filter to top-10 categories and count clicks per (category, session).
    val categorySessionCount: RDD[(Long, (String, Int))] =
      categorySessionPairs(userVisitActionRDD, categoryIdTop10)
        .reduceByKey(_ + _)
        .map { case ((cid, sid), count) => (cid, (sid, count)) }

    // 4. For each top-10 category: keep only that category, sort the whole
    //    RDD by count descending, take the first 10.
    categoryIdTop10.foreach { categoryId =>
      val top10: Array[CategorySession] = categorySessionCount
        .filter(_._1 == categoryId.toLong)
        .sortBy(_._2._2, ascending = false)
        .take(10)
        .map { case (cid, (sid, count)) => CategorySession(cid.toString, sid, count) }
      top10.foreach(println)
    }
  }

  /**
   * Variant 3: same result again, but a custom partitioner is supplied to
   * `reduceByKey` so each partition holds the data of a single top-10
   * category; the per-partition top 10 is then the per-category top 10,
   * computed with a bounded sorted set instead of a full sort.
   */
  def statCategoryTop10Session_2(sc: SparkContext, userVisitActionRDD: RDD[UserVisitAction], categoryTop10: List[CategoryCountInfo]) = {
    // 1. Ids of the top-10 categories.
    val categoryIdTop10: List[String] = categoryTop10.map(_.categoryId)

    // 2./3. Filter + count, routing each category to its own partition via
    //       MyPartitioner (the difference from the other variants).
    val categorySessionCount: RDD[CategorySession] =
      categorySessionPairs(userVisitActionRDD, categoryIdTop10)
        .reduceByKey(new MyPartitioner(categoryIdTop10), _ + _)
        .map { case ((cid, sid), count) => CategorySession(cid.toString, sid, count) }

    // 4. Per-partition top 10 without materializing the partition as a List
    //    (avoids the OOM risk of variant 1): feed records into a TreeSet that
    //    is trimmed back to 10 elements whenever it grows past 10.
    //    Relies on CategorySession's Ordered instance sorting by count
    //    descending — assumed from the original comments, TODO confirm.
    val categorySessionRDD: RDD[CategorySession] = categorySessionCount.mapPartitions { it =>
      var top10 = mutable.TreeSet.empty[CategorySession]
      it.foreach { cs =>
        top10 += cs
        if (top10.size > 10) {
          top10 = top10.take(10) // drop the smallest-ranked element
        }
      }
      top10.toIterator
    }
    categorySessionRDD.collect.foreach(println)
  }
}
