package com.atguigu.pj.app

import com.atguigu.cm.util.StrUtil
import com.atguigu.pj.bean.{CategoryCountInfo, CategorySession, UserVisitAction}
import com.atguigu.pj.partitioner.SessionTop10Partition
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

import scala.collection.mutable
import scala.collection.mutable.ListBuffer

/**
 * description ：Top10热门品类中每个品类的 Top10 活跃 Session 统计，这里只关注点击次数
 * author      ：剧情再美终是戏 
 * mail        : 13286520398@163.com
 * date        ：Created in 2020/1/10 7:53
 * modified By ：
 * version:    : 1.0
 */
object QuerySessionTop10ByCategory {


  /**
   * Top10热门品类中每个品类的 Top10 活跃（tolist全排序）
   *
   * @Author 剧情再美终是戏
   * @Date 2020/1/10 15:24
   * @param sc                sparkContext
   * @param list              源数据
   * @param categoryTop10List Top10热门品类
   * @return org.apache.spark.rdd.RDD<scala.collection.immutable.List<com.atguigu.pj.bean.CategorySession>>
   * @Version 1.0
   **/
  /**
   * For each of the Top10 hot categories, find its Top10 most active sessions
   * (by click count), using a full in-memory sort of each category's sessions.
   *
   * @param sc                SparkContext (kept for interface parity; not used here)
   * @param list              raw user visit actions
   * @param categoryTop10List the Top10 hot categories to restrict the search to
   * @return one List[CategorySession] per category (at most 10 entries each),
   *         ordered by numeric category id
   **/
  def doQueryAllSort(sc: SparkContext, list: RDD[UserVisitAction],
                     categoryTop10List: Seq[CategoryCountInfo]): RDD[List[CategorySession]] = {

    // Ids of the hot categories, as strings for comparison against the actions.
    val hotCategoryIds: Seq[String] = categoryTop10List.map(_.categoryId)

    // Keep only actions whose clicked category is one of the hot categories.
    val hotActions: RDD[UserVisitAction] =
      list.filter(action => hotCategoryIds.contains(action.click_category_id.toString))

    // ((cid, sessionId), 1) --> ((cid, sessionId), clickCount)
    val clickCountPerSession: RDD[((Long, String), Int)] =
      hotActions
        .map(action => ((action.click_category_id, action.session_id), 1))
        .reduceByKey(_ + _)

    // Regroup as (cid, all (sessionId, clickCount) pairs of that category).
    val sessionsByCategory: RDD[(Long, Iterable[(String, Int)])] =
      clickCountPerSession
        .map { case ((cid, sid), count) => (cid, (sid, count)) }
        .groupByKey()

    // NOTE: toList materialises every session of a category in executor memory
    // before sorting; a very large category can therefore cause an OOM. This
    // variant deliberately demonstrates the naive full-sort approach.
    val top10PerCategory: RDD[(Long, List[(String, Int)])] =
      sessionsByCategory.mapValues { sessions =>
        sessions.toList.sortBy(_._2)(Ordering.Int.reverse).take(10)
      }

    // Convert to the bean type, drop malformed heads, order by category id.
    top10PerCategory
      .map { case (cid, top) =>
        top.map { case (sid, count) => CategorySession(cid.toString, sid, count) }
      }
      .filter(top => top.head != null && StrUtil.isNotEmpty(top.head.categoryId))
      .sortBy(_.head.categoryId.toInt)
  }


  /**
   * Top10热门品类中每个品类的 Top10 活跃(分区内排序)
   *
   * @Author 剧情再美终是戏
   * @Date 2020/1/10 15:24
   * @param sc                sparkContext
   * @param list              源数据
   * @param categoryTop10List Top10热门品类
   * @return org.apache.spark.rdd.RDD<scala.collection.immutable.List<com.atguigu.pj.bean.CategorySession>>
   * @Version 1.0
   **/
  /**
   * For each of the Top10 hot categories, find its Top10 most active sessions
   * (by click count), using a custom partitioner so each category lands in its
   * own partition and can be sorted partition-locally (no groupByKey needed).
   *
   * @param sc                SparkContext (kept for interface parity; not used here)
   * @param list              raw user visit actions
   * @param categoryTop10List the Top10 hot categories to restrict the search to
   * @return the Top10 sessions of every hot category, ordered by numeric category id
   **/
  def doQuerySingelPartitionSort(sc: SparkContext, list: RDD[UserVisitAction],
                                 categoryTop10List: Seq[CategoryCountInfo]): RDD[CategorySession] = {

    // Ids of the hot categories, as strings for comparison against the actions.
    val categoryTop10Cids: Seq[String] = categoryTop10List.map(_.categoryId)

    // Keep only actions whose clicked category is one of the hot categories.
    val filterRdd: RDD[UserVisitAction] = list.filter(x => categoryTop10Cids.contains(x.click_category_id.toString))

    // ((cid, sessionId), 1) pairs, ready for counting.
    val cidSessionOne: RDD[((Long, String), Int)] = filterRdd.map(x => ((x.click_category_id, x.session_id), 1))

    // Zip the category ids with their index so the custom partitioner can map
    // each category to a dedicated partition.
    val categoryTop10CidsIndex: Map[String, Int] = categoryTop10Cids.zipWithIndex.toMap

    // Count clicks per (cid, session) while repartitioning: after this step all
    // records of one category live in the same partition.
    val cidSessionCount: RDD[((Long, String), Int)] = cidSessionOne.reduceByKey(new SessionTop10Partition(categoryTop10CidsIndex), _ + _)

    // Reshape to (cid, (sessionId, clickCount)).
    val cidAndSidCount: RDD[(Long, (String, Int))] = cidSessionCount.map {
      case ((cid, sid), count) => (cid, (sid, count))
    }

    // No grouping needed: each partition holds exactly one category, so sorting
    // the partition and taking 10 yields that category's Top10.
    // NOTE: _.toList still materialises the whole partition in memory.
    val resultRdd: RDD[(Long, (String, Int))] = cidAndSidCount.mapPartitions(_.toList.sortBy(_._2._2)(Ordering.Int.reverse).take(10).toIterator)

    // Convert to the bean type, drop malformed rows, order by category id.
    val result: RDD[CategorySession] = resultRdd.map {
      case (cid, (sid, count)) => CategorySession(cid.toString, sid, count)
    }.filter(x => x != null && StrUtil.isNotEmpty(x.categoryId)).sortBy(_.categoryId.toInt)

    // Debug output: print each partition's content.
    // FIX: the buffer is now created INSIDE the per-partition lambda. The
    // original declared it outside (`{ val list = ...; it => ... }`), which
    // builds it once on the driver and only works because Spark serialises a
    // fresh copy of the closure per task — a fragile anti-pattern.
    result.foreachPartition { it =>
      val buffer = ListBuffer[CategorySession]()
      it.foreach(x => buffer += x)
      println(buffer)
    }

    // Return the result RDD.
    result
  }


  /**
   * Top10热门品类中每个品类的 Top10 活跃(Treeset排序)
   *
   * @Author 剧情再美终是戏
   * @Date 2020/1/10 15:24
   * @param sc                sparkContext
   * @param list              源数据
   * @param categoryTop10List Top10热门品类
   * @return org.apache.spark.rdd.RDD<scala.collection.immutable.List<com.atguigu.pj.bean.CategorySession>>
   * @Version 1.0
   **/
  /**
   * For each of the Top10 hot categories, find its Top10 most active sessions
   * (by click count), keeping a bounded TreeSet per partition instead of
   * sorting the whole partition — avoids the OOM risk of the full-sort variants.
   *
   * @param sc                SparkContext (kept for interface parity; not used here)
   * @param list              raw user visit actions
   * @param categoryTop10List the Top10 hot categories to restrict the search to
   * @return the Top10 sessions of every hot category (one category per partition)
   **/
  def doQueryTreeset(sc: SparkContext, list: RDD[UserVisitAction],
                     categoryTop10List: Seq[CategoryCountInfo]): RDD[CategorySession] = {

    // Ids of the hot categories, as strings for comparison against the actions.
    val categoryTop10Cids: Seq[String] = categoryTop10List.map(_.categoryId)

    // Keep only actions whose clicked category is one of the hot categories.
    val filterRdd: RDD[UserVisitAction] = list.filter(x => categoryTop10Cids.contains(x.click_category_id.toString))

    // ((cid, sessionId), 1) pairs, ready for counting.
    val cidSessionOne: RDD[((Long, String), Int)] = filterRdd.map(x => ((x.click_category_id, x.session_id), 1))

    // Zip the category ids with their index so the custom partitioner can map
    // each category to a dedicated partition.
    val categoryTop10CidsIndex: Map[String, Int] = categoryTop10Cids.zipWithIndex.toMap

    // Count clicks per (cid, session) while repartitioning: after this step all
    // records of one category live in the same partition.
    val cidSessionCount: RDD[((Long, String), Int)] = cidSessionOne.reduceByKey(new SessionTop10Partition(categoryTop10CidsIndex), _ + _)

    // Convert directly to the bean type; one category per partition.
    val cidAndSidCount: RDD[CategorySession] = cidSessionCount.map {
      case ((cid, sid), count) => CategorySession(cid.toString, sid, count)
    }

    // Descending by clickCount (no .toInt — conversion could overflow).
    // FIX: the original compare never returned 0 and returned -1 for BOTH
    // compare(x, y) and compare(y, x) on equal counts, violating the Ordering
    // contract (undefined behavior for TreeSet). On a count tie we now break
    // deterministically via the case-class toString, which still keeps two
    // DIFFERENT sessions with equal counts as distinct set elements.
    implicit val categorySessionOrdering: Ordering[CategorySession] = new Ordering[CategorySession] {
      override def compare(x: CategorySession, y: CategorySession): Int =
        if (x.clickCount > y.clickCount) -1
        else if (x.clickCount < y.clickCount) 1
        else x.toString.compareTo(y.toString)
    }

    // Keep only the current Top10 of each partition in a bounded TreeSet.
    // FIX: the set is now created INSIDE the lambda; the original declared it
    // outside (`{ var treeset = ...; it => ... }`), building it on the driver
    // and relying on per-task closure deserialisation for isolation.
    val result: RDD[CategorySession] = cidAndSidCount.mapPartitions { it =>
      var treeset: mutable.TreeSet[CategorySession] = mutable.TreeSet[CategorySession]()
      it.foreach { cs =>
        treeset.add(cs)
        // Set is ordered descending, so take(10) keeps the 10 largest counts.
        if (treeset.size > 10) treeset = treeset.take(10)
      }
      treeset.toIterator
    }

    // Debug output: print each partition's content (buffer is closure-local,
    // same fix as above).
    result.foreachPartition { it =>
      val buffer = ListBuffer[CategorySession]()
      it.foreach(x => buffer += x)
      println(buffer)
    }

    // Return the result RDD.
    result
  }
}

/**
 * Top10热门品类中每个品类的 Top10 活跃 Session 统计，这里只关注点击次数
 * (cid, session),1; (cid, session),1, reduceByKey
 * cid, session,count map
 * cid, (session, count); cid2, (session, count)... groupbykey
 * cid,List((session, count))
 **/

