package com.xzx.spark.core.exercise

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 *
 * 需求 2：Top10 热门品类中每个品类的 Top10 活跃 Session 统计
 * (在需求一的基础上，增加每个品类用户 session 的点击统计)
 *
 * @author xinzhixuan
 * @version 1.0
 * @date 2021-08-08 2:14 下午
 */
object Exercise03 {

  /**
   * Entry point: parses the user-visit-action log, computes the Top 10 hot
   * categories (requirement 1, via [[getTopN]]), then for each of those
   * categories prints the Top 10 most active sessions ranked by click count.
   */
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local[*]").setAppName(getClass.getSimpleName))
    try {
      // Parse each "_"-separated line into a UserVisitAction; cached because
      // the RDD is traversed twice (once for getTopN, once for session stats).
      val actions: RDD[UserVisitAction] = sc
        .textFile("src/main/scala/com/xzx/spark/core/exercise/user_visit_action.txt")
        .map { line =>
          val f = line.split("_")
          UserVisitAction(f(0), f(1).toLong, f(2), f(3).toLong, f(4), f(5), f(6).toLong, f(7).toLong, f(8), f(9), f(10), f(11), f(12).toLong)
        }
        .cache()

      val topCategories: Array[Long] = getTopN(actions)
      println(topCategories.mkString(","))

      println("================")

      // A Set gives O(1) membership tests inside the executor-side filter;
      // Array.contains would be a linear scan on every single record.
      val topCategorySet: Set[Long] = topCategories.toSet

      // ((categoryId, sessionId), 1) -> click count per (category, session)
      // -> regroup by category -> keep the 10 most-clicked sessions each.
      actions
        .filter(a => topCategorySet.contains(a.click_category_id))
        .map(a => ((a.click_category_id, a.session_id), 1))
        .reduceByKey(_ + _)
        .map { case ((categoryId, sessionId), clicks) => (categoryId, (sessionId, clicks)) }
        .groupByKey()
        .mapValues(_.toList.sortBy(_._2)(Ordering.Int.reverse).take(10))
        .collect() // bring results to the driver so output is printed there, not scattered across executor logs
        .foreach(println)
    } finally {
      // Always release the SparkContext, even if a job above fails.
      sc.stop()
    }
  }

  /**
   * Requirement 1: Top 10 hot categories.
   *
   * Each action contributes one (clickCount, orderCount, payCount) triple per
   * category. Sentinels in the data set: `click_category_id == -1` means "no
   * click"; `order_category_ids` / `pay_category_ids` hold the literal string
   * "null" when absent, otherwise a comma-separated list of category ids.
   * Categories are ranked by the implicit Tuple3 ordering — clicks first,
   * then orders, then payments as tie-breakers — in descending order.
   *
   * @param rdd parsed user-visit actions
   * @return ids of the 10 hottest categories, hottest first
   */
  def getTopN(rdd: RDD[UserVisitAction]): Array[Long] = {
    val counted = rdd
      .flatMap { action =>
        if (action.click_category_id != -1) {
          List((action.click_category_id, (1, 0, 0)))
        } else if (action.order_category_ids != "null") {
          action.order_category_ids.split(",").map(id => (id.toLong, (0, 1, 0))).toList
        } else if (action.pay_category_ids != "null") {
          action.pay_category_ids.split(",").map(id => (id.toLong, (0, 0, 1))).toList
        } else {
          Nil // action touched no category at all
        }
      }
      .reduceByKey { case ((c1, o1, p1), (c2, o2, p2)) => (c1 + c2, o1 + o2, p1 + p2) }
    counted.sortBy(_._2, ascending = false).take(10).map(_._1)
  }

}

