package org.huangrui.spark.scala.core.req

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @Author hr
 * @Create 2024-10-19 1:57 
 */
object HotCategoryTop10Analysis_1 {
  /**
   * Hot-category Top-10 analysis.
   *
   * Reads "_"-separated user-visit-action log lines, counts clicks, orders
   * and payments per category id, then prints the top 10 categories ranked
   * by (clickCount, orderCount, payCount) in descending tuple order.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("spark")
    val sc = new SparkContext(conf)

    // Read the raw log; each line holds "_"-separated fields.
    val data = sc.textFile("data/user_visit_action.txt")

    // 1. Keep only non-search records: field 5 (the search keyword) is the
    //    literal string "null" for click/order/pay actions.
    //    FIX: the original wrote `"null".matches(strings(5))`, which uses the
    //    *data field* as a regex pattern (a field like ".*" would wrongly
    //    match); plain equality is what is intended.
    //    Pre-split once per line so downstream stages don't re-split, and
    //    cache because this RDD feeds three independent pipelines.
    val filteredRdd: RDD[Array[String]] = data
      .map(line => line.split("_"))
      .filter(fields => fields(5) == "null")
      .cache()

    // 2. Click count per category: field 6 is the clicked category id
    //    ("null" when the action is not a click).
    val clickActionCount = filteredRdd
      .filter(fields => fields(6) != "null")
      .map(fields => (fields(6), 1))
      .reduceByKey(_ + _)

    // 3. Order count per category: field 8 is a comma-separated list of
    //    ordered category ids ("null" when the action is not an order).
    val orderActionCount = filteredRdd
      .filter(fields => fields(8) != "null")
      .flatMap(fields => fields(8).split(",").map(cid => (cid, 1)))
      .reduceByKey(_ + _)

    // 4. Payment count per category: field 10 is a comma-separated list of
    //    paid category ids ("null" when the action is not a payment).
    val payActionCount = filteredRdd
      .filter(fields => fields(10) != "null")
      .flatMap(fields => fields(10).split(",").map(cid => (cid, 1)))
      .reduceByKey(_ + _)

    // 5. Lift each count into a (click, order, pay) triple so the three
    //    sources can be unioned and summed component-wise per category,
    //    avoiding the cost of a cogroup:
    //    (cid, clicks) => (cid, (clicks, 0, 0))
    //    (cid, orders) => (cid, (0, orders, 0))
    //    (cid, pays)   => (cid, (0, 0, pays))
    val clickRdd: RDD[(String, (Int, Int, Int))] =
      clickActionCount.map { case (cid, cnt) => (cid, (cnt, 0, 0)) }
    val orderRdd: RDD[(String, (Int, Int, Int))] =
      orderActionCount.map { case (cid, cnt) => (cid, (0, cnt, 0)) }
    val payRdd: RDD[(String, (Int, Int, Int))] =
      payActionCount.map { case (cid, cnt) => (cid, (0, 0, cnt)) }

    val sourceRdd: RDD[(String, (Int, Int, Int))] = clickRdd.union(orderRdd).union(payRdd)

    // 6. Sum the triples per category, sort descending by the triple
    //    (tuples compare element-wise: clicks, then orders, then payments)
    //    and print the top 10.
    sourceRdd
      .reduceByKey((t1, t2) => (t1._1 + t2._1, t1._2 + t2._2, t1._3 + t2._3))
      .sortBy(t => t._2, ascending = false)
      .take(10)
      .foreach(println)

    sc.stop()
  }
}
