package com.catmiao.spark.req

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @title: Spark02_Req1_HotCategoryTop10Session
 * @projectName spark_study
 * @description: For each of the top-10 hot categories, find the 10 sessions
 *               with the highest click counts.
 * @author ChengMiao
 * @date 2024/3/11 23:14
 */
object Spark02_Req1_HotCategoryTop10Session {

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local[*]").setAppName("HotCategoryTop10")
    val sc = new SparkContext(conf)

    // 1. Read the raw action log. Each line is an underscore-separated record:
    //    field 2 = session id, field 6 = clicked category id ("-1" for non-click
    //    actions), field 8 = ordered category ids (comma-separated, "null" when
    //    absent), field 10 = paid category ids (comma-separated, "null" when absent).
    val rdd = sc.textFile("datas/req_record/user_visit_action.txt")

    // The raw RDD is traversed several times (inside top10Category and again
    // below), so cache it to avoid re-reading and re-parsing the file.
    rdd.cache()

    val top10: Array[String] = top10Category(rdd)
    // Set gives O(1) membership tests vs O(n) for Array.contains,
    // and the test runs once per input record.
    val top10Set = top10.toSet

    // 2. Keep only click records whose category is in the top 10 and count
    //    clicks per (categoryId, sessionId). Splitting once up front avoids
    //    re-splitting the same line in filter and map.
    val sessionClickCounts: RDD[((String, String), Int)] = rdd
      .map(_.split("_"))
      .filter(datas => datas(6) != "-1" && top10Set.contains(datas(6)))
      .map(datas => ((datas(6), datas(2)), 1))
      .reduceByKey(_ + _)

    // 3. Regroup by category: (categoryId, (sessionId, clickCount)).
    val byCategory: RDD[(String, Iterable[(String, Int)])] = sessionClickCounts
      .map { case ((cid, sid), sum) => (cid, (sid, sum)) }
      .groupByKey()

    // 4. Within each category keep the 10 sessions with the most clicks,
    //    highest first.
    val result: RDD[(String, List[(String, Int)])] = byCategory.mapValues(
      iter => iter.toList.sortBy(_._2)(Ordering.Int.reverse).take(10)
    )

    result.collect().foreach(println)

    sc.stop()
  }

  /**
   * Computes the 10 hottest category ids from the raw action log, ranked in
   * descending lexicographic order of (clickCount, orderCount, payCount).
   *
   * @param rdd raw underscore-separated action log lines (see field layout in main)
   * @return the top-10 category ids, hottest first
   */
  def top10Category(rdd: RDD[String]): Array[String] = {

    // Click count per category: field 6 is "-1" for non-click actions.
    val clickCountRdd: RDD[(String, Int)] = rdd
      .map(_.split("_"))
      .filter(datas => datas(6) != "-1")
      .map(datas => (datas(6), 1))
      .reduceByKey(_ + _)

    // Order count per category: field 8 holds comma-separated category ids,
    // one order record can hit several categories.
    val orderCountRdd: RDD[(String, Int)] = rdd
      .map(_.split("_"))
      .filter(datas => datas(8) != "null")
      .flatMap(datas => datas(8).split(",").map(id => (id, 1)))
      .reduceByKey(_ + _)

    // Pay count per category: same shape as orders, field 10.
    val payCountRdd: RDD[(String, Int)] = rdd
      .map(_.split("_"))
      .filter(datas => datas(10) != "null")
      .flatMap(datas => datas(10).split(",").map(id => (id, 1)))
      .reduceByKey(_ + _)

    // Combine the three metrics per category.
    // NOTE(review): cogroup can shuffle all three sources when they are not
    // co-partitioned; mapping each RDD to (cid, (click, order, pay)) and using
    // union + reduceByKey would avoid that. Kept as cogroup to preserve the
    // original execution plan.
    val cogroupRdd: RDD[(String, (Iterable[Int], Iterable[Int], Iterable[Int]))] =
      clickCountRdd.cogroup(orderCountRdd, payCountRdd)

    val counts: RDD[(String, (Int, Int, Int))] = cogroupRdd.mapValues {
      case (clickIter, orderIter, payIter) =>
        // After reduceByKey each iterable holds at most one value;
        // a category missing a metric defaults to 0.
        (clickIter.headOption.getOrElse(0),
         orderIter.headOption.getOrElse(0),
         payIter.headOption.getOrElse(0))
    }

    // Descending sort by the (click, order, pay) tuple, keep the 10 best ids.
    counts.sortBy(_._2, ascending = false).take(10).map(_._1)
  }

}
