package day04.demo

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @Author wsl
 * @Description
 * Top-N hot categories, ranked by click count, then order count, then payment count.
 * Optimizations over the naive version:
 *   1. The source RDD is used by three pipelines, so it is cached up front.
 *   2. cogroup triggers a shuffle and is slow; union + reduceByKey is used instead.
 */
object TopN_Hot2 {

  /**
   * Computes the top-10 hot categories from the user-visit-action log,
   * ranked by (clickCount, orderCount, payCount) in descending order,
   * and prints them to stdout.
   *
   * Each input line is underscore-separated; the fields used here are:
   *   - field 6:  clicked category id ("-1" means the action was not a click)
   *   - field 8:  comma-separated list of ordered category ids ("null" if none)
   *   - field 10: comma-separated list of paid category ids ("null" if none)
   */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("rdd").setMaster("local[*]")
    val sc = new SparkContext(conf)

    val rdd: RDD[String] = sc.textFile("sparkcore/input/user_visit_action.txt")

    // The source RDD feeds three independent pipelines below, so cache it
    // to avoid re-reading the file three times.
    rdd.cache()

    // Split each line exactly once per pipeline. (Previously split("_") was
    // called in both the filter and the map of every branch, parsing each
    // record twice.)

    // Clicks: count per category id, shaped as (id, (clicks, 0, 0)).
    val clickRdd: RDD[(String, (Int, Int, Int))] = rdd
      .map(_.split("_"))
      .filter(fields => fields(6) != "-1")
      .map(fields => (fields(6), 1))
      .reduceByKey(_ + _)
      .map {
        case (id, cnt) => (id, (cnt, 0, 0))
      }

    // Orders: one action may order several categories, so flatMap over the
    // comma-separated list. Shaped as (id, (0, orders, 0)).
    val orderRdd: RDD[(String, (Int, Int, Int))] = rdd
      .map(_.split("_"))
      .filter(fields => fields(8) != "null")
      .flatMap(fields => fields(8).split(",").map((_, 1)))
      .reduceByKey(_ + _)
      .map {
        case (id, cnt) => (id, (0, cnt, 0))
      }

    // Payments: same shape as orders, using field 10. (id, (0, 0, pays)).
    val payRdd: RDD[(String, (Int, Int, Int))] = rdd
      .map(_.split("_"))
      .filter(fields => fields(10) != "null")
      .flatMap(fields => fields(10).split(",").map((_, 1)))
      .reduceByKey(_ + _)
      .map {
        case (id, cnt) => (id, (0, 0, cnt))
      }

    // Merge the three count triples per category with a single shuffle
    // (union + reduceByKey instead of cogroup), then sort descending by
    // (clicks, orders, pays) — Tuple3's default lexicographic ordering —
    // and take the top 10.
    clickRdd.union(orderRdd).union(payRdd)
      .reduceByKey(
        (t1, t2) => (t1._1 + t2._1, t1._2 + t2._2, t1._3 + t2._3)
      )
      .sortBy(_._2, ascending = false)
      .take(10)
      .foreach(println)

    // Release the cached RDD before shutting the context down.
    rdd.unpersist()

    sc.stop()
  }
}
