package com.archgeek.spark.examples.v20210108

import org.apache.spark.{SPARK_BRANCH, SparkConf, SparkContext}

/**
 * Spark RDD example: reads a tab-separated access log and prints the top 5
 * pages by PV (page views) and by UV (unique visitors).
 *
 * @author pizhihui
 * @date 2021-01-08
 */
object SparkPvUv {

  /**
   * Computes PV (page views) and UV (unique visitors) from the tab-separated
   * log at `data/pvuvdata` and prints the top 5 pages for each metric.
   *
   * Record layout assumed from the indices used below: field 0 is the visitor
   * id/IP, field 5 is the page URL — NOTE(review): confirm against the data.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local").setAppName("pv_uv test")
    val sc = new SparkContext(conf)
    sc.setLogLevel("ERROR")

    try {
      // The parallelism set here has a large impact on how many jobs get produced.
      // val file = sc.textFile("data/pvuvdata", 5)
      val file = sc.textFile("data/pvuvdata")

      // ---------- PV: occurrences of each page ----------
      // sortBy on the count replaces the original swap/sortByKey/swap sequence,
      // matching the style already used in the UV section below.
      val pv = file
        .map(line => (line.split("\t")(5), 1))
        .reduceByKey(_ + _)
        .sortBy(_._2, ascending = false)
      pv.take(5).foreach(println)

      println("===============uv===================")

      // ---------- UV: distinct visitors per page ----------
      val uv = file
        .map { line =>
          val fields = line.split("\t")
          (fields(5), fields(0)) // (page, visitor)
        }
        .distinct()
        .map { case (page, _) => (page, 1) }
        .reduceByKey(_ + _)
        .sortBy(_._2, ascending = false)
      uv.take(5).foreach(println)

      // Block so the Spark web UI stays reachable until the user sends input.
      System.in.read()
    } finally {
      // Fix: release the SparkContext — the original never stopped it,
      // leaking the context (and its UI/executor resources) on any exit path.
      sc.stop()
    }
  }
}
