package day2

import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Reads a consumption log, aggregates self-operated and third-party spend
 * per device, and prints the totals sorted by overall spend (descending).
 *
 * Expected space-separated log line layout (0-based fields):
 *   field 2 = self-operated amount, field 3 = third-party amount, field 4 = device ID.
 */
object consume {
  def main(args: Array[String]): Unit = {
    System.setProperty("hadoop.home.dir", "D:\\新建文件夹\\HCIA初级\\hadoop")
    Logger.getLogger("org").setLevel(Level.OFF)
    val sc = new SparkContext(new SparkConf().setAppName("Basic").setMaster("local"))

    try {
      val consume = sc.textFile("D:\\新建文件夹\\data\\consume.log").map { line =>
        val fields = line.split(" ")
        // Key: field 5 of the line (device ID).
        // Value: fields 3 and 4 of the line (self-operated amount, third-party amount).
        // BigDecimal avoids floating-point rounding errors when summing money.
        (fields(4), (BigDecimal(fields(2)), BigDecimal(fields(3))))
      }

      // x: self-operated content totals, y: third-party content totals.
      val sum = consume.reduceByKey((x: (BigDecimal, BigDecimal), y: (BigDecimal, BigDecimal)) =>
        (x._1 + y._1, x._2 + y._2)
      ).sortBy(i => i._2._2 + i._2._1, ascending = false)

      // Output: deviceId selfTotal thirdPartyTotal overallTotal
      // collect() brings results to the driver so the sorted order is preserved;
      // RDD.foreach would run on executors, losing ordering (and output) on a cluster.
      sum.collect().foreach { case (device, (self, other)) =>
        println(s"$device $self $other ${self + other}")
      }
    } finally {
      // Release the SparkContext even if the job fails.
      sc.stop()
    }
  }
}
