package day2

import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}

object consume {

  /**
   * Reads a space-delimited consumption log, sums the per-device "self" and
   * "other" amounts, and prints one line per device: id, self, other, total.
   *
   * Expected line layout (0-based fields after splitting on a single space):
   * field 2 = "self" amount, field 3 = "other" amount, field 4 = device ID.
   *
   * @param args optional; args(0) overrides the default input path.
   */
  def main(args: Array[String]): Unit = {

    System.setProperty("hadoop.home.dir", "D:\\hadoop")

    // Silence Spark's verbose logging BEFORE the context starts, so the
    // startup chatter is suppressed too (original set it afterwards).
    Logger.getLogger("org").setLevel(Level.OFF)

    val sc = new SparkContext(new SparkConf().setAppName("Basic").setMaster("local"))

    // Allow the input path on the command line; keep the original
    // hard-coded path as the default for backward compatibility.
    val inputPath = args.headOption.getOrElse("D:\\data\\HCIP\\files\\consume.log")

    try {
      // Key: device ID (5th field). Value: (self, other) as BigDecimal to
      // avoid floating-point rounding on monetary amounts.
      val records = sc.textFile(inputPath).map { line =>
        val fields = line.split(" ")
        (fields(4), (BigDecimal(fields(2)), BigDecimal(fields(3))))
      }

      // Sum the (self, other) pairs per device, then order by device ID.
      val totals = records
        .reduceByKey((x: (BigDecimal, BigDecimal), y: (BigDecimal, BigDecimal)) =>
          (x._1 + y._1, x._2 + y._2))
        .sortByKey()

      // BUG FIX: the original printed `self` and `other` with no separator
      // ("id selfother total"); insert the missing space and use
      // interpolation instead of `+` concatenation.
      totals.foreach { case (device, (self, other)) =>
        println(s"$device $self $other ${self + other}")
      }
    } finally {
      sc.stop() // release local Spark resources even if the job fails
    }
  }

}
