package day3

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession

object consume_SparkSQL {
  /**
   * Reads a whitespace-delimited consume log, registers it as a temporary
   * view named "consume", and prints per-device totals.
   *
   * Expected input: lines of 5 space-separated fields:
   *   ID username_ID self other device_ID
   * which become tuple columns _1 .. _5 in the view.
   *
   * NOTE(review): lines with fewer than 5 fields will throw
   * ArrayIndexOutOfBoundsException — assumed well-formed input; confirm.
   */
  def main(args: Array[String]): Unit = {
    // Required on Windows so Hadoop/Spark can locate winutils.exe.
    System.setProperty("hadoop.home.dir", "D:\\devtools\\hadoop")
    // Silence Spark's verbose INFO logging.
    Logger.getLogger("org").setLevel(Level.OFF)

    val session = SparkSession.builder()
      .appName("SparkSQL-test")
      .master("local")
      .getOrCreate()
    try {
      import session.implicits._
      session.sparkContext.textFile("D:\\data\\HCIP\\files\\consume.log")
        .map { line =>
          val fields = line.split(" ")
          (fields(0), fields(1), fields(2), fields(3), fields(4))
        }
        .toDS()
        .createOrReplaceTempView("consume")
      // _3/_4 are strings; sum() relies on Spark SQL's implicit
      // string -> double cast. An equivalent readable variant would be
      // .toDF("ID","username_ID","self","other","device_ID") with named
      // columns in the query.
      val sql = "select _5,round(sum(_3),2),round(sum(_4),2),round(sum(_3+_4),2) from consume group by _5"
      session.sql(sql).show()
    } finally {
      // FIX: stop the SparkSession so the local context and its threads
      // are released even if the job throws (was previously never stopped).
      session.stop()
    }
  }

}
