package day3

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession

object consume_SparkSQL {
  /** Aggregates per-device consumption totals from a space-delimited log file.
    *
    * Each input line is expected to contain 5 space-separated fields:
    * ID, username_ID, self, other, device_ID. Results (total_self,
    * total_other, total per device) are printed via `show()`.
    *
    * @param args optional: args(0) = path to the consume log; falls back to
    *             the original hard-coded path so existing invocations work.
    */
  def main(args: Array[String]): Unit = {
    // Location of Windows Hadoop native binaries (winutils.exe) — Windows-only setup.
    System.setProperty("hadoop.home.dir", "D:\\新建文件夹\\HCIA初级\\hadoop")
    // Silence Spark's verbose INFO logging.
    Logger.getLogger("org").setLevel(Level.OFF)

    // Allow the input path to be supplied on the command line; default keeps
    // backward compatibility with the previous hard-coded behavior.
    val inputPath =
      if (args.nonEmpty) args(0)
      else "D:\\新建文件夹\\HCIA初级\\240722\\files\\consume.log"

    val session = SparkSession.builder()
      .appName("SparkSQL-test")
      .master("local")
      .getOrCreate()

    // try/finally guarantees the SparkSession is released even when the job
    // throws (e.g. missing input file) — previously close() was skipped on error.
    try {
      import session.implicits._

      // Parse each line into a 5-tuple; Dataset columns become _1 .. _5.
      // NOTE(review): split(" ") assumes fields are separated by exactly one
      // space — confirm the log format if fields may be multi-space aligned.
      session.sparkContext.textFile(inputPath)
        .map { line =>
          val f = line.split(" ")
          (f(0), f(1), f(2), f(3), f(4))
        }
        .toDS()
        .createOrReplaceTempView("consume")

      // _3 = self amount, _4 = other amount, _5 = device id.
      val sql =
        """select _5 as Device_ID,
          |round(sum(_3),2) as total_self,
          |round(sum(_4),2) as total_other,
          |round(sum(_3+_4),2) as total
          |from consume
          |group by _5""".stripMargin

      session.sql(sql).show()
    } finally {
      session.close()
    }
  }

}