package day4

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.Trigger
import org.apache.spark.sql.types.{DoubleType, IntegerType, LongType, StringType, StructType}

object consume {
  def main(args: Array[String]): Unit = {
    // Point Hadoop at the local winutils install (needed when running Spark on Windows).
    System.setProperty("hadoop.home.dir", "D:\\devtools\\hadoop")
    // Silence Spark's verbose INFO/WARN logging.
    Logger.getLogger("org").setLevel(Level.OFF)

    val spark = SparkSession
      .builder()
      .appName("Spark Streaming")
      .master("local[*]")
      .getOrCreate()

    // Schema of the incoming space-delimited CSV records.
    val schema = new StructType()
      .add("ID", IntegerType)
      .add("name_ID", LongType)
      .add("self", DoubleType)
      .add("other", DoubleType)
      .add("Device", StringType)

    // Structured Streaming file sources take a directory, not a single file.
    val input = spark.readStream
      .format("csv")
      .schema(schema)
      .option("delimiter", " ")
      .load("D:\\data\\files\\wordCount")

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Running per-device totals of the "self" and "other" columns.
    val totals = input
      .groupBy("Device")
      .agg(
        sum("self").alias("total_self"),
        sum("other").alias("total_other"),
        (sum("self") + sum("other")).alias("total")
      )

    // Print only the rows changed in each micro-batch, firing every 3 seconds.
    val query = totals.writeStream
      .format("console")
      .outputMode("update")
      .trigger(Trigger.ProcessingTime("3 seconds"))

    // Block the driver until the streaming query is stopped or fails.
    query.start().awaitTermination()
  }
}
