package day4

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.Trigger
import org.apache.spark.sql.types.{DoubleType, IntegerType, LongType, StringType, StructType}

/**
 * Structured Streaming demo: watches a directory of tab-separated CSV log
 * files, groups rows by `name_ID`, collects the distinct `time` values and
 * sums the `cost` per name, and prints each micro-batch result to the console
 * every 3 seconds until the query is terminated.
 */
object time_consume_2 {

  def main(args: Array[String]): Unit = {
    // Windows-only: point Hadoop at a local winutils installation so the
    // file-based source works outside a real Hadoop cluster.
    System.setProperty("hadoop.home.dir", "D:\\hadoop")
    // Silence Spark's internal logging so only batch output reaches stdout.
    Logger.getLogger("org").setLevel(Level.OFF)

    val spark = SparkSession.builder()
      .appName("Spark Streaming")
      .master("local[*]")
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Explicit schema is mandatory for streaming file sources: the reader
    // cannot infer one. Columns: log id, user/name id, timestamp, cost.
    val logSchema = new StructType()
      .add("log_ID", IntegerType)
      .add("name_ID", StringType)
      .add("time", LongType)
      .add("cost", DoubleType)

    // The streaming CSV source takes a directory path (not a single file);
    // new files dropped into it are picked up as micro-batches.
    val records = spark.readStream
      .format("csv")
      .schema(logSchema)
      .option("delimiter", "\t")
      .load("D:\\data\\HCIP\\files")

    // Per name: the set of distinct event times and the running cost total.
    val perName = records
      .groupBy("name_ID")
      .agg(collect_set("time"), sum("cost"))

    // "update" mode emits only the aggregate rows that changed in each batch;
    // the 3-second trigger paces micro-batch execution.
    val consoleSink = perName.writeStream
      .format("console")
      .outputMode("update")
      .trigger(Trigger.ProcessingTime("3 seconds"))

    // Start the query and block the driver until it is stopped or fails.
    consoleSink.start().awaitTermination()
  }

}
