package day4

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.Row.empty.schema
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.udf
import org.apache.spark.sql.types.{DoubleType, LongType, StringType, StructField, StructType}

import java.text.SimpleDateFormat
import java.util.TimeZone

/**
 * Loads a tab-separated consumption log into Spark SQL two ways:
 * first by parsing an RDD of raw lines into a DataFrame, then by
 * reading the same file directly with the CSV reader and an explicit
 * schema. Each result is registered as the temp view "time_consume".
 */
object time_consume {

  // Tab-separated input; columns assumed: id, username, time (epoch seconds), amount
  // — TODO confirm against the actual log format.
  private val InputPath = "D:\\data\\HCIP\\files\\time_consume.log"

  def main(args: Array[String]): Unit = {
    System.setProperty("hadoop.home.dir", "D:\\hadoop")
    Logger.getLogger("org").setLevel(Level.OFF)

    val session = SparkSession.builder()
      .appName("SparkSQL-test")
      .master("local")
      .getOrCreate()

    // Build a table from the raw text file via RDD -> DataFrame.
    import session.implicits._
    session.sparkContext.textFile(InputPath)
      .map { line =>
        val fields = line.split("\t")
        // fields(0) is skipped — presumably a record id; verify against the log.
        (fields(1), fields(2).toLong, fields(3))
      }
      .toDF("username", "time", "amount")
      .createOrReplaceTempView("time_consume")
//    session.sql("select * from time_consume").show(100)

//    session.sql("select username , " +
//      "concat_ws(',',collect_list(CAST(time as STRING))) as times ," +
//      "round(sum(amount),2) as total "+
//      "from time_consume "+
//      "group by username").show()

//    val timeChange = udf((time: Long) =>{
//      val nyr = new SimpleDateFormat("yyyy-MM-dd")
//      nyr.setTimeZone(TimeZone.getTimeZone("CST"))
//      nyr.format(time * 1000)
//    })
    // Register the UDF with the session.
//    session.udf.register("timeChange",timeChange)

    // Build the SQL using the newly registered UDF.

//    val sql = "select username,"+
//      "concat_ws(',',collect_list(timeChange(time))) AS times, " +
//      "round(sum(amount),2) as total " +
//      "from time_consume group by username"
//    session.sql(sql).show(30,false)

    // Read the same file directly with the DataFrame CSV reader.
    // BUG FIX: the original passed the imported `Row.empty.schema` (the schema
    // of an empty Row, i.e. no usable schema), used `.format("text")` (which the
    // terminal .csv(...) call overrides anyway), and set the non-existent option
    // key "tab" — the CSV field-delimiter option is "sep". Without it the file
    // would be split on the default comma and land in a single column.
    val logSchema = StructType(Seq(
      StructField("id", StringType, nullable = true), // assumed leading column — TODO confirm
      StructField("username", StringType, nullable = true),
      StructField("time", LongType, nullable = true),
      StructField("amount", DoubleType, nullable = true)
    ))

    session.read
      .option("sep", "\t")
      .schema(logSchema) // local val shadows the broken `Row.empty.schema` import
      .csv(InputPath)
      .createOrReplaceTempView("time_consume")
    session.sql("select * from time_consume").show()
  }

}
