package com.log.anal.log

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.sql.functions._

object TopNStatJob {

  def main(args: Array[String]): Unit = {
    // Disable partition-column type inference (on by default) so that partition
    // values such as "day=20190602" are kept as strings rather than cast to numbers.
    val spark = SparkSession.builder()
      .config("spark.sql.sources.partitionColumnTypeInference", "false")
      .appName("TopNStatJob") // was "SparkStatFormatJob" — copy-paste leftover, now matches the object name
      .master("local[2]")
      .getOrCreate()

    val df = spark.read.format("json")
      .load("file:///Users/username/workspace_code/learn/spark-learn/datasets/tmp/log2/")

    topNCallPyFile(spark, df)
    spark.stop()

    /**
     * Computes the most frequently invoked python files among DEBUG-level log
     * records for a single day, and prints the top rows to stdout.
     *
     * @param spark active session (needed for `implicits._` / the `$` interpolator)
     * @param df    log DataFrame; assumes columns `day`, `infoLevel`, `name` — TODO confirm against the JSON schema
     * @param day   day to filter on, `yyyyMMdd` prefix of the `day` column (defaults to "20190602")
     * @param topN  number of rows to display (defaults to 20, which is `show()`'s default)
     */
    def topNCallPyFile(spark: SparkSession, df: DataFrame,
                       day: String = "20190602", topN: Int = 20): Unit = {
      import spark.implicits._
      // Column.substr is 1-based; the original used substr(0, 8), which Spark
      // silently coerces to position 1 — substr(1, 8) states the intent correctly.
      val topNDF = df.filter(df.col("day").substr(1, 8) === day and
        df.col("infoLevel") === "DEBUG").groupBy(df.col("name")).
        agg(count("name").as("ct")).orderBy($"ct".desc)

      topNDF.show(topN)
    }
  }
}
