package com.log.anal.log

import org.apache.spark.sql.{SaveMode, SparkSession}

/**
 * Spark batch job that cleans raw access-log lines into a structured,
 * day-partitioned JSON dataset.
 *
 * Usage: SparkStatCleanJob [inputPath] [outputPath]
 * Both arguments are optional; when omitted, the original local defaults
 * are used, so existing invocations keep working unchanged.
 */
object SparkStatCleanJob {

  // Defaults preserved from the original hard-coded paths.
  private val DefaultInput  = "file:///Users/username/workspace_code/learn/spark-learn/datasets/tmp/log1/"
  private val DefaultOutput = "file:///Users/username/workspace_code/learn/spark-learn/datasets/tmp/log2/"

  def main(args: Array[String]): Unit = {
    // Allow paths to be overridden from the command line; fall back to defaults.
    val inputPath  = args.lift(0).getOrElse(DefaultInput)
    val outputPath = args.lift(1).getOrElse(DefaultOutput)

    // appName now matches the object name (was mistakenly "SparkStatFormatJob",
    // copied from a sibling job, which made the Spark UI/logs misleading).
    val spark = SparkSession.builder()
      .appName("SparkStatCleanJob")
      .master("local[2]")
      .getOrCreate()

    try {
      val logRdd = spark.sparkContext.textFile(inputPath)

      // RDD[String] -> DataFrame using the project-defined row converter and schema.
      val df = spark.createDataFrame(logRdd.map(line => AccessConvertUtils.parseLog(line)), AccessConvertUtils.struct)

      // coalesce(3): cap the number of output files.
      // SaveMode.Overwrite: replace any previous output at the target path.
      // partitionBy("day"): one output directory per day value.
      df.coalesce(3)
        .write
        .format("json")
        .mode(SaveMode.Overwrite)
        .partitionBy("day")
        .save(outputPath)

      df.printSchema()
      df.show()
    } finally {
      // Ensure the SparkSession is released even if the job fails.
      spark.stop()
    }
  }
}
