package com.jinghang.logProject.App_yarn

import com.jinghang.logProject.utils.LogConvertUtil
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}

/**
  * Second cleaning stage: consumes the output of the first cleaning step
  * and continues refining the log data, writing the result as JSON.
  *
  * Usage: _020_SparkStatCleanApp &lt;inputPath&gt; &lt;outputPath&gt;
  */
object _020_SparkStatCleanApp {

  def main(args: Array[String]): Unit = {

    // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException
    // when the input/output paths are missing.
    if (args.length < 2) {
      System.err.println("Usage: _020_SparkStatCleanApp <inputPath> <outputPath>")
      sys.exit(1)
    }
    val inputPath = args(0)
    val outputPath = args(1)

    val spark =
      SparkSession
        .builder()
        .appName("SparkCleanJob2")
        .getOrCreate()

    try {
      // Read the first-stage output with a minimum of 4 partitions.
      val logRDD = spark.sparkContext.textFile(inputPath, 4)
      logRDD.take(20).foreach(println)

      // RDD[String] => RDD[Row] via the shared parsing helper.
      val rowRDD: RDD[Row] = logRDD.map(line => LogConvertUtil.parseLog(line))

      println("rowRDD")
      rowRDD.take(20).foreach(println)

      // Attach the shared schema so the row RDD becomes a typed DataFrame.
      val schema: StructType = LogConvertUtil.struct
      val logDataFrame: DataFrame = spark.createDataFrame(rowRDD, schema)

      logDataFrame.printSchema()
      logDataFrame.show()

      logDataFrame
        .write
        .mode(SaveMode.Overwrite) // overwrite any previous output at this path
        .format("json")
        .save(outputPath)
    } finally {
      // Release the session even if any stage above throws.
      spark.stop()
    }
  }

}
