package com.imooc.log

import org.apache.spark.sql.{SaveMode, SparkSession}

//$SPARK_HOME/bin/spark-submit \
//--class com.imooc.log.SparkStatCleanJobYARN \
//--name SparkStatCleanJobYARN \
//--master yarn \
//--executor-memory 1G \
//--num-executors 1 \
//--files file:///home/hadoop/data/ipDatabase.csv,file:///home/hadoop/data/ipRegion.xlsx \
//  /home/hadoop/lib/sql-1.0-jar-with-dependencies.jar \
//  hdfs://hadoop000:8020/imooc/input/* hdfs://hadoop000:8020/imooc/clean


/**
 * Spark job that cleans raw access-log data; intended to run on YARN.
 *
 * Expects exactly two arguments: the input path of the raw log files and
 * the output path for the cleaned, parquet-formatted result (see the
 * spark-submit example above for a full invocation).
 */
object SparkStatCleanJobYARN {

  def main(args: Array[String]): Unit = {
    if (args.length != 2) {
      println("Usage: SparkStatCleanJobYARN <inputPath> <outputPath>")
      System.exit(1)
    }

    val Array(inputPath, outputPath) = args

    // On YARN the application name and master are supplied via
    // spark-submit flags, so the builder needs no explicit configuration.
    val spark = SparkSession.builder().getOrCreate()

    // Raw log lines -> Row objects; drop rows that failed to parse
    // (assumes parse failures yield rows with at most one field —
    // see AccessConvertUtils.parseLog).
    val cleanedRows = spark.sparkContext
      .textFile(inputPath)
      .map(AccessConvertUtils.parseLog)
      .filter(_.length > 1)

    val cleanedDF = spark.createDataFrame(cleanedRows, AccessConvertUtils.struct)

    // Emit a single parquet file per "day" partition, replacing any
    // previous output at the target location.
    cleanedDF
      .coalesce(1)
      .write
      .format("parquet")
      .mode(SaveMode.Overwrite)
      .partitionBy("day")
      .save(outputPath)

    spark.stop()
  }
}

