package com.xiaoxu.logProject.App

import com.xiaoxu.logProject.entity.{DayCityVideoAccessStat, DayVideoAccessStat}
import com.xiaoxu.logProject.utils.LogConvertUtil
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}

/**
  * Step 2 of the pipeline: clean the raw access log with Spark and persist
  * the result as Parquet.
  *
  * Usage: SparkStatCleanApp [inputPath] [outputPath]
  *   inputPath  - raw access log file   (default: data/access.log)
  *   outputPath - Parquet output folder (default: data/output/SparkStatCleanApp/parquet1)
  */
object SparkStatCleanApp {

  def main(args: Array[String]): Unit = {

    // Paths are overridable from the command line; the defaults preserve the
    // original hard-coded behavior, so existing invocations are unaffected.
    val inputPath  = args.lift(0).getOrElse("data/access.log")
    val outputPath = args.lift(1).getOrElse("data/output/SparkStatCleanApp/parquet1")

    // Kryo serialization with pre-registered stat classes is cheaper than the
    // default Java serialization (no full class names shipped per record).
    val conf = new SparkConf().set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    conf.registerKryoClasses(Array(classOf[DayCityVideoAccessStat], classOf[DayVideoAccessStat]))

    val spark =
      SparkSession
        .builder()
        .appName("SparkStatCleanJob")
        .master("local[8]")
        .config(conf) // serializer already set on `conf`; no need to set it a second time
        .getOrCreate()

    try {
      // Read the raw log with 6 partitions.
      val logRDD = spark.sparkContext.textFile(inputPath, 6)
      //logRDD.take(20).foreach(println)

      // RDD[String] -> RDD[Row] -> DataFrame, using the shared schema so the
      // Parquet output has well-defined column names and types.
      val rowRDD: RDD[Row] = logRDD.map(line => LogConvertUtil.parseLog(line))
      val schema: StructType = LogConvertUtil.struct
      val logDataFrame: DataFrame = spark.createDataFrame(rowRDD, schema)

      logDataFrame.printSchema()
      logDataFrame.show(false)

      logDataFrame
        //.coalesce(4) // coalesce(n) can only reduce the partition count (see TuningSpark notes)
        .write
        .mode(SaveMode.Overwrite) // overwrite any output left by a previous run
        //.partitionBy("day")
        .format("parquet")
        .save(outputPath)
    } finally {
      // Always release the SparkSession, even if the cleaning job fails.
      spark.stop()
    }
  }

}
