package com.imooc.log

import com.imooc.log.SparkStatFormatJob.SetLogger
import com.imooc.log.util.AccessConvertUtil
import org.apache.spark.sql.{SaveMode, SparkSession}

/**
  * Created by zghgchao 2017/12/25 10:07
  * 使用Spark完成我们的数据清洗操作
  *
  */
/**
  * Data-cleaning job: reads the pre-formatted access log, parses each line
  * into a structured Row via [[AccessConvertUtil]], and persists the result
  * as day-partitioned Parquet.
  *
  * Sample input line (tab-separated: time, url, traffic, ip):
  *   2017-05-11 14:09:14  http://www.imooc.com/video/4500  304  218.75.35.226
  *
  * Output schema (see AccessConvertUtil.struct):
  *   url, cmsType, cmsId, traffic, ip, city, time, day
  */
object SparkStatCleanJob {

  // Defaults preserve the original hard-coded locations; override via args.
  private val DefaultInputPath  = "src/data/access.log"
  private val DefaultOutputPath = "src/data/clean"

  /**
    * Entry point.
    *
    * @param args optional overrides: args(0) = input log path,
    *             args(1) = cleaned-output directory. Running with no
    *             arguments keeps the original default paths.
    */
  def main(args: Array[String]): Unit = {
    SetLogger

    val inputPath  = if (args.length > 0) args(0) else DefaultInputPath
    val outputPath = if (args.length > 1) args(1) else DefaultOutputPath

    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("SparkStatCleanJob")
      // Keep the "day" partition column as a string when the partitioned
      // output is read back, instead of letting Spark infer it as an int
      // (this is tuning point 2 below, now actually applied).
      .config("spark.sql.sources.partitionColumnTypeInference.enabled", "false")
      .getOrCreate()

    // Ensure the SparkSession is stopped even if the job fails midway.
    try {
      val accessRDD = spark.sparkContext.textFile(inputPath)

      // RDD[String] ==> DataFrame
      // SparkSession.createDataFrame(rowRDD: RDD[Row], schema: StructType): DataFrame
      val accessDF = spark.createDataFrame(
        accessRDD.map(line => AccessConvertUtil.parseLog(line)),
        AccessConvertUtil.struct)

      // coalesce(1) limits the number of small output files per partition;
      // SaveMode.Overwrite replaces the output of any previous run.
      accessDF.coalesce(1)
        .write
        .format("parquet")
        .partitionBy("day")
        .mode(SaveMode.Overwrite)
        .save(outputPath)

      /**
        * Tuning notes:
        *   1) Control output file size/count with coalesce.
        *   2) Partition-column type inference:
        *      spark.sql.sources.partitionColumnTypeInference.enabled
        *   3) When loading results into a database, use batched inserts.
        */
    } finally {
      spark.stop()
    }
  }

}
