package com.imooc.log

import com.imooc.log.SparkStatFormatJob.SetLogger
import com.imooc.log.util.AccessConvertUtil
import org.apache.spark.sql.{SaveMode, SparkSession}

/**
  * Created by zghgchao 2017/12/25 10:07
  * 使用Spark完成我们的数据清洗操作：运行在YARN之上
  * (Data cleaning with Spark, running on YARN.)
  */
object SparkStatCleanJobYARN {

  /**
    * Entry point for the YARN data-cleaning job.
    *
    * Reads raw access-log lines from `<input>`, converts each line into a
    * structured row via [[AccessConvertUtil.parseLog]], and writes the result
    * to `<output>` as Parquet files partitioned by `day`.
    *
    * @param args exactly two arguments: the input path and the output path
    *             (typically HDFS URIs); the process exits with status 1 otherwise
    */
  def main(args: Array[String]): Unit = {
    if (args.length != 2) {
      println("Usage：SparkStatCleanJobYARN <input> <output>")
      System.exit(1)
    }
    SetLogger

    val inputPath  = args(0)
    val outputPath = args(1)

    // Deliberately no .master() call here: when running on YARN the master is
    // supplied externally by spark-submit (--master yarn).
    val spark = SparkSession
      .builder()
      .appName("SparkStatCleanJobYARN")
      .getOrCreate()

    // Raw tab-separated log lines, e.g.:
    //   2017-05-11 14:09:14  http://www.imooc.com/video/4500  304  218.75.35.226
    val rawLines = spark.sparkContext.textFile(inputPath)

    // RDD[String] ==> DataFrame with the explicit schema declared in
    // AccessConvertUtil.struct. Resulting columns:
    //   url, cmsType, cmsId, traffic, ip, city, time, day
    val parsedRows = rawLines.map(line => AccessConvertUtil.parseLog(line))
    val accessDF   = spark.createDataFrame(parsedRows, AccessConvertUtil.struct)

    // ----------------- persist the cleaned data -----------------
    // Written as Parquet, partitioned by the `day` column.
    // coalesce(1) limits the number of small output files per partition;
    // SaveMode.Overwrite replaces any output from a previous run.
    accessDF
      .coalesce(1)
      .write
      .format("parquet")
      .mode(SaveMode.Overwrite)
      .partitionBy("day")
      .save(outputPath)

    /**
      * Tuning notes:
      *   1) Control output file count/size with coalesce.
      *   2) Partition-column type inference can be adjusted via
      *      spark.sql.sources.partitionColumnTypeInference.enabled.
      *   3) When inserting into a database, use batched statements.
      */

    spark.stop()
  }

  /**
    * Packaging and deployment notes:
    *
    *  - Mark the Spark dependencies as <scope>provided</scope> in pom.xml
    *    and add the assembly plugin:
    *       <plugin>
    *           <artifactId>maven-assembly-plugin</artifactId>
    *           <configuration>
    *               <archive>
    *                   <manifest>
    *                       <mainClass></mainClass>
    *                   </manifest>
    *               </archive>
    *               <descriptorRefs>
    *                   <descriptorRef>jar-with-dependencies</descriptorRef>
    *               </descriptorRefs>
    *           </configuration>
    *       </plugin>
    *
    *  - Build the fat jar:        mvn assembly:assembly
    *  - Stage the input on HDFS:
    *       hadoop fs -mkdir -p /imooc/input
    *       hadoop fs -put /home/hadoop/data/access.log /imooc/input
    *
    *  - Submit to YARN:
    *       ./bin/spark-submit \
    *       --class com.imooc.log.SparkStatCleanJobYARN \
    *       --master yarn \
    *       --executor-memory 1G \
    *       --num-executors 1 \
    *       --files /home/hadoop/lib/ipDatabase.csv,/home/hadoop/lib/ipRegion.xlsx \
    *       /home/hadoop/lib/sql-1.0-jar-with-dependencies.jar \
    *       hdfs://172.17.66.51:8020/imooc/input/access.log hdfs://172.17.66.51:8020/imooc/clean
    *
    *  - Inspect the result from spark-shell:
    *       spark.read.format("parquet").load("hdfs://172.17.66.51:8020/imooc/clean/day=20170511/part-00000-84e7b9a6-c2e6-481b-8d50-02d42b310539.c000.snappy.parquet").show(false)
    */

}
