package com.yanduo.tools

import com.yanduo.beans.Log
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession

/**
  * 日志转成parquet文件格式
  * -- 采用 自定义类的方式构建schema信息
  *
  * @author Gerry chan
  * @version 1.0
  */
object Bzip2ParquetV2 {

  /**
    * Converts comma-separated (possibly bzip2-compressed) log files into
    * Parquet, using the custom [[Log]] case class to supply the schema
    * via reflection.
    *
    * Expected arguments:
    *  - logInputPath:     input path of the raw log files
    *  - compressionCode:  parquet codec, one of snappy / gzip / lzo
    *  - resultOutputPath: output directory for the parquet files
    */
  def main(args: Array[String]): Unit = {
    // 0. Argument validation — print usage to stderr and exit non-zero on misuse.
    //    (The original exited with code 0, which reports success to schedulers.)
    if (args.length != 3) {
      System.err.println(
        """
          |com.yanduo.tools.Bzip2ParquetV2
          |Arguments:
          | logInputPath
          | compressionCode <snappy,gzip,lzo>
          | resultOutputPath
          |
        """.stripMargin)
      sys.exit(1)
    }

    // 1. Unpack the three program arguments.
    val Array(logInputPath, compressionCode, resultOutputPath) = args

    // 2. Build the SparkConf.
    val sparkConf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      // NOTE(review): hard-coded local master; in production this should come
      // from spark-submit rather than the code — confirm deployment expectations.
      .setMaster("local[*]")
      // Kryo serialization for RDD data spilled to disk / shuffled between workers.
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("spark.sql.parquet.compression.codec", compressionCode)

    // Register the custom Log class with Kryo for compact serialization.
    sparkConf.registerKryoClasses(Array(classOf[Log]))

    // Create the SparkSession via the public builder API instead of
    // `new SparkSession(sc)`, whose constructor is private[sql] and
    // not intended for user code. The SparkContext is obtained from it.
    val sparkSession = SparkSession.builder().config(sparkConf).getOrCreate()
    val sc = sparkSession.sparkContext

    // 3. Read the raw logs. split(",", -1) keeps trailing empty fields;
    //    rows with fewer than 85 fields are treated as malformed and dropped.
    val dataLog: RDD[Log] = sc.textFile(logInputPath)
      .map(_.split(",", -1))
      .filter(_.length >= 85)
      // Convert each field array into a Log via its companion apply.
      .map(Log(_))

    // 4. Build the DataFrame; the schema is inferred from the Log case class.
    val dataFrame = sparkSession.createDataFrame(dataLog)

    // 5. Write partitioned parquet output.
    //    NOTE(review): "provicename" looks like a typo for "provincename", but it
    //    must match the field name declared on Log (outside this file) — confirm
    //    there before renaming, or the partition column will not resolve.
    dataFrame.write
      .partitionBy("provicename", "cityname")
      .parquet(resultOutputPath)

    // 6. Stop the session (also stops the underlying SparkContext).
    sparkSession.stop()
  }
}
