package com.dmp

import com.beans.AdLog
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Batch job: reads comma-separated ad-log text files, parses each line into an
  * [[AdLog]] bean, and rewrites the data set as snappy-compressed Parquet.
  *
  * Usage: fileToParquetV2 &lt;inputPath&gt; &lt;outputPath&gt;
  *
  * @date 2019/8/22 17:42
  * @author 17611219021@sina.cn
  */
object fileToParquetV2 {

  def main(args: Array[String]): Unit = {
    if (args.length != 2) {
      println(
        """
          |com.dmp.fileToParquetV2
          |参数：
          | inputpath
          | outputPath
        """.stripMargin)
      // Non-zero exit code so callers/schedulers can detect the invocation error.
      sys.exit(1)
    }
    val Array(inputpath, outputPath) = args

    val conf: SparkConf = new SparkConf()
    // BUG FIX: the original passed the *literal* string "${this.getClass.getSimpleName}"
    // because the `s` interpolator prefix was missing. Use the value directly.
    conf.setAppName(this.getClass.getSimpleName)
    conf.setMaster("local[*]")
    // Use Kryo serialization, and snappy compression for Parquet (default codec is gzip).
    conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    conf.set("spark.sql.parquet.compression.codec", "snappy")
    // Register the custom bean class so Kryo serializes it efficiently.
    conf.registerKryoClasses(Array(classOf[AdLog]))

    val sc: SparkContext = new SparkContext(conf)
    // Split on "," keeping trailing empty fields (-1 limit); drop malformed rows
    // with fewer than 2 fields before constructing AdLog beans.
    val logRDD: RDD[AdLog] = sc.textFile(inputpath)
      .map(_.split(",", -1))
      .filter(_.length >= 2)
      .map(AdLog(_))

    // Delete a pre-existing output directory so the Parquet write does not fail.
    val fs = FileSystem.get(sc.hadoopConfiguration)
    val path = new Path(outputPath)
    if (fs.exists(path)) {
      fs.delete(path, true) // recursive delete
    }

    val sqlContext: SQLContext = new SQLContext(sc)
    val dataFrame: DataFrame = sqlContext.createDataFrame(logRDD)
    dataFrame.write.parquet(outputPath)
    sc.stop()
  }

}
