package com.dmp

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * ClassName: fileToParquet
  * Package: com.dmp
  * Description: ETL job that converts a comma-delimited text file to snappy-compressed Parquet.
  *
  * @date: 2019/8/22 17:42
  * @author: 17611219021@sina.cn
  */
object fileToParquet {

  /**
    * Entry point. Expects exactly two arguments: the input text-file path and
    * the Parquet output path. Each input line is split on "," and any record
    * with fewer than two fields is dropped; the first two fields are written
    * out as Parquet columns (province, area) with snappy compression.
    */
  def main(args: Array[String]): Unit = {
    // Require exactly two CLI arguments; otherwise print usage and exit.
    if (args.length != 2) {
      println(
        """
          |com.dmp.fileToParquet
          |参数：
          | inputpath
          | outputPath
        """.stripMargin)
      sys.exit()
    }
    val Array(inputpath, outputPath) = args

    val conf: SparkConf = new SparkConf()
    // BUG FIX: the original passed the literal string "${this.getClass.getSimpleName}"
    // (the `s` interpolator was missing), so the Spark UI showed the raw placeholder
    // text instead of the class name.
    conf.setAppName(this.getClass.getSimpleName)
    // NOTE(review): master is hard-coded to local mode; for cluster runs this is
    // normally supplied via spark-submit — confirm before deploying.
    conf.setMaster("local[*]")
    // Use Kryo serialization and snappy Parquet compression (default codec is gzip).
    conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    conf.set("spark.sql.parquet.compression.codec", "snappy")
    val sc: SparkContext = new SparkContext(conf)

    // ETL: split each line on "," (limit -1 keeps trailing empty fields) and
    // discard records with fewer than 2 fields; keep only the first two.
    val logRDD: RDD[Row] = sc.textFile(inputpath)
      .map(_.split(",", -1))
      .filter(_.length >= 2)
      .map(fields => Row(fields(0), fields(1)))

    // Schema for the two retained columns.
    val logStruct = StructType(Seq(
      StructField("province", StringType),
      StructField("area", StringType)
    ))

    val sqlContext: SQLContext = new SQLContext(sc)
    val dataFrame: DataFrame = sqlContext.createDataFrame(logRDD, logStruct)
    dataFrame.write.parquet(outputPath)
    sc.stop()
  }

}
