package cn.pengpeng.dmp.etl

import cn.pengpeng.dmp.beans.Log
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, SaveMode, SparkSession}

/**
  * Created by root on 2018/12/4.
  *
  * ETL job: reads comma-separated log lines from a (bz2-compressed) text
  * input path, parses each line into a [[Log]] case class, and writes the
  * result as a Parquet dataset.
  *
  * Usage: Bz2ParquetV2 &lt;dataInputPath&gt; &lt;outputPath&gt;
  */
object Bz2ParquetV2 {

  // A raw line must split into at least this many fields to map onto Log.
  private val MinFieldCount = 85

  def main(args: Array[String]): Unit = {
    // 校验参数 — exit with a non-zero status so schedulers/scripts see the failure
    if (args.length != 2) {
      println(
        """
          |cn.pengpeng.dmp.etl.Bz2ParquetV2
          |参数：dataInputPath,outputPath
        """.stripMargin)
      sys.exit(1)
    }

    // 模式匹配参数 — safe: length == 2 is guaranteed above
    val Array(dataInputPath, outputPath) = args

    // NOTE(review): hard-coded local master overrides any cluster deployment;
    // consider dropping setMaster and supplying it via spark-submit instead.
    val conf = new SparkConf()
      .setAppName("Bz2Parquet")
      .setMaster("local[*]")
    //.set("spark.sql.parquet.compression.codec","snappy")
    //.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val spark: SparkSession = SparkSession
      .builder()
      .config(conf)
      .getOrCreate()

    // 读取数据 — one Dataset element per input line
    val lines: Dataset[String] = spark.read.textFile(dataInputPath)

    // split with limit -1 keeps trailing empty fields so column positions stay stable
    val linesAndArray = lines.rdd.map(line => line.split(",", -1))

    // Drop malformed lines that don't carry enough fields for a Log record.
    val filterLines = linesAndArray.filter(arr => arr.length >= MinFieldCount)

    val caseRdd: RDD[Log] = filterLines.map(Log(_))
    // 创建 dataframe — schema is inferred from the Log case class
    val dataFrame: DataFrame = spark.createDataFrame(caseRdd)
    // 保存数据到 parquet 文件中 — Overwrite replaces any existing output
    dataFrame.write.mode(SaveMode.Overwrite).parquet(outputPath)

    // 释放资源
    spark.stop()
  }

}
