package cn.dmp.tools

import cn.dmp.beans.{Log, RichString}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext

object Biz2ParquetV2 {

    /**
     * Batch job: converts raw comma-separated log files into
     * Snappy-compressed Parquet output.
     *
     * @param args exactly two arguments are required:
     *             args(0) = input path of the raw log files,
     *             args(1) = output path for the Parquet result
     */
    def main(args: Array[String]): Unit = {

        // Fail fast with a usage hint instead of a cryptic MatchError
        // when the caller passes the wrong number of arguments.
        if (args.length != 2) {
            System.err.println("Usage: Biz2ParquetV2 <dataInputPath> <resultOutputPath>")
            sys.exit(1)
        }
        val Array(dataInputPath, resultOutputPath) = args

        val sparkConf = new SparkConf()
          .setMaster("local[*]")
          .setAppName("日志转Parquet文件格式")
          // Kryo is faster and more compact than default Java serialization
          // for RDD shuffle/worker traffic.
          .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
          .set("spark.sql.parquet.compression.codec", "snappy")
          // Register the custom Log class so Kryo does not have to write
          // the full class name with every serialized record.
          .registerKryoClasses(Array(classOf[Log]))

        val sc = new SparkContext(sparkConf)
        try {
            // Read the raw log lines.
            val rawData: RDD[String] = sc.textFile(dataInputPath)

            // Split on every comma (limit -1 keeps trailing empty fields)
            // and drop malformed rows with fewer than 85 columns.
            val filtered: RDD[Array[String]] =
                rawData.map(_.split(",", -1)).filter(_.length >= 85)

            // Log is a Product subclass, so createDataFrame can derive its schema.
            // RichString supplies implicit conversions used by Log.apply.
            import RichString._
            val logs: RDD[Log] = filtered.map(Log.apply)

            val sqlContext = new SQLContext(sc)
            sqlContext.createDataFrame(logs).write.parquet(resultOutputPath)
        } finally {
            // Always release the SparkContext, even when the job fails.
            sc.stop()
        }
    }

}
