package cn.sheep.dmp.etl

import cn.sheep.dmp.beans.Log
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext

/**
  * Sheep.Old @ 64341393
  * Created 2018/3/28
  */
object Biz2ParquetPlus {

    /**
      * ETL entry point: reads raw comma-separated log files, parses each row
      * into a [[Log]] record, and writes the result out as Parquet.
      *
      * Expected args:
      *   0. dataInputPath    - path of the raw log files
      *   1. compressionCode  - parquet compression codec (e.g. snappy, gzip)
      *   2. outputPath       - destination path for the converted Parquet files
      */
    def main(args: Array[String]): Unit = {

        // Validate arguments; exit with a non-zero status so schedulers
        // (cron, oozie, airflow, ...) can detect the misconfiguration.
        if (args.length != 3) {
            println(
                """
                  |cn.sheep.dmp.etl.Biz2Parquet
                  |参数：
                  |     dataInputPath <原始日志所在路径>
                  |     compressionCode <压缩格式>
                  |     outputPath  <转换后的日志存储路径>
                """.stripMargin)
            sys.exit(1)
        }

        val Array(dataInputPath, compressionCode, outputPath) = args

        val sparkConf = new SparkConf().setAppName("日志转parquet文件")
          .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer") // serialize RDD payloads with Kryo
          .set("spark.sql.parquet.compression.codec", compressionCode)
          .registerKryoClasses(Array(classOf[Log])) // register the custom Log class with Kryo

        // Only default to a local master when none was supplied externally,
        // so `spark-submit --master ...` still works on a cluster.
        if (!sparkConf.contains("spark.master")) {
            sparkConf.setMaster("local[*]")
        }

        val sc = new SparkContext(sparkConf)
        val sqlc = new SQLContext(sc)

        // Read the raw logs; split with limit -1 so trailing empty fields are
        // kept, and drop malformed rows with fewer than 85 columns.
        val filtered: RDD[Array[String]] = sc.textFile(dataInputPath).map(_.split(",", -1)).filter(_.length >= 85)

        // RDD[Log] — Log is a Product, so a DataFrame can be derived from it.
        val rdd = filtered.map(Log(_))

        val dataFrame = sqlc.createDataFrame(rdd)
        // TODO(review): original comment claimed partitioning by province, but no
        // partitionBy(...) was applied; add one once the Log column name is confirmed.
        dataFrame.write.parquet(outputPath)

        sc.stop()
    }

}
