package cn.sheep.dolphin.etl

import cn.sheep.dolphin.bean.AdLog
import cn.sheep.dolphin.common.DolphinAppComm
import cn.sheep.dolphin.utils.FileHelper
import org.apache.spark.rdd.RDD
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.sql.SQLContext

/** Converts bz2-compressed log files into parquet files.
  * author: old sheep
  * QQ: 64341393
  * Created 2018/11/28
  */
object Bz2ParquetV2 {

	/** Entry point.
	  *
	  * Expects exactly two arguments:
	  *   - bz2InputPath:   input path of the bz2 log files
	  *   - parquetOutPath: output path for the parquet files
	  *
	  * Exits with a non-zero status code when the arguments are missing.
	  */
	def main(args: Array[String]): Unit = {

		// Validate arguments: exactly two positional parameters are required.
		if (args.length != 2) {
			println(
				"""
				  |Usage: cn.sheep.dolphin.etl.Bz2ParquetV2
				  |Param:
				  |	bz2InputPath	bz2日志文件的输入路径
				  | parquetOutPath	parquet文件的输出路径
				""".stripMargin)
			sys.exit(-1) // non-zero exit code signals abnormal termination
		}

		// Destructure the argument array into named values via pattern matching.
		val Array(bz2InputPath, parquetOutPath) = args

		// Spark tuning: Kryo serialization is faster and more compact than Java
		// serialization; snappy sets the compression codec for parquet output.
		val sparkParams = Map[String, String](
			"spark.serializer" -> classOf[KryoSerializer].getName,
			"spark.sql.parquet.compression.codec" -> "snappy"
		)

		val sc = DolphinAppComm.createSparkContext("将bz2日志文件转换成parquet文件", sparkParams)

		// Read the offline bz2 log files (Hadoop decompresses bz2 transparently).
		val data = sc.textFile(bz2InputPath)/*.repartition()*/

		// Drop malformed records: a valid log line must yield at least 85
		// comma-separated fields. split(",", -1) keeps trailing empty fields,
		// so field count stays stable even when trailing columns are blank.
		// Use .length (direct array field) instead of .size, which would box
		// each Array through an implicit ArrayOps wrapper in this hot path.
		val filteredRDD: RDD[Array[String]] = data.map(_.split(",", -1)).filter(_.length >= 85)

		// parquet <- DataFrame (several creation paths) <- SQLContext <- RDD
		val sqlc = new SQLContext(sc)
		// sqlc.setConf("spark.sql.parquet.compression.codec", "snappy")

		// RDD[Array[String]] -> RDD[AdLog]: wrapping rows in the AdLog case
		// class lets Spark infer the DataFrame schema by reflection.
		val adLogRDD: RDD[AdLog] = filteredRDD.map(AdLog(_))
		val dataFrame = sqlc.createDataFrame(adLogRDD)

		// Delete the target directory first — DataFrameWriter.parquet fails
		// when the output path already exists (default SaveMode.ErrorIfExists).
		FileHelper.deleteDir(parquetOutPath, sc)

		// partitionBy would lay out output by province/city; currently disabled.
		dataFrame.write/*.partitionBy("provincename", "cityname")*/.parquet(parquetOutPath)

		sc.stop()
	}

}
