package com.bkd.tools

import com.bkd.beans.Log
import com.bkd.util.{NBF, SchemaUtils}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

object Biz2Parquet2 {

  /**
   * Batch job: converts comma-separated text logs into Parquet files
   * partitioned by province and city.
   *
   * Expected arguments (exactly three):
   *   logInputPath   - input path of the preprocessed text logs
   *   compression    - parquet codec: snappy, gzip or lzo
   *   resultOutPath  - output directory for the parquet result
   */
  def main(args: Array[String]): Unit = {

    // Validate the argument count; print usage and exit otherwise.
    if (args.length != 3) {
      println(
        """
          |com.bkd.tools.Biz2Parquet2
          |参数：
          |logInputPath
          |compression <snappy,gzip,lzo>
          |resultOutPath
        """.stripMargin
      )
      sys.exit()
    }

    // val, not var: the extracted arguments are never reassigned.
    val Array(logInputPath, compression, resultOutPath) = args

    // Spark setup: Kryo serialization with Log registered up front for
    // compact, fast serialization of the record bean.
    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      .setMaster("local[*]")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    conf.registerKryoClasses(Array(classOf[Log]))

    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)
    // Apply the requested parquet compression codec (snappy/gzip/lzo).
    sqlContext.setConf("spark.sql.parquet.compression.codec", compression)

    // Read the raw text logs.
    val rowData: RDD[String] = sc.textFile(logInputPath)

    // Split each line on commas (the explicit limit preserves trailing
    // empty fields), KEEP only records with at least 85 fields, then map
    // each field array to a Log bean. (The old comment claimed >=85 rows
    // were dropped; the filter actually retains them.)
    val dataRow: RDD[Log] = rowData
      .map(line => line.split(",", line.length))
      .filter(_.length >= 85) // drop malformed records with fewer than 85 fields
      .map(arr => Log(arr))

    // Build a DataFrame from the Log RDD (schema inferred by reflection).
    val dataFrame: DataFrame = sqlContext.createDataFrame(dataRow)

    // Write the parquet output partitioned by province and city so
    // downstream queries can prune by location.
    dataFrame.write.partitionBy("provincename", "cityname").parquet(resultOutPath)
    sc.stop()
  }
}
