package teacher

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, SQLContext}
import xubo.wangcaifeng.love.Utils.Log

/**
  * Batch job: converts raw comma-separated log lines into a snappy-compressed,
  * partitioned parquet dataset.
  *
  * Uses a custom bean class (`Log`) rather than a case class because, in older
  * Scala versions (pre-2.11), case classes were limited to 22 fields.
  *
  * Usage: Log2Deal &lt;dataInputPath&gt; &lt;outputPath&gt;
  */
object Log2Deal {
  def main(args: Array[String]): Unit = {
    // Validate arguments: exactly an input path and an output path are required.
    if (args.length != 2) {
      println(
        """
          |teacher.Log2Deal
          |params:
          |dataInputPath  日志输入路径
          |outputPath  结果输出路径
        """.stripMargin
      )
      // Exit with a non-zero status so callers/schedulers can detect the misuse
      // (the original `sys.exit()` reported success even on bad arguments).
      sys.exit(1)
    }
    // Destructure the two expected arguments via array pattern matching.
    val Array(dataInputPath, outputPath) = args

    val conf = new SparkConf()
      .setAppName(s"${this.getClass.getSimpleName}")
      // NOTE(review): hard-coded local master — remove (or make configurable)
      // before submitting to a real cluster via spark-submit.
      .setMaster("local[*]")
      // Kryo is faster and more compact than Java serialization; registering
      // Log avoids shipping its full class name with every record.
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .registerKryoClasses(Array(classOf[Log]))
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)
    // Snappy: fast, CPU-cheap compression codec for the parquet output.
    sqlContext.setConf("spark.sql.parquet.compression.codec", "snappy")

    // Read raw logs; split with limit -1 so trailing empty fields are kept,
    // then drop malformed rows with fewer than 85 fields.
    val rawLog = sc.textFile(dataInputPath).map(_.split(",", -1)).filter(_.length >= 85)

    // Option 1: implicit conversion to DataFrame via toDF
    // import sqlContext.implicits._
    // val dataFrame: DataFrame = rawLog.map(Log(_)).toDF

    // Option 2: explicit createDataFrame over an RDD of Log beans.
    val logRDD = rawLog.map(Log(_))
    val dataFrame = sqlContext.createDataFrame(logRDD)

    // Partition output by province/city so later reads can prune directories.
    dataFrame.write.partitionBy("provincename", "cityname").parquet(outputPath)
    sc.stop()
  }
}
