package hou.etl

import hou.NumParse
import hou.beans.{LogSchema, Logs}
import hou.config.ConfigHelper
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SQLContext, SaveMode}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Converts daily text log files into a Parquet dataset.
  *
  * Reads comma-separated log lines, drops malformed records, parses the
  * remainder into [[Logs]] beans, and writes a snappy-compressed Parquet
  * output partitioned by province/city. Any pre-existing output is deleted
  * first so the job is idempotent per run.
  */
object Daily2ParquetV2 {

  // A record must have at least this many comma-separated fields to be
  // parseable by Logs.apply; shorter lines are treated as malformed.
  private val MinFieldCount = 85

  def main(args: Array[String]): Unit = {
    // Spark configuration.
    val conf = new SparkConf()
      .setAppName(this.getClass.getName)
      // NOTE(review): hard-coded local master is fine for development; for
      // cluster runs prefer supplying --master via spark-submit instead.
      .setMaster("local[*]")
      .set("spark.serializer", ConfigHelper.serializer)
    // Register the custom bean with Kryo for faster, smaller serialization.
    conf.registerKryoClasses(Array(classOf[Logs]))

    val sc = new SparkContext(conf)
    try {
      val sqlContext = new SQLContext(sc)
      sqlContext.setConf("spark.sql.parquet.compression.codec", "snappy")

      // Read the raw daily log lines.
      val rawLog = sc.textFile(ConfigHelper.dmpDailyPath)

      // Split on every comma (limit -1 keeps trailing empty fields) and
      // filter out records that don't have enough columns.
      val filtered: RDD[Array[String]] = rawLog
        .map(_.split(",", -1))
        .filter(_.length >= MinFieldCount)

      // Map field arrays to Logs beans; the DataFrame schema is inferred
      // from the bean's fields.
      val logRDD = filtered.map(Logs.apply)
      val dataFrame = sqlContext.createDataFrame(logRDD)

      // Delete any previous output so the write doesn't fail on an
      // already-existing path (recursive delete).
      val fileSystem = FileSystem.get(sc.hadoopConfiguration)
      val outputPath = new Path(ConfigHelper.parquetPath)
      if (fileSystem.exists(outputPath)) {
        fileSystem.delete(outputPath, true)
      }

      // Partition by province/city so downstream queries can prune partitions.
      dataFrame.write
        .partitionBy("provincename", "cityname")
        .parquet(ConfigHelper.parquetPath)
    } finally {
      // Always release the SparkContext, even if the job fails mid-way.
      sc.stop()
    }
  }
}
