package cn.eud360.xiangmu.dmp.etl

import cn.eud360.xiangmu.dmp.config.ConfigHelper
import cn.eud360.xiangmu.dmp.beans.Log
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext, SaveMode}

// Second implementation of the log-format conversion (uses a custom case class)
object Bz2ParquetV2 {
  /**
   * Entry point: reads the raw (bz2-compressed) log files, filters out malformed
   * records, and writes the result as snappy-compressed Parquet partitioned by
   * province name.
   */
  def main(args: Array[String]): Unit = {
    // Spark setup (boilerplate)
    val conf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("转换成parquet文件")
      .set("spark.serializer", ConfigHelper.serializer) // Kryo serializer
      .set("spark.sql.parquet.compression.codec", ConfigHelper.snappy)
      // Register the custom Log class with Kryo for efficient serialization
      .registerKryoClasses(Array(classOf[Log]))

    val sc: SparkContext = new SparkContext(conf)
    try {
      // Raw source data (bz2 files are decompressed transparently by textFile)
      val rawLines: RDD[String] = sc.textFile(ConfigHelper.dmplogpath)
      // Split on ',' with limit -1 to keep trailing empty fields;
      // keep only records with at least 85 columns (the expected schema width)
      val validRecords: RDD[Array[String]] =
        rawLines.map(_.split(",", -1)).filter(_.length >= 85)

      // Format conversion (Parquet <= row + schema = DataFrame) via Spark SQL.
      // createDataFrame on an RDD of a case class needs only a TypeTag, so the
      // previously imported implicits were unused and have been dropped.
      val sqlContext: SQLContext = new SQLContext(sc)
      val logRecords: RDD[Log] = validRecords.map(Log(_))
      val frame: DataFrame = sqlContext.createDataFrame(logRecords)

      // SaveMode.Overwrite replaces the hand-rolled FileSystem.exists/delete
      // dance: Spark deletes the existing output path itself.
      // Partition the output by province.
      frame.write
        .mode(SaveMode.Overwrite)
        .partitionBy("provincename")
        .parquet(ConfigHelper.parquetoutpath2)
    } finally {
      // Always release the SparkContext, even if the job throws.
      sc.stop()
    }
  }
}
