package cn.dmp.test

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext, SaveMode}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Converts a text log file to Parquet format and configures the output compression codec.
  */
object ParquetChange {

  /**
    * Entry point: reads a (possibly bz2-compressed) text log file, repartitions it
    * to a single partition, and writes it out as snappy-compressed Parquet.
    *
    * Usage: ParquetChange [inputPath] [outputPath]
    * When arguments are omitted, the original hard-coded paths are used, so
    * existing invocations keep working unchanged.
    */
  def main(args: Array[String]): Unit = {
    // Allow input/output locations to be overridden from the command line;
    // fall back to the original literals for backward compatibility.
    val inputPath: String =
      if (args.length > 0) args(0)
      else "H:\\bigdata\\大数据课程\\杨家伟视频\\项目二\\资料PDF\\2016-10-01_06_p1_invalid.1475274123982.log.FINISH.bz2"
    val outputPath: String = if (args.length > 1) args(1) else "logFile2"

    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName(this.getClass.getSimpleName)
      // Use Kryo for serializing RDD data shipped between workers
      // (more compact and faster than default Java serialization).
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // Compress serialized RDD partitions to reduce memory/disk footprint.
      .set("spark.rdd.compress", "true")
    val sc: SparkContext = new SparkContext(conf)

    val logFile: RDD[String] = sc.textFile(inputPath)
    val sqlContext: SQLContext = new SQLContext(sc)
    // FIX: "spark.sql.*" keys are Spark SQL configs, not Hadoop configs.
    // Setting the codec on sc.hadoopConfiguration (as the original did) is
    // silently ignored; it must go through the SQL configuration instead.
    sqlContext.setConf("spark.sql.parquet.compression.codec", "snappy")

    import sqlContext.implicits._
    // NOTE: the original debug dump `logFile.foreach(println)` was removed —
    // it printed the entire dataset and, on a real cluster, would print on
    // the executors rather than the driver.

    // Collapse to one partition so the job emits a single Parquet file.
    val logDF: DataFrame = logFile.repartition(1).toDF()
    logDF.printSchema()
    logDF.write.mode(SaveMode.Overwrite).parquet(outputPath)
    sc.stop()
  }
}
