package cn.edu360t.etl


import cn.edu360t.beans.Log
import cn.edu360t.config.ConfigHepler
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext

//git
object Bz2ParquetV2 {

  /**
   * Entry point: reads raw bz2-compressed CSV log files from the path configured
   * in [[ConfigHepler]], parses each line into a [[Log]] bean, and writes the
   * result out as snappy-compressed Parquet.
   */
  def main(args: Array[String]): Unit = {
    // Boilerplate Spark setup; input/output paths come from the config file.
    val conf: SparkConf = new SparkConf()
      // Local-run setting (override/remove when submitting to a cluster)
      .setMaster("local[*]")
      .setAppName("日志格式转换为Parquet文件")
      // Use Kryo serialization for better performance
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // Parquet compression codec.
      // FIX: the original set key "spark.sql.parquet.compression" to value
      // "codec\tsnappy" (a tab swallowed the key suffix), so the setting was
      // silently ignored. The correct property is below.
      .set("spark.sql.parquet.compression.codec", "snappy")
      // Register the custom bean class with Kryo
      .registerKryoClasses(Array(classOf[Log]))

    // SparkContext for reading the static input files
    val sc: SparkContext = new SparkContext(conf)
    // SQLContext for DataFrame creation and Parquet output
    // (sqlContext.implicits were imported before but never used — removed)
    val sqlContext: SQLContext = new SQLContext(sc)

    // 1. Read the raw data
    val rawData: RDD[String] = sc.textFile(ConfigHepler.bz2path)

    // 2. Split on "," keeping trailing empty fields (limit = -1), and drop
    //    malformed rows — a valid log line has at least 85 fields.
    val filteredData = rawData.map(line => line.split(",", -1)).filter(_.length >= 85)

    // 3. Map each field array into a Log bean
    val logs: RDD[Log] = filteredData.map(Log(_))

    val df = sqlContext.createDataFrame(logs)

    // 4. Write out as Parquet (could also partitionBy("provincename", "cityname"))
    df.write.parquet(ConfigHepler.parquetPath)

    sc.stop()
  }
}
