package org.yonggan.dmp.etl

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}
import org.yonggan.dmp.conf.ConfigManager

/**
  * 日志文件清洗
  */
/**
  * Log-file cleansing ETL: reads delimited (bz2-compressed) text logs,
  * parses each record into typed columns, and writes the result as
  * snappy-compressed Parquet files.
  *
  * Input path, output path and the field delimiter come from [[ConfigManager]];
  * the output column layout is defined by `LogSchema.schema`.
  */
object Bz2ParquetEtl {

  // 0-based column indices parsed as Int via RichString.toIntx.
  private val IntColumns: Set[Int] = Set(
    1, 2, 3, 4, 7, 8, 17, 20, 21, 26, 28, 30, 31, 32, 34, 35, 36,
    38, 39, 42, 57, 59, 60, 73, 84)

  // 0-based column indices parsed as Double via RichString.toDoublex.
  private val DoubleColumns: Set[Int] = Set(
    9, 10, 40, 41, 44, 45, 58, 74, 75, 76, 77, 78)

  // Expected number of columns per record; shorter records are discarded.
  private val ColumnCount = 85

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("日志文件清洗")
      // NOTE(review): hard-coded local master; for cluster deployment prefer
      // passing --master via spark-submit instead of setting it in code.
      .setMaster("local[*]")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val sc = new SparkContext(conf)
    // Read the compressed input; Hadoop's TextInputFormat decompresses
    // transparently based on the file extension (e.g. .bz2).
    val textFile = sc.textFile(ConfigManager.ETL_INPUT)

    val sqlContext = new SQLContext(sc)
    sqlContext.setConf("spark.sql.parquet.compression.codec", "snappy")

    // Split each line on the configured delimiter. The -1 limit is the bug
    // fix: String.split(regex) silently drops trailing empty fields, so a
    // valid record whose last columns were empty would fall below the
    // ColumnCount threshold and be lost. With limit -1 trailing empties are
    // kept; RichString.toIntx/toDoublex handle the empty-cell conversion.
    val filtered = textFile
      .map(_.split(ConfigManager.ETL_SPLILT_SYMBOL, -1))
      .filter(_.length >= ColumnCount)

    import org.yonggan.dmp.utils.RichString._

    // Convert each raw record into a typed Row matching LogSchema.schema:
    // listed indices become Int/Double, everything else stays a String.
    val rowRDD: RDD[Row] = filtered.map { arr =>
      Row.fromSeq((0 until ColumnCount).map { i =>
        if (IntColumns(i)) arr(i).toIntx
        else if (DoubleColumns(i)) arr(i).toDoublex
        else arr(i)
      })
    }

    val resDF = sqlContext.createDataFrame(rowRDD, LogSchema.schema)

    resDF.write.parquet(ConfigManager.PARQUET_OUT)

    sc.stop()
  }

}
