package cn.eud360.xiangmu.dmp.etl

import java.io.File

import cn.eud360.xiangmu.dmp.config.ConfigHelper
import cn.eud360.xiangmu.dmp.beans.LogSchema
import cn.eud360.xiangmu.dmp.utils.NumParse
import org.apache.commons.io.FileUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SQLContext, SaveMode}
import org.apache.spark.{SparkConf, SparkContext}

/** Converts bz2-compressed raw log files into Parquet format. */
object Bz2Parquet {

  /** Total number of fields expected in each log record; shorter records are dropped. */
  private val FieldCount = 85

  /** Zero-based field indices that are parsed as Int (via NumParse.toInt). */
  private val intFields: Set[Int] = Set(
    1, 2, 3, 4, 7, 8, 17, 20, 21, 26, 28,
    30, 31, 32, 34, 35, 36, 38, 39, 42,
    57, 59, 60, 73, 84)

  /** Zero-based field indices that are parsed as Double (via NumParse.toDouble). */
  private val doubleFields: Set[Int] = Set(
    9, 10, 40, 41, 44, 45, 58, 74, 75, 76, 77, 78)

  /**
   * Converts one split log record into a [[Row]], applying the per-column
   * numeric conversions; every other column is kept as its raw String.
   * Only the first `FieldCount` fields are used, matching `LogSchema.schema`.
   */
  private def toRow(fields: Array[String]): Row =
    Row.fromSeq((0 until FieldCount).map { i =>
      if (intFields(i)) NumParse.toInt(fields(i))
      else if (doubleFields(i)) NumParse.toDouble(fields(i))
      else fields(i)
    })

  /**
   * Entry point: reads the raw (bz2) log files, converts each valid record
   * to a typed Row and writes the result as snappy-compressed Parquet.
   */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("转换成parquet文件")
      .set("spark.serializer", ConfigHelper.serializer) // e.g. Kryo — see ConfigHelper
      .set("spark.sql.parquet.compression.codec", ConfigHelper.snappy)
    val sc: SparkContext = new SparkContext(conf)
    try {
      // Raw input: one log record per line, comma-separated.
      val rawLogs: RDD[String] = sc.textFile(ConfigHelper.dmplogpath)
      // split(",", -1) keeps trailing empty fields; drop records with too few columns.
      val records: RDD[Array[String]] = rawLogs.map(_.split(",", -1)).filter(_.length >= FieldCount)

      // Row + schema => DataFrame => Parquet.
      val sqlContext: SQLContext = new SQLContext(sc)
      val rows: RDD[Row] = records.map(toRow)
      val frame: DataFrame = sqlContext.createDataFrame(rows, LogSchema.schema)

      // SaveMode.Overwrite already replaces any existing output directory, so the
      // previous manual FileSystem/File pre-deletion was redundant and removed.
      frame.write.mode(SaveMode.Overwrite).parquet(ConfigHelper.parquetoutpath)
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}
