package cn.devil.two


import cn.devil.two.beans.Log
import cn.devil.two.utils.NumberUtils
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.mapred.Utils.OutputFileUtils.OutputFilesFilter
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Batch job: reads a comma-separated log dump from HDFS, parses each row
 * (85 fields expected) into a [[Log]] bean, and writes the result out as
 * snappy-compressed Parquet.
 *
 * Usage: Leopard inputFilePath outPutFilePath
 *   e.g. hdfs://Devil21:9000/leopard/  hdfs://Devil21:9000/leopard-parquet/
 */
object Leopard {

  def main(args: Array[String]): Unit = {
    if (args.length != 2) {
      println("cn.devil.two.Leopard:" +
        "inputFilePath,outPutFilePath")
      sys.exit(-1)
    }
    val Array(inputFilePath, outPutFilePath) = args

    // Run HDFS operations as root regardless of the local OS user.
    System.setProperty("HADOOP_USER_NAME", "root")

    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName(this.getClass.getSimpleName)
      .set("spark.sql.parquet.compression.codec", "snappy")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    val data: RDD[String] = sc.textFile(inputFilePath)
    // split with limit -1 keeps trailing empty fields; rows shorter than 85
    // columns cannot populate Log and are dropped.
    val validRows: RDD[Array[String]] = data.map(_.split(",", -1)).filter(_.length >= 85)
    val logRecords: RDD[Log] = validRows.map(toLog)

    val dataframe = sqlContext.createDataFrame(logRecords)

    // Remove a pre-existing output directory so the Parquet write cannot
    // fail with "path already exists".
    val fs = FileSystem.get(sc.hadoopConfiguration)
    val path = new Path(outPutFilePath)
    if (fs.exists(path)) {
      fs.delete(path, true)
    }
    dataframe.write.parquet(outPutFilePath)

    sc.stop()
  }

  /** Maps one parsed CSV row (>= 85 fields) onto the 85-argument Log bean. */
  private def toLog(f: Array[String]): Log = {
    import cn.devil.two.beans.DevilString._
    new Log(
      f(0),
      // NOTE(review): every other int field uses .toInt2 — confirm
      // NumberUtils.str2Int is intentionally different here, or unify.
      NumberUtils.str2Int(f(1)),
      f(2).toInt2,
      f(3).toInt2,
      f(4).toInt2,
      f(5),
      f(6),
      f(7).toInt2,
      f(8).toInt2,
      f(9).toDouble2,
      f(10).toDouble2,
      f(11),
      f(12),
      f(13),
      f(14),
      f(15),
      f(16),
      f(17).toInt2,
      f(18),
      f(19),
      f(20).toInt2,
      f(21).toInt2,
      f(22),
      f(23),
      f(24),
      f(25),
      f(26).toInt2,
      f(27),
      f(28).toInt2,
      f(29),
      f(30).toInt2,
      f(31).toInt2,
      f(32).toInt2,
      f(33),
      f(34).toInt2,
      f(35).toInt2,
      f(36).toInt2,
      f(37),
      f(38).toInt2,
      f(39).toInt2,
      f(40).toDouble2,
      f(41).toDouble2,
      f(42).toInt2,
      f(43),
      f(44).toDouble2,
      f(45).toDouble2,
      f(46),
      f(47),
      f(48),
      f(49),
      f(50),
      f(51),
      f(52),
      f(53),
      f(54),
      f(55),
      f(56),
      f(57).toInt2,
      f(58).toDouble2,
      f(59).toInt2,
      f(60).toInt2,
      f(61),
      f(62),
      f(63),
      f(64),
      f(65),
      f(66),
      f(67),
      f(68),
      f(69),
      f(70),
      f(71),
      f(72),
      f(73).toInt2,
      f(74).toDouble2,
      f(75).toDouble2,
      f(76).toDouble2,
      f(77).toDouble2,
      f(78).toDouble2,
      f(79),
      f(80),
      f(81),
      f(82),
      f(83),
      f(84).toInt2
    )
  }
}
