package cn.dmp.service

import cn.dmp.beans.{LogInfo}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext, SaveMode}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * 转换文件格式为parquet，并设置压缩格式
  */
object ParquetChanges {

  /**
    * Batch job that converts raw comma-separated ad-log files into
    * snappy-compressed Parquet.
    *
    * Steps:
    *  1. read every file under /root/yarndir/
    *  2. split each line into fields, substituting "0" for empty fields
    *  3. keep only complete 85-column records and map them to [[LogInfo]]
    *  4. write the resulting DataFrame as Parquet (overwriting prior output)
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      //.setMaster("local[*]")
      .setAppName(this.getClass.getSimpleName)
      // Kryo serializes RDD records faster and more compactly than Java serialization.
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // Compress serialized RDD partitions to reduce memory/disk footprint.
      .set("spark.rdd.compress", "true")
    val sc: SparkContext = new SparkContext(conf)

    //val logFile: RDD[String] = sc.textFile("/root/yarndir/2016-10-01_06_p1_invalid.1475274123982.log.FINISH")
    val logFile: RDD[String] = sc.textFile("/root/yarndir/")

    // Split with limit -1 so trailing empty fields are preserved, then replace
    // every empty field with "0" so the numeric .toInt/.toDouble parses below
    // cannot fail on blanks.
    val fields: RDD[Array[String]] = logFile.map { line =>
      line.split(",", -1).map(field => if (field.isEmpty) "0" else field)
    }

    // LogInfo's constructor expects exactly 85 columns; drop malformed lines.
    val validRecords: RDD[Array[String]] = fields.filter(_.length == 85)

    val results: RDD[LogInfo] = validRecords.map { arr =>
      new LogInfo(arr(0), arr(1).toInt, arr(2).toInt, arr(3).toInt, arr(4).toInt, arr(5), arr(6), arr(7).toInt,
        arr(8).toInt, arr(9).toDouble, arr(10).toDouble, arr(11), arr(12), arr(13), arr(14), arr(15), arr(16), arr(17).toInt,
        arr(18), arr(19), arr(20).toInt, arr(21).toInt, arr(22), arr(23), arr(24), arr(25), arr(26).toInt, arr(27), arr(28).toInt,
        arr(29), arr(30).toInt, arr(31).toInt, arr(32).toInt, arr(33), arr(34).toInt, arr(35).toInt, arr(36).toInt, arr(37),
        arr(38).toInt, arr(39).toInt, arr(40).toDouble, arr(41).toDouble, arr(42).toInt, arr(43), arr(44).toDouble, arr(45).toDouble,
        arr(46), arr(47), arr(48), arr(49), arr(50), arr(51), arr(52), arr(53), arr(54), arr(55), arr(56), arr(57).toInt,
        arr(58).toDouble, arr(59).toInt, arr(60).toInt, arr(61), arr(62), arr(63), arr(64), arr(65), arr(66), arr(67), arr(68),
        arr(69), arr(70), arr(71), arr(72), arr(73).toInt, arr(74).toDouble, arr(75).toDouble, arr(76).toDouble, arr(77).toDouble,
        arr(78).toDouble, arr(79), arr(80), arr(81), arr(82), arr(83), arr(84).toInt)
    }

    val sqlContext: SQLContext = new SQLContext(sc)
    // BUG FIX: the Parquet compression codec is a Spark SQL setting, not a
    // Hadoop one. The original code set it on sc.hadoopConfiguration, where it
    // was silently ignored and the output was not snappy-compressed.
    sqlContext.setConf("spark.sql.parquet.compression.codec", "snappy")
    import sqlContext.implicits._
    val dataDF: DataFrame = results.toDF()
    dataDF.printSchema()
    /*dataDF.registerTempTable("t_logfile")
    sqlContext.sql("select * from t_logfile").show(100)*/
    //println(sqlContext.sql("select * from t_logfile").count()+"++++++++++++")

    // Parquet is the default save format; overwrite any previous run's output.
    dataDF.write.mode(SaveMode.Overwrite).save("/root/yarndir/logFile")

    sc.stop()
  }
}
