package com.need1

import com.typesafe.config.ConfigFactory
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Converts a comma-separated text file into a snappy-compressed parquet file.
  * Created by zhuang on 2018/3/1.
  */
object ToParquet {
  // Loaded once at object initialization from the default Typesafe Config
  // sources (application.conf / system properties).
  private val load = ConfigFactory.load()

  /**
    * Entry point. Reads a comma-separated text file from the `sourceData`
    * config path, splits each line into fields, drops malformed records,
    * wraps each record into a [[Row]] using the project helper, and writes
    * the resulting DataFrame as snappy-compressed parquet to `outData`.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      // getClass.getSimpleName on a Scala object returns "ToParquet$";
      // strip the trailing '$' so the Spark UI shows a clean app name.
      .setAppName(getClass.getSimpleName.stripSuffix("$"))
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("spark.sql.parquet.compression.codec", "snappy")
    val sc: SparkContext = new SparkContext(conf)
    val qc: SQLContext = new SQLContext(sc)

    // Read the output path once; it is used only for the final write.
    val outPath = load.getString("outData")

    // Read the raw source text file.
    val file: RDD[String] = sc.textFile(load.getString("sourceData"))

    // Split on commas; limit = -1 keeps trailing empty fields so the
    // column count stays stable even for sparse records.
    val data: RDD[Array[String]] = file.map(_.split(",", -1))

    // Drop malformed records: keep only rows with at least 85 fields,
    // i.e. filter out anything with 84 fields or fewer (the schema from
    // Utils.getSchema expects the full column set).
    val filtered: RDD[Array[String]] = data.filter(_.length >= 85)

    // Wrap every record into a schema-conforming Row via the project helper.
    val rows: RDD[Row] = filtered.map(Utils.mkRow)

    // The schema (StructType) is centralized in Utils because each field
    // has its own type and cannot be generated generically here.
    val schema = Utils.getSchema
    val cdf = qc.createDataFrame(rows, schema)

    // SaveMode "overwrite" already deletes/replaces an existing output
    // directory, so no manual FileSystem.exists/delete pass is needed.
    cdf.write.mode("overwrite").parquet(outPath)

    sc.stop()
  }
}
