package cn.dmp.tools

import cn.dmp.beans.CustomCl
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by Administrator on 2018/4/20.
  */
/**
  * Batch job: reads comma-separated text records, filters out malformed
  * rows, maps them onto the [[CustomCl]] bean, and writes the result as
  * snappy-compressed Parquet.
  *
  * Usage: Biz2Parquet2 &lt;dataInputPath&gt; &lt;dataOutPath&gt;
  */
object Biz2Parquet2 {

  def main(args: Array[String]): Unit = {

    // Validate CLI arguments before allocating any Spark resources.
    // The original bare `val Array(a, b) = args` would throw an
    // uninformative MatchError on a wrong argument count — and only
    // after a SparkContext had already been created.
    if (args.length != 2) {
      System.err.println("Usage: Biz2Parquet2 <dataInputPath> <dataOutPath>")
      System.exit(1)
    }
    val Array(dataInputPath, dataOutPath) = args

    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("Biz2Parquet2")
      // Kryo serialization for RDD data shipped between workers
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // Snappy compression codec for the Parquet output
      .set("spark.sql.parquet.compression.codec", "snappy")
      // Register the custom bean so Kryo can serialize it efficiently
      .registerKryoClasses(Array(classOf[CustomCl]))

    val sc = new SparkContext(conf)
    try {
      // Split with limit -1 so trailing empty fields are preserved,
      // then drop malformed rows with fewer than 85 columns.
      val validRows: RDD[Array[String]] =
        sc.textFile(dataInputPath)
          .map(_.split(",", -1))
          .filter(_.length >= 85)

      // Map each field array onto the typed bean backing the DataFrame schema.
      val beans = validRows.map(CustomCl(_))

      val sqlContext = new SQLContext(sc)
      val dataFrame = sqlContext.createDataFrame(beans)

      // Persist as Parquet (compressed per the codec configured above).
      dataFrame.write.parquet(dataOutPath)
    } finally {
      // Always release the SparkContext, even if any stage above fails.
      sc.stop()
    }

  }

}
