package cn.edu360

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Batch job: reads raw log lines from `input`, parses each line into a
 * [[Shuxing]] record, and writes the result to `output` as snappy-compressed
 * Parquet.
 *
 * Usage: LogToParquet &lt;input&gt; &lt;output&gt;
 */
object LogToParquet {
  def main(args: Array[String]): Unit = {
    // Exactly two arguments are required: input path and output path.
    if (args.length != 2) {
      // NOTE: user-facing message intentionally kept as-is (Chinese).
      println("cn.edu360.LogToParquet:请输入正确的参数:input,output")
      sys.exit(1)
    }
    val Array(input, output) = args

    // Build the SparkConf with the settings this job needs.
    // NOTE(review): `local[*]` is hard-coded; for cluster deployment this
    // should be dropped and supplied via spark-submit instead.
    val conf: SparkConf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      .setMaster("local[*]")
      // Compress Parquet output with snappy.
      .set("spark.sql.parquet.compression.codec", "snappy")
      // Kryo serialization; register the record class to avoid writing
      // full class names into the serialized stream.
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .registerKryoClasses(Array(classOf[Shuxing]))

    val sc: SparkContext = new SparkContext(conf)
    val sqlcontext: SQLContext = new SQLContext(sc)

    // Parse each line; Shuxing.makelog returns an Option-like container,
    // so drop unparseable lines before unwrapping.
    val data: RDD[String] = sc.textFile(input)
    val shuxingRdd: RDD[Shuxing] =
      data.map(line => Shuxing.makelog(line)).filter(_.nonEmpty).map(_.get)
    val dataframe: DataFrame = sqlcontext.createDataFrame(shuxingRdd)

    // BUG FIX: the write was commented out, making the whole job a no-op
    // that never used `output`. Persist the DataFrame as Parquet.
    dataframe.write.parquet(output)

    sc.stop()
  }
}
