package com.spark.prepareJob.etl

import com.spark.beans.Logs
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.io.SnappyCompressionCodec
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.sql.SparkSession

/**
  * ETL job: cleans raw advertising log data, converts it to a Dataset/DataFrame,
  * and (optionally) persists it to the configured output location.
  */
object AdDataStandarJob {

  /** Default input path used when no CLI argument is supplied (keeps the
    * original zero-argument invocation behavior intact). */
  private val DefaultInputPath = "file:///E:/scala/projectdmp/data/data.txt"

  /**
    * Entry point. Reads raw ad log lines, parses each line into a [[Logs]]
    * record, converts the result to a Dataset, and prints its schema and a
    * sample of rows.
    *
    * @param args optional; args(0) overrides the input file path
    */
  def main(args: Array[String]): Unit = {

    // Silence noisy framework logging so job output is readable.
    Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN)
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.spark-project").setLevel(Level.WARN)

    val conf = new SparkConf().setAppName("AdDataStandarJob")
      .setMaster("local[*]") // NOTE(review): hard-coded local master; override via spark-submit for cluster runs
      .set("spark.serializer", classOf[KryoSerializer].getName) // use Kryo serialization
      .registerKryoClasses(Array(classOf[Logs])) // register Logs with Kryo for compact serialization
      .set("spark.io.compression.codec", classOf[SnappyCompressionCodec].getName) // Snappy block compression
    val sparkSession = SparkSession.builder().config(conf).getOrCreate()

    // Ensure the SparkSession is always stopped, even if the job fails;
    // otherwise the underlying SparkContext (and its resources) leak.
    try {
      // Input path: first CLI argument if given, otherwise the historical default.
      val inputPath = args.headOption.getOrElse(DefaultInputPath)

      // Load raw log lines via the underlying SparkContext.
      val line = sparkSession.sparkContext.textFile(inputPath)

      // Implicit encoders for the RDD -> Dataset conversion below.
      import sparkSession.implicits._

      // Parse each raw line into a structured Logs record.
      val originDataSet = line.map(Logs.line2Logs).toDS()

      originDataSet.printSchema()
      originDataSet.show()

      // Dataset export (default format is Parquet); left disabled as in the original.
      //originDataSet.write.save("e:/out/standard/ad/")
    } finally {
      sparkSession.stop()
    }
  }
}
