package com.dmp

import java.util.Properties

import com.typesafe.config.ConfigFactory
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.sql.{DataFrame, SQLContext, SaveMode}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * ClassName:ProCityRpt
  * Package:com.dmp
  * Description:
  *
  * @date:2019 /8/23 10:41
  * @author:17611219021 @sina.cn
  */
/**
  * Batch report: reads ad-log parquet data, counts requests per
  * (province, area), prints the result, writes it as snappy parquet to
  * `outputPath`, and appends it to the JDBC table configured via Typesafe
  * Config keys `jdbc.url` / `jdbc.user` / `jdbc.password` / `jdbc.tabname`.
  *
  * Usage: ProCityRptV2 <inputPath> <outputPath>
  */
object ProCityRptV2 {
  def main(args: Array[String]): Unit = {
    // Exactly two arguments are required: input parquet path, output path.
    if (args.length != 2) {
      println(
        """
          |com.dmp.ProCityRptV2
          |参数：
          | inputpath
          | outputPath
        """.stripMargin)
      sys.exit(1) // non-zero status: invocation error, not a normal exit
    }
    val Array(inputpath, outputPath) = args

    val conf: SparkConf = new SparkConf()
    conf.setAppName(s"${this.getClass.getSimpleName}")
    // Default to local[*] only when no master was supplied (e.g. by
    // spark-submit --master), so the same jar also runs on a cluster.
    conf.setIfMissing("spark.master", "local[*]")
    // Kryo serializer and snappy parquet compression (default codec is gzip).
    conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    conf.set("spark.sql.parquet.compression.codec", "snappy")
    val sc: SparkContext = new SparkContext(conf)
    val sqlc: SQLContext = new SQLContext(sc)

    val df: DataFrame = sqlc.read.parquet(inputpath)
    df.registerTempTable("adlog")
    // Alias the count so the column has a valid SQL identifier when the
    // JDBC writer has to create the target table ("count(1)" is not).
    val reDF: DataFrame = sqlc.sql(
      "select province,area,count(*) as cnt from adlog group by province,area")
    reDF.show()

    // Remove a pre-existing output directory so the parquet write succeeds.
    val fs = FileSystem.get(sc.hadoopConfiguration)
    val path = new Path(outputPath)
    if (fs.exists(path)) {
      fs.delete(path, true)
    }
    // Persist the aggregation to the output path (the original code deleted
    // the path but never wrote to it, leaving the second argument unused).
    reDF.write.parquet(outputPath)

    // Append the same result to the configured JDBC table.
    val load = ConfigFactory.load()
    val properties = new Properties()
    properties.setProperty("user", load.getString("jdbc.user"))
    properties.setProperty("password", load.getString("jdbc.password"))
    reDF.write.mode(SaveMode.Append).jdbc(
      load.getString("jdbc.url"), load.getString("jdbc.tabname"), properties)

    sc.stop()
  }
}
